Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-12-08 05:27:03 +08:00)

Commit da3f4a7142: Merge remote-tracking branch 'upstream/main' into main

# Conflicts:
#	cmd/openim-api/main.go
#	pkg/common/cmd/api.go
#	pkg/common/cmd/root.go
#	pkg/common/discovery_register/k8s_discovery_register.go
#	pkg/common/startrpc/start.go
@@ -1,4 +1,4 @@
<!-- # Ignore files and directories starting with a dot
# Ignore files and directories starting with a dot

# Ignore specific files
.dockerignore

@@ -29,4 +29,4 @@ assets/
components/

# Ignore tools and scripts
.github/ -->
.github/
.github/workflows/bot-cherry-pick.yml (vendored, 3 changes)

@@ -19,7 +19,8 @@ on:
jobs:
  cherry-pick:
    name: Cherry Pick
    if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/cherry-pick') && github.event.comment.user.login=='kubbot'
    # && github.event.comment.user.login=='kubbot'
    if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/cherry-pick')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout the latest code

.github/workflows/build-docker-image.yml (vendored, 9 changes)

@@ -120,6 +120,15 @@ jobs:
        uses: docker/metadata-action@v5.0.0
        with:
          images: ghcr.io/openimsdk/openim-server
          # generate Docker tags based on the following events/attributes
          tags: |
            type=schedule
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
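For context, here is a rough illustration of what the `docker/metadata-action` rules above generate. The tag values below are examples inferred from the listed `type=` entries, not output captured from an actual workflow run.

```bash
# Illustration only: for a hypothetical release tag v3.4.0 pushed to the repo,
# the rules above would yield image references roughly like these.
docker pull ghcr.io/openimsdk/openim-server:3.4.0        # type=semver,pattern={{version}}
docker pull ghcr.io/openimsdk/openim-server:3.4          # type=semver,pattern={{major}}.{{minor}}
docker pull ghcr.io/openimsdk/openim-server:3            # type=semver,pattern={{major}}
docker pull ghcr.io/openimsdk/openim-server:main         # type=ref,event=branch (branch pushes)
docker pull ghcr.io/openimsdk/openim-server:sha-0a1b2c3  # type=sha (short commit SHA; value is made up)
```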
.github/workflows/build-openim-web-image.yml (vendored, 43 changes)

@@ -114,15 +114,12 @@ jobs:
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        # ghcr.io/openimsdk/openim-web:latest
      - name: Extract metadata (tags, labels) for Docker
        id: meta2
        uses: docker/metadata-action@v5.0.0
        with:
          install: true
      - name: Cache Docker layers
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
          images: ghcr.io/openimsdk/openim-web

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3

@@ -131,38 +128,12 @@ jobs:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker openim-web
        id: meta1
        uses: docker/metadata-action@v5.0.0
        with:
          images: ghcr.io/openimsdk/openim-web

      - name: Build and push Docker image for openim-web
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./build/images/openim-tools/openim-web/Dockerfile
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta1.outputs.tags }}
          labels: ${{ steps.meta1.outputs.labels }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache

      - name: Extract metadata (tags, labels) for Docker openim-web
        id: meta2
        uses: docker/metadata-action@v5.0.0
        with:
          images: ghcr.io/openimsdk/component

      - name: Build and push Docker image for component
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./build/images/openim-tools/component/Dockerfile
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta2.outputs.tags }}
          labels: ${{ steps.meta2.outputs.labels }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
          labels: ${{ steps.meta2.outputs.labels }}
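The local-cache setup above can be reproduced on a workstation. The following is a minimal sketch; the builder name and image tag are assumptions, not project conventions, and only the Dockerfile path, platforms and cache options come from the workflow itself.

```bash
# Local equivalent of the workflow's buildx build with a local layer cache.
docker buildx create --name openim-builder --use
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --cache-from type=local,src=/tmp/.buildx-cache \
  --cache-to type=local,dest=/tmp/.buildx-cache \
  -f ./build/images/openim-tools/openim-web/Dockerfile \
  -t openim-web:dev .
```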
.github/workflows/create_branch_on_tag.yml (vendored, 4 changes)

@@ -48,7 +48,7 @@ jobs:
          fi

      - name: Push Changes
        uses: stefanzweifel/git-auto-commit-action@v4
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: "Auto Commit CHANGELOG"
          branch: release-v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}

@@ -68,7 +68,7 @@ jobs:
          fi

      - name: Push Changes
        uses: stefanzweifel/git-auto-commit-action@v4
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: "Auto Commit CHANGELOG"
          branch: main

.github/workflows/openimci.yml (vendored, 4 changes)

@@ -48,7 +48,7 @@ jobs:

    strategy:
      matrix:
        go_version: ["1.18","1.19","1.20","1.21"]
        go_version: ["1.19","1.20","1.21"]
        os: [ubuntu-latest]

    steps:

@@ -86,7 +86,7 @@ jobs:
        run: sudo make clean

      - name: Push Changes to Main
        uses: stefanzweifel/git-auto-commit-action@v4
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: "cicd: robot automated Change"
          branch: main

@@ -171,7 +171,7 @@ linters-settings:
  # exclude_godoc_examples: false
  funlen:
    lines: 150
    statements: 50
    statements: 80
  gci:
    # put imports beginning with prefix after 3rd-party packages;
    # only support one prefix
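To check the effect of the relaxed `funlen` limits locally, something like the following should work, assuming `golangci-lint` is installed and that the configuration file is named `.golangci.yml` (the file name is not shown in this hunk).

```bash
# Run the linters with the repository's own configuration;
# functions up to 150 lines / 80 statements now pass the funlen check.
golangci-lint run --config .golangci.yml ./...
```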
@@ -98,6 +98,11 @@ OpenIM 我们的目标是建立一个顶级的开源社区。我们有一套标

+ [Interface Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/interface.md)

+ [OpenIM Configuration and Environment Variables](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md)

> **Note**
> Users in China should read our [Docker image standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/images.md) in order to use the domestic aliyun mirror addresses. OpenIM also maintains a synced gitee repository for China, which you can find at [gitee.com](https://gitee.com/openimsdk).

## :link: Links

+ **[Full documentation](https://doc.rentsoft.cn/)**

@@ -126,7 +126,7 @@ It is recommended to use Docker Compose for deployment, which can easily and qui

<details> <summary>Compile from Source</summary>

You need `Go 1.18` or a higher version, and `make`.
You need `Go 1.20` or a higher version, and `make`.

```bash

@@ -212,6 +212,8 @@ Before you start, please make sure your changes are in demand. The best for that

- [Interface Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/api.md)
- [Log Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/logging.md)
- [Error Code Standards](https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/error_code.md)
- [OpenIM configuration and environment variable settings](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md)

## :busts_in_silhouette: Community
build/images/Dockerfile (new file, 10 lines)

@@ -0,0 +1,10 @@
FROM BASE_IMAGE

WORKDIR ${SERVER_WORKDIR}

# Set HTTP proxy
ARG BINARY_NAME

COPY BINARY_NAME ./bin/BINARY_NAME

ENTRYPOINT ["./bin/BINARY_NAME"]
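The `BASE_IMAGE` and `BINARY_NAME` placeholders above suggest the file is rendered once per binary before building. A minimal sketch of how that could be done follows; the `sed`-based rendering, the chosen base image and the build context path are assumptions for illustration, not the project's actual tooling, and the base image is assumed to define `SERVER_WORKDIR`.

```bash
# Hypothetical rendering of build/images/Dockerfile for one binary.
BINARY=openim-api
sed -e "s|BASE_IMAGE|ghcr.io/openim-sigs/openim-bash-image:latest|" \
    -e "s|BINARY_NAME|${BINARY}|g" \
    build/images/Dockerfile > /tmp/Dockerfile.${BINARY}

# Build with the directory holding the compiled binary as the context,
# so the COPY of the binary can find it (the path is an assumption).
docker build -f /tmp/Dockerfile.${BINARY} -t openim/${BINARY}:dev \
  _output/bin/platforms/linux/amd64
```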
@@ -31,20 +31,15 @@ RUN go mod download

COPY . .

RUN make clean
RUN make build BINS=openim-api

RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-api /usr/bin/openim-api

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-api ./bin/openim-api

ENV PORT 10002
EXPOSE 10002

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-api /usr/bin/openim-api

ENTRYPOINT ["bash", "-c", "openim-api -c $OPENIM_SERVER_CONFIG_NAME --port $PORT"]
ENTRYPOINT ["./bin/openim-api"]
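As a quick sanity check of the multi-stage build above, something along these lines should produce and start a runnable image. The Dockerfile path and image tag are assumptions for illustration; only the exposed port 10002 and the `./bin/openim-api` entrypoint come from the hunk itself.

```bash
# Build from the repository root (the builder stage runs `make build BINS=openim-api`).
docker build -f build/images/openim-api/Dockerfile -t openim/openim-api:dev .

# The final stage copies the binary to ./bin/openim-api and exposes port 10002.
docker run --rm -p 10002:10002 openim/openim-api:dev
```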
@@ -1,8 +0,0 @@
FROM ghcr.io/openim-sigs/openim-bash-image:latest

COPY openim-api /usr/bin/

# nosemgrep: dockerfile.security.missing-user.missing-user
ENTRYPOINT ["/usr/bin/openim-api"]
# nosemgrep: dockerfile.security.missing-user.missing-user
CMD ["--help"]

@@ -31,19 +31,15 @@ RUN go mod download
COPY . .

RUN go mod download
RUN make clean
RUN make build BINS=openim-cmdutils
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-cmdutils /usr/bin/openim-cmdutils

FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder $OPENIM_SERVER_BINDIR/platforms /openim/openim-server/_output/bin/platforms
COPY --from=builder ${SERVER_WORKDIR}/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-cmdutils ./bin/openim-cmdutils

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-cmdutils /usr/bin/openim-cmdutils

ENTRYPOINT ["openim-cmdutils"]
ENTRYPOINT ["./bin/openim-cmdutils"]

CMD ["--help"]

@@ -31,18 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-crontask
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-crontask /usr/bin/openim-crontask

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-crontask ./bin/openim-crontask

ENV OPENIM_SERVER_CONFIG_NAME=/openim/openim-server/config

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-crontask /usr/bin/openim-crontask

CMD ["bash", "-c", "openim-crontask -c $OPENIM_SERVER_CONFIG_NAME"]
ENTRYPOINT ["./bin/openim-crontask"]

@@ -31,22 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-msggateway
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-msggateway /usr/bin/openim-msggateway

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-msggateway ./bin/openim-msggateway

ENV PORT 10140 \
    WS_PORT 10001

EXPOSE 10140
EXPOSE 10001

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-msggateway /usr/bin/openim-msggateway

CMD ["bash", "-c", "openim-msggateway -c $OPENIM_SERVER_CONFIG_NAME --port $PORT --ws_port $WS_PORT"]
ENTRYPOINT ["./bin/openim-msggateway"]

@@ -31,16 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-msgtransfer
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-msgtransfer /usr/bin/openim-msgtransfer

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-msgtransfer ./bin/openim-msgtransfer

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-msgtransfer /usr/bin/openim-msgtransfer

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then openim-msgtransfer -c $OPENIM_SERVER_CONFIG_NAME --prometheus_port $PROMETHEUS_PORT; else openim-msgtransfer -c $OPENIM_SERVER_CONFIG_NAME; fi"]
ENTRYPOINT ["./bin/openim-msgtransfer"]

@@ -15,7 +15,6 @@
# OpenIM base image: https://github.com/openim-sigs/openim-base-image

# Set go mod installation source and proxy
# docker run -e "PORT=10003" -e "PROMETHEUSORT=4321" --network host -it 67ef891ad1ff

FROM golang:1.20 AS builder

@@ -32,21 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-push
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-push /usr/bin/openim-push

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-push ./bin/openim-push

ENV PORT 10170 \
    PROMETHEUS_PORT 20170

EXPOSE 10170

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-push /usr/bin/openim-push

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then openim-push -c $OPENIM_SERVER_CONFIG_NAME --port $PORT --prometheus_port $PROMETHEUS_PORT; else openim-push -c $OPENIM_SERVER_CONFIG_NAME --port $PORT; fi"]
ENTRYPOINT ["./bin/openim-push"]

@@ -31,22 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-rpc-auth
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-auth /usr/bin/openim-rpc-auth

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-rpc-auth ./bin/openim-rpc-auth

ENV PORT 10160 \
    PROMETHEUS_PORT 20160

EXPOSE 10160

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-auth /usr/bin/openim-rpc-auth

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-auth --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-auth --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
ENTRYPOINT ["./bin/openim-rpc-auth"]

@@ -31,22 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-rpc-conversation
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-conversation /usr/bin/openim-rpc-conversation

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-rpc-conversation ./bin/openim-rpc-conversation

ENV PORT 10230 \
    PROMETHEUS_PORT 20230

EXPOSE 10230

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-conversation /usr/bin/openim-rpc-conversation

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-conversation --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-conversation --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
ENTRYPOINT ["./bin/openim-rpc-conversation"]

@@ -31,22 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-rpc-friend
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-friend /usr/bin/openim-rpc-friend

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-rpc-friend ./bin/openim-rpc-friend

ENV PORT 10120 \
    PROMETHEUS_PORT 20120

EXPOSE 10120

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-friend /usr/bin/openim-rpc-friend

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-friend --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-friend --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
ENTRYPOINT ["./bin/openim-rpc-friend"]

@@ -31,22 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-rpc-group
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-group /usr/bin/openim-rpc-group

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-rpc-group ./bin/openim-rpc-group

ENV PORT 10150 \
    PROMETHEUS_PORT 20150

EXPOSE 10150

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-group /usr/bin/openim-rpc-group

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-group --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-group --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
ENTRYPOINT ["./bin/openim-rpc-group"]

@@ -31,22 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-rpc-msg
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-msg /usr/bin/openim-rpc-msg

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-rpc-msg ./bin/openim-rpc-msg

ENV PORT 10130 \
    PROMETHEUS_PORT 20130

EXPOSE 10130

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-msg /usr/bin/openim-rpc-msg

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-msg --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-msg --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
ENTRYPOINT ["./bin/openim-rpc-msg"]

@@ -31,23 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-rpc-third
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-third /usr/bin/openim-rpc-third

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-rpc-third ./bin/openim-rpc-third

ENV PORT 10190 \
    PROMETHEUS_PORT 21301

EXPOSE 10190

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-third /usr/bin/openim-rpc-third

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-third --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-third --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
ENTRYPOINT ["./bin/openim-rpc-third"]

@@ -31,22 +31,15 @@ RUN go mod download
COPY . .

RUN make clean
RUN make build BINS=openim-rpc-user
RUN cp /openim/openim-server/_output/bin/platforms/$(go env GOOS)/$(go env GOARCH)/openim-rpc-user /usr/bin/openim-rpc-user

# FROM ghcr.io/openim-sigs/openim-bash-image:latest
FROM ghcr.io/openim-sigs/openim-bash-image:latest

WORKDIR /openim/openim-server

COPY --from=builder /openim/openim-server/_output/bin/platforms /openim/openim-server/_output/bin/platforms/
COPY --from=builder /openim/openim-server/config /openim/openim-server/config
COPY --from=builder /usr/bin/openim-rpc-user ./bin/openim-rpc-user

ENV PORT 10110 \
    PROMETHEUS_PORT 20110

EXPOSE 10110

RUN mv ${OPENIM_SERVER_BINDIR}/platforms/$(get_os)/$(get_arch)/openim-rpc-user /usr/bin/openim-rpc-user

ENTRYPOINT ["bash", "-c", "if [[ -n $PROMETHEUS_PORT ]]; then "openim-rpc-user --port $PORT -c $OPENIM_SERVER_CONFIG_NAME" --prometheus_port $PROMETHEUS_PORT; else "openim-rpc-user --port $PORT -c $OPENIM_SERVER_CONFIG_NAME"; fi"]
ENTRYPOINT ["./bin/openim-rpc-user"]
@@ -17,15 +17,15 @@ package main
import (
	"context"
	"fmt"
	"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
	"net"
	_ "net/http/pprof"
	"strconv"

	"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"

	"github.com/OpenIMSDK/protocol/constant"
	"github.com/OpenIMSDK/tools/discoveryregistry"
	"github.com/OpenIMSDK/tools/log"

	"github.com/openimsdk/open-im-server/v3/internal/api"
	"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"

@@ -37,52 +37,62 @@ func main() {
	apiCmd.AddPortFlag()
	apiCmd.AddApi(run)
	if err := apiCmd.Execute(); err != nil {
		log.ZError(context.Background(), "API command execution failed", err)
		panic(err.Error())
	}
}

func run(port int) error {
	fmt.Println("*****openimapi port:", port)
	log.ZInfo(context.Background(), "Openim api port:", "port", port)

	if port == 0 {
		return fmt.Errorf("port is empty")
		err := "port is empty"
		log.ZError(context.Background(), err, nil)

		return fmt.Errorf(err)
	}
	rdb, err := cache.NewRedis()
	if err != nil {
		log.ZError(context.Background(), "Failed to initialize Redis", err)

		return err
	}
	fmt.Println("api start init discov client")
	log.ZInfo(context.Background(), "api start init discov client")

	var client discoveryregistry.SvcDiscoveryRegistry

	// Determine whether zk is passed according to whether it is a clustered deployment
	client, err = discovery_register.NewDiscoveryRegister(config.Config.Envs.Discovery)
	/*
		client, err = openkeeper.NewClient(config.Config.Zookeeper.ZkAddr, config.Config.Zookeeper.Schema,
			openkeeper.WithFreq(time.Hour), openkeeper.WithUserNameAndPassword(
				config.Config.Zookeeper.Username,
				config.Config.Zookeeper.Password,
			), openkeeper.WithRoundRobin(), openkeeper.WithTimeout(10), openkeeper.WithLogger(log.NewZkLogger()))*/
	if err != nil {
		log.ZError(context.Background(), "Failed to initialize discovery register", err)

		return err
	}
	if err = client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil {
		log.ZError(context.Background(), "Failed to create RPC root nodes", err)

		return err
	}
	fmt.Println("api register public config to discov")
	log.ZInfo(context.Background(), "api register public config to discov")
	if err = client.RegisterConf2Registry(constant.OpenIMCommonConfigKey, config.Config.EncodeConfig()); err != nil {
		log.ZError(context.Background(), "Failed to register public config to discov", err)

		return err
	}
	fmt.Println("api register public config to discov success")
	log.ZInfo(context.Background(), "api register public config to discov success")
	router := api.NewGinRouter(client, rdb)
	fmt.Println("api init router success")
	log.ZInfo(context.Background(), "api init router success")
	var address string
	if config.Config.Api.ListenIP != "" {
		address = net.JoinHostPort(config.Config.Api.ListenIP, strconv.Itoa(port))
	} else {
		address = net.JoinHostPort("0.0.0.0", strconv.Itoa(port))
	}
	fmt.Println("start api server, address: ", address, ", OpenIM version: ", config.Version)
	log.ZInfo(context.Background(), "start server success", "address", address, "version", config.Version)
	log.ZInfo(context.Background(), "start api server", "address", address, "OpenIM version", config.Version)

	err = router.Run(address)
	if err != nil {
		log.ZError(context.Background(), "api run failed ", err, "address", address)
		log.ZError(context.Background(), "api run failed", err, "address", address)

		return err
	}
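Tying the API Dockerfiles and the `run(port int)` entrypoint together, the flags that appear elsewhere in this diff suggest an invocation along the following lines. Treat it as a sketch; only `--port` and `-c` are actually shown in this commit.

```bash
# Start the API against a config directory: --port feeds run(port) above,
# and -c points at the config consumed by cache.NewRedis and the discovery register.
./bin/openim-api -c /openim/openim-server/config --port 10002
```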
@@ -80,6 +80,12 @@ $ sudo sealos run labring/kubernetes:v1.25.0 labring/helm:v3.8.2 labring/calico:
  -p "$CLUSTER_PASSWORD"
```

> **Note**
> Uninstalling: removing a cluster with `kubeadm` does not clean up the related `etcd` and `cni` state, so you either have to clean it up manually or uninstall with `sealos` instead:
> ```bash
> sealos reset
> ```

### Installing helm

helm packages applications into charts, giving you release versioning and control, which greatly simplifies deploying and managing applications on Kubernetes.

@@ -97,11 +103,17 @@ $ curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bas
$ helm repo add brigade https://openimsdk.github.io/openim-charts
```

### OpenIM image strategy

Automated builds are published to aliyun, ghcr, and Docker Hub: https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/images.md

**Local test build:**

```bash
$ make image
```

### Containerized installation

The concrete installation steps are as follows:

### Helm installation
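To make the Helm route above concrete, here is a minimal sketch. Only the `brigade` repository added above comes from the docs; the chart name `openim-server`, the release name and the namespace are assumptions for illustration.

```bash
# Hypothetical install from the chart repository added above.
helm repo update
helm install openim brigade/openim-server --namespace openim --create-namespace

# Check the release and its pods.
helm status openim -n openim
kubectl get pods -n openim
```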
[Eleven Helm chart files for the openim-chat chart were deleted in full: the stock .helmignore, the Chart.yaml, the verbatim Apache-2.0 LICENSE text, the default NOTES.txt and _helpers.tpl, and the scaffolded deployment, hpa, ingress, service, serviceaccount and values manifests.]
@ -1,20 +1,19 @@
# OpenIM enviroment
# OpenIM ENVIRONMENT CONFIGURATION

<!-- vscode-markdown-toc -->

* 1. [OpenIM Deployment Guide](#OpenIMDeploymentGuide)
    * 1.1. [Deployment Strategies](#DeploymentStrategies)
    * 1.2. [Source Code Deployment](#SourceCodeDeployment)
    * 1.3. [Docker Compose Deployment](#DockerComposeDeployment)
    * 1.4. [Environment Variable Configuration](#EnvironmentVariableConfiguration)
        * 1.4.1. [[1. Recommended using environment variables:](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#1-recommended-using-environment-variables)](#1.Recommendedusingenvironmentvariables:https:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.md1-recommended-using-environment-variables)
        * 1.4.2. [[Additional Configuration](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-configuration)](#AdditionalConfigurationhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-configuration)
        * 1.4.3. [[Security Considerations](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#security-considerations)](#SecurityConsiderationshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdsecurity-considerations)
        * 1.4.4. [[Data Management](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#data-management)](#DataManagementhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mddata-management)
        * 1.4.5. [[Monitoring and Logging](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#monitoring-and-logging)](#MonitoringandLogginghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdmonitoring-and-logging)
        * 1.4.6. [[Troubleshooting](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#troubleshooting)](#Troubleshootinghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdtroubleshooting)
        * 1.4.7. [[Conclusion](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#conclusion)](#Conclusionhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdconclusion)
        * 1.4.8. [[Additional Resources](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-resources)](#AdditionalResourceshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-resources)
        * 1.4.1. [Recommended using environment variables](#Recommendedusingenvironmentvariables)
        * 1.4.2. [Additional Configuration](#AdditionalConfiguration)
        * 1.4.3. [Security Considerations](#SecurityConsiderations)
        * 1.4.4. [Data Management](#DataManagement)
        * 1.4.5. [Monitoring and Logging](#MonitoringandLogging)
        * 1.4.6. [Troubleshooting](#Troubleshooting)
        * 1.4.7. [Conclusion](#Conclusion)
        * 1.4.8. [Additional Resources](#AdditionalResources)
* 2. [Further Configuration](#FurtherConfiguration)
    * 2.1. [Image Registry Configuration](#ImageRegistryConfiguration)
    * 2.2. [OpenIM Docker Network Configuration](#OpenIMDockerNetworkConfiguration)
@ -39,11 +38,38 @@
    * 2.20.1. [General Configuration](#GeneralConfiguration)
    * 2.20.2. [Service-Specific Prometheus Ports](#Service-SpecificPrometheusPorts)

<!-- vscode-markdown-toc-config
numbering=true
autoSave=true
/vscode-markdown-toc-config -->
<!-- /vscode-markdown-toc -->

## 0. <a name='TableofContents'></a>OpenIM Config File

Ensuring that OpenIM operates smoothly requires clear direction on the configuration file's location. Here's a detailed step-by-step guide on how to provide this essential path to OpenIM:

1. **Using the Command-line Argument**:

   + **For Configuration Path**: When initializing OpenIM, you can specify the path to the configuration file directly using the `-c` or `--config_folder_path` option.

     ```bash
     ❯ _output/bin/platforms/linux/amd64/openim-api --config_folder_path="/your/config/folder/path"
     ```

   + **For Port Specification**: Similarly, if you wish to designate a particular port, utilize the `-p` option followed by the desired port number.

     ```bash
     ❯ _output/bin/platforms/linux/amd64/openim-api -p 1234
     ```

   Note: If the port is not specified here, OpenIM will fetch it from the configuration file. Setting the port via environment variables isn't supported. We recommend consolidating settings in the configuration file for a more consistent and streamlined setup.

2. **Leveraging the Environment Variable**:

   You have the flexibility to determine OpenIM's configuration path by setting an `OPENIMCONFIG` environment variable. This method provides a seamless way to instruct OpenIM without command-line parameters every time.

   ```bash
   export OPENIMCONFIG="/path/to/your/config"
   ```

3. **Relying on the Default Path**:

   In scenarios where neither command-line arguments nor environment variables are provided, OpenIM will intuitively revert to the `config/` directory to locate its configuration.
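
To make that lookup order concrete, here is a minimal, illustrative Go sketch of the same precedence: an explicit `--config_folder_path` flag wins, then the `OPENIMCONFIG` environment variable, and finally the default `config/` directory. The `resolveConfigFolder` helper is hypothetical and only mirrors the behaviour this guide describes; it is not the server's actual startup code.

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// resolveConfigFolder mirrors the precedence described above:
//  1. the --config_folder_path command-line flag,
//  2. the OPENIMCONFIG environment variable,
//  3. the default config/ directory.
// Illustrative only; the real resolution lives in the server's cmd packages.
func resolveConfigFolder() string {
	folder := flag.String("config_folder_path", "", "path to the folder holding the OpenIM config files")
	flag.Parse()

	if *folder != "" {
		return *folder
	}
	if env := os.Getenv("OPENIMCONFIG"); env != "" {
		return env
	}
	return "config/"
}

func main() {
	fmt.Println("using config folder:", resolveConfigFolder())
}
```

Running this sketch with `go run main.go --config_folder_path=/etc/openim` shows the flag taking precedence over any exported `OPENIMCONFIG` value.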

## 1. <a name='OpenIMDeploymentGuide'></a>OpenIM Deployment Guide
@ -96,7 +122,7 @@ Setting a variable, e.g., `export CHAT_BRANCH="release-v1.3"`, will prioritize `

For convenience, configuration through modifying environment variables is recommended:

#### 1.4.1. <a name='1.Recommendedusingenvironmentvariables:https:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.md1-recommended-using-environment-variables'></a>[1. Recommended using environment variables:](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#1-recommended-using-environment-variables)
#### 1.4.1. <a name='Recommendedusingenvironmentvariables'></a>Recommended using environment variables

+ PASSWORD

@ -137,9 +163,9 @@ For convenience, configuration through modifying environment variables is recomm
export DATA_DIR="/data/openim"
```

#### 1.4.2. <a name='AdditionalConfigurationhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-configuration'></a>[Additional Configuration](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-configuration)
#### 1.4.2. <a name='AdditionalConfiguration'></a>Additional Configuration

##### [MinIO Access and Secret Key](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#minio-access-and-secret-key)
##### MinIO Access and Secret Key

To secure your MinIO server, you should set up an access key and secret key. These credentials are used to authenticate requests to your MinIO server.

@ -148,7 +174,7 @@ export MINIO_ACCESS_KEY="YourAccessKey"
export MINIO_SECRET_KEY="YourSecretKey"
```

##### [MinIO Browser](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#minio-browser)
##### MinIO Browser

MinIO comes with an embedded web-based object browser. You can control the availability of the MinIO browser by setting the `MINIO_BROWSER` environment variable.

@ -156,9 +182,9 @@ MinIO comes with an embedded web-based object browser. You can control the avail
export MINIO_BROWSER="on"
```

#### 1.4.3. <a name='SecurityConsiderationshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdsecurity-considerations'></a>[Security Considerations](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#security-considerations)
#### 1.4.3. <a name='SecurityConsiderations'></a>Security Considerations

##### [TLS/SSL Configuration](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#tls-ssl-configuration)
##### TLS/SSL Configuration

For secure communication, it's recommended to enable TLS/SSL for your MinIO server. You can do this by providing the path to the SSL certificate and key files.

@ -166,9 +192,9 @@ For secure communication, it's recommended to enable TLS/SSL for your MinIO serv
export MINIO_CERTS_DIR="/path/to/certs/directory"
```

#### 1.4.4. <a name='DataManagementhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mddata-management'></a>[Data Management](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#data-management)
#### 1.4.4. <a name='DataManagement'></a>Data Management

##### [Data Retention Policy](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#data-retention-policy)
##### Data Retention Policy

You may want to set up a data retention policy to automatically delete objects after a specified period.

@ -176,7 +202,7 @@ You may want to set up a data retention policy to automatically delete objects a
export MINIO_RETENTION_DAYS="30"
```

#### 1.4.5. <a name='MonitoringandLogginghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdmonitoring-and-logging'></a>[Monitoring and Logging](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#monitoring-and-logging)
#### 1.4.5. <a name='MonitoringandLogging'></a>Monitoring and Logging

##### [Audit Logging](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#audit-logging)

@ -186,9 +212,9 @@ Enable audit logging to keep track of access and changes to your data.
export MINIO_AUDIT="on"
```

#### 1.4.6. <a name='Troubleshootinghttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdtroubleshooting'></a>[Troubleshooting](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#troubleshooting)
#### 1.4.6. <a name='Troubleshooting'></a>Troubleshooting

##### [Debug Mode](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#debug-mode)
##### Debug Mode

In case of issues, you may enable debug mode to get more detailed logs to assist in troubleshooting.

@ -196,11 +222,11 @@ In case of issues, you may enable debug mode to get more detailed logs to assist
export MINIO_DEBUG="on"
```

#### 1.4.7. <a name='Conclusionhttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdconclusion'></a>[Conclusion](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#conclusion)
#### 1.4.7. <a name='Conclusion'></a>Conclusion

With the environment variables configured as per your requirements, your MinIO server should be ready to securely store and manage your object data. Ensure to verify the setup and monitor the logs for any unusual activities or errors. Regularly update the MinIO server and review your configuration to adapt to any changes or improvements in the MinIO system.

#### 1.4.8. <a name='AdditionalResourceshttps:github.comopenimsdkopen-im-serverblobmaindocscontribenvironment.mdadditional-resources'></a>[Additional Resources](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/environment.md#additional-resources)
#### 1.4.8. <a name='AdditionalResources'></a>Additional Resources

+ [MinIO Client Quickstart Guide](https://docs.min.io/docs/minio-client-quickstart-guide)
+ [MinIO Admin Complete Guide](https://docs.min.io/docs/minio-admin-complete-guide)
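
To tie the MinIO-related variables from this section together, here is a hedged Go sketch of how a client consumes `MINIO_ACCESS_KEY` and `MINIO_SECRET_KEY`. It uses the minio-go v7 SDK; the endpoint and the plain-HTTP setting are placeholders, and OpenIM's own object-storage wiring is driven by its configuration files rather than by code like this.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Read the credentials exported earlier in this guide.
	accessKey := os.Getenv("MINIO_ACCESS_KEY")
	secretKey := os.Getenv("MINIO_SECRET_KEY")

	// "minio.example.com:9000" is a placeholder endpoint; set Secure to true
	// once TLS/SSL is enabled as discussed in the Security Considerations part.
	client, err := minio.New("minio.example.com:9000", &minio.Options{
		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure: false,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Quick sanity check that the credentials work: list the buckets.
	buckets, err := client.ListBuckets(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range buckets {
		fmt.Println(b.Name)
	}
}
```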
35
go.mod
35
go.mod
@ -1,6 +1,6 @@
|
||||
module github.com/openimsdk/open-im-server/v3
|
||||
|
||||
go 1.18
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
firebase.google.com/go v3.13.0+incompatible
|
||||
@ -24,34 +24,34 @@ require (
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/stretchr/testify v1.8.4
|
||||
go.mongodb.org/mongo-driver v1.12.1
|
||||
golang.org/x/image v0.12.0
|
||||
google.golang.org/api v0.143.0
|
||||
google.golang.org/grpc v1.58.2
|
||||
golang.org/x/image v0.13.0
|
||||
google.golang.org/api v0.148.0
|
||||
google.golang.org/grpc v1.59.0
|
||||
google.golang.org/protobuf v1.31.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/mysql v1.5.1
|
||||
gorm.io/driver/mysql v1.5.2
|
||||
gorm.io/gorm v1.25.5
|
||||
)
|
||||
|
||||
require github.com/google/uuid v1.3.1
|
||||
|
||||
require (
|
||||
github.com/IBM/sarama v1.41.2
|
||||
github.com/OpenIMSDK/protocol v0.0.26
|
||||
github.com/OpenIMSDK/tools v0.0.14
|
||||
github.com/IBM/sarama v1.41.3
|
||||
github.com/OpenIMSDK/protocol v0.0.30
|
||||
github.com/OpenIMSDK/tools v0.0.15
|
||||
github.com/aliyun/aliyun-oss-go-sdk v2.2.9+incompatible
|
||||
github.com/go-redis/redis v6.15.9+incompatible
|
||||
github.com/go-sql-driver/mysql v1.7.1
|
||||
github.com/redis/go-redis/v9 v9.2.1
|
||||
github.com/tencentyun/cos-go-sdk-v5 v0.7.44
|
||||
github.com/tencentyun/cos-go-sdk-v5 v0.7.45
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.110.7 // indirect
|
||||
cloud.google.com/go v0.110.8 // indirect
|
||||
cloud.google.com/go/compute v1.23.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cloud.google.com/go/firestore v1.12.0 // indirect
|
||||
cloud.google.com/go/iam v1.1.1 // indirect
|
||||
cloud.google.com/go/firestore v1.13.0 // indirect
|
||||
cloud.google.com/go/iam v1.1.2 // indirect
|
||||
cloud.google.com/go/longrunning v0.5.1 // indirect
|
||||
cloud.google.com/go/storage v1.30.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
@ -71,7 +71,6 @@ require (
|
||||
github.com/go-zookeeper/zk v1.0.3 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect
|
||||
@ -120,16 +119,16 @@ require (
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.12.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/oauth2 v0.13.0 // indirect
|
||||
golang.org/x/sync v0.4.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
|
||||
69
go.sum
69
go.sum
@ -1,14 +1,14 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
|
||||
cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
|
||||
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
|
||||
cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
|
||||
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
|
||||
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go/firestore v1.12.0 h1:aeEA/N7DW7+l2u5jtkO8I0qv0D95YwjggD8kUHrTHO4=
|
||||
cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
|
||||
cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
|
||||
cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
|
||||
cloud.google.com/go/firestore v1.13.0 h1:/3S4RssUV4GO/kvgJZB+tayjhOfyAHs+KcpJgRVu/Qk=
|
||||
cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8=
|
||||
cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4=
|
||||
cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
|
||||
cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI=
|
||||
cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
|
||||
cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
|
||||
@ -16,10 +16,10 @@ cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7Biccwk
|
||||
firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4=
|
||||
firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/IBM/sarama v1.41.2 h1:ZDBZfGPHAD4uuAtSv4U22fRZBgst0eEwGFzLj0fb85c=
|
||||
github.com/IBM/sarama v1.41.2/go.mod h1:xdpu7sd6OE1uxNdjYTSKUfY8FaKkJES9/+EyjSgiGQk=
|
||||
github.com/OpenIMSDK/protocol v0.0.26 h1:jzq7EemhkO8W5kvOOg2f0b7OAMzMPrIBUG0GVbSNhDA=
|
||||
github.com/OpenIMSDK/protocol v0.0.26/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
|
||||
github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c=
|
||||
github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ=
|
||||
github.com/OpenIMSDK/protocol v0.0.30 h1:MiHO6PyQMR9ojBHNnSFxCHLmsoE2xZqaiYj975JiZnM=
|
||||
github.com/OpenIMSDK/protocol v0.0.30/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
|
||||
github.com/OpenIMSDK/tools v0.0.14 h1:WLof/+WxyPyRST+QkoTKubYCiV73uCLiL8pgnpH/yKQ=
|
||||
github.com/OpenIMSDK/tools v0.0.14/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI=
|
||||
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
|
||||
@ -52,6 +52,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dtm-labs/rockscache v0.1.1 h1:6S1vgaHvGqrLd8Ka4hRTKeKPV7v+tT0MSkTIX81LRyA=
|
||||
@ -130,8 +131,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
@ -295,8 +295,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
|
||||
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0=
|
||||
github.com/tencentyun/cos-go-sdk-v5 v0.7.44 h1:Vvz28KVdmSUrwTH2MWgAMlhzUAh+lQBSSAW1J7qJDW8=
|
||||
github.com/tencentyun/cos-go-sdk-v5 v0.7.44/go.mod h1:LUFnaqRmGk6pEHOaRmdn2dCZR2j0cSsM5xowWFPTPao=
|
||||
github.com/tencentyun/cos-go-sdk-v5 v0.7.45 h1:5/ZGOv846tP6+2X7w//8QjLgH2KcUK+HciFbfjWquFU=
|
||||
github.com/tencentyun/cos-go-sdk-v5 v0.7.45/go.mod h1:DH9US8nB+AJXqwu/AMOrCFN1COv3dpytXuJWHgdg7kE=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
@ -335,15 +335,14 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/image v0.12.0 h1:w13vZbU4o5rKOFFR8y7M+c4A5jXDC0uXTdHYRP8X2DQ=
|
||||
golang.org/x/image v0.12.0/go.mod h1:Lu90jvHG7GfemOIcldsh9A2hS01ocl6oNO7ype5mEnk=
|
||||
golang.org/x/image v0.13.0 h1:3cge/F/QTkNLauhf2QoE9zp+7sr+ZcL4HnoZmdwg9sg=
|
||||
golang.org/x/image v0.13.0/go.mod h1:6mmbMOeV28HuMTgA6OSRkdXKYw/t5W9Uwn2Yv1r3Yxk=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -366,8 +365,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
|
||||
golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
|
||||
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
|
||||
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -375,9 +374,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
|
||||
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -424,15 +422,14 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA=
|
||||
google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk=
|
||||
google.golang.org/api v0.148.0 h1:HBq4TZlN4/1pNcu0geJZ/Q50vIwIXT532UIMYoo0vOs=
|
||||
google.golang.org/api v0.148.0/go.mod h1:8/TBgwaKjfqTdacOJrOv2+2Q6fBDU1uHKK06oGSkxzU=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
@ -440,19 +437,19 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA=
|
||||
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
|
||||
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@ -481,9 +478,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/mysql v1.5.1 h1:WUEH5VF9obL/lTtzjmML/5e6VfFR/788coz2uaVCAZw=
|
||||
gorm.io/driver/mysql v1.5.1/go.mod h1:Jo3Xu7mMhCyj8dlrb3WoCaRd1FhsVh+yMXb1jUInf5o=
|
||||
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
gorm.io/driver/mysql v1.5.2 h1:QC2HRskSE75wBuOxe0+iCkyJZ+RqpudsQtqkp+IMuXs=
|
||||
gorm.io/driver/mysql v1.5.2/go.mod h1:pQLhh1Ut/WUAySdTHwBpBv6+JKcj+ua4ZFx1QQTBzb8=
|
||||
gorm.io/gorm v1.25.2-0.20230530020048-26663ab9bf55/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
|
||||
gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@ -40,7 +40,7 @@ REPO="Open-IM-Server"
|
||||
|
||||
# Version of Go you want to use, make sure it is compatible with your OpenIM-Server requirements.
|
||||
# Default is 1.18, if you want to use a different version, replace accordingly.
|
||||
GO_VERSION="1.18"
|
||||
GO_VERSION="1.20"
|
||||
|
||||
# Default HTTP_PORT is 80. If you want to use a different port, uncomment and replace the value.
|
||||
# HTTP_PORT=80
|
||||
@ -168,7 +168,7 @@ function install_go() {
|
||||
command -v go >/dev/null 2>&1
|
||||
# Determines if GO_VERSION is defined
|
||||
if [ -z "$GO_VERSION" ]; then
|
||||
GO_VERSION="1.18"
|
||||
GO_VERSION="1.20"
|
||||
fi
|
||||
|
||||
if [[ $? -ne 0 ]]; then
|
||||
@ -309,7 +309,7 @@ function cmd_help() {
|
||||
color_echo ${BLUE_PREFIX} "-t, --tag ${CYAN_PREFIX}specify the tag (default option, set to latest if not specified)${COLOR_SUFFIX}"
|
||||
color_echo ${BLUE_PREFIX} "-r, --release ${CYAN_PREFIX}specify the release branch (cannot be used with the tag option)${COLOR_SUFFIX}"
|
||||
color_echo ${BLUE_PREFIX} "-gt, --github-token ${CYAN_PREFIX}set the GITHUB_TOKEN (default: not set)${COLOR_SUFFIX}"
|
||||
color_echo ${BLUE_PREFIX} "-g, --go-version ${CYAN_PREFIX}set the Go language version (default: GO_VERSION=\"1.18\")${COLOR_SUFFIX}"
|
||||
color_echo ${BLUE_PREFIX} "-g, --go-version ${CYAN_PREFIX}set the Go language version (default: GO_VERSION=\"1.20\")${COLOR_SUFFIX}"
|
||||
color_echo ${BLUE_PREFIX} "--install-dir ${CYAN_PREFIX}set the OpenIM installation directory (default: /tmp)${COLOR_SUFFIX}"
|
||||
color_echo ${BLUE_PREFIX} "--cpu ${CYAN_PREFIX}set the number of concurrent processes${COLOR_SUFFIX}"
|
||||
echo
|
||||
@ -329,7 +329,7 @@ function parseinput() {
|
||||
# CHINA=false
|
||||
# TAG=latest
|
||||
# RELEASE=""
|
||||
# GO_VERSION=1.18
|
||||
# GO_VERSION=1.20
|
||||
# INSTALL_DIR=/tmp
|
||||
# GITHUB_TOKEN=""
|
||||
# CPU=$(nproc)
|
||||
|
||||
@ -57,6 +57,10 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
|
||||
var newContent string
|
||||
options := make(map[string]bool, 5)
|
||||
switch params.ContentType {
|
||||
case constant.OANotification:
|
||||
notification := sdkws.NotificationElem{}
|
||||
notification.Detail = utils.StructToJsonString(params.Content)
|
||||
newContent = utils.StructToJsonString(¬ification)
|
||||
case constant.Text:
|
||||
fallthrough
|
||||
case constant.Picture:
|
||||
@ -69,10 +73,6 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
|
||||
fallthrough
|
||||
case constant.File:
|
||||
fallthrough
|
||||
case constant.CustomNotTriggerConversation:
|
||||
fallthrough
|
||||
case constant.CustomOnlineOnly:
|
||||
fallthrough
|
||||
default:
|
||||
newContent = utils.StructToJsonString(params.Content)
|
||||
}
|
||||
@ -82,11 +82,6 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
|
||||
if params.NotOfflinePush {
|
||||
utils.SetSwitchFromOptions(options, constant.IsOfflinePush, false)
|
||||
}
|
||||
if params.ContentType == constant.CustomOnlineOnly {
|
||||
m.SetOptions(options, false)
|
||||
} else if params.ContentType == constant.CustomNotTriggerConversation {
|
||||
utils.SetSwitchFromOptions(options, constant.IsConversationUpdate, false)
|
||||
}
|
||||
pbData := msg.SendMsgReq{
|
||||
MsgData: &sdkws.MsgData{
|
||||
SendID: params.SendID,
|
||||
@ -105,14 +100,6 @@ func (m MessageApi) newUserSendMsgReq(c *gin.Context, params *apistruct.SendMsg)
|
||||
OfflinePushInfo: params.OfflinePushInfo,
|
||||
},
|
||||
}
|
||||
//if params.ContentType == constant.OANotification {
|
||||
// var tips sdkws.TipsComm
|
||||
// tips.JsonDetail = utils.StructToJsonString(params.Content)
|
||||
// pbData.MsgData.Content, err = proto.Marshal(&tips)
|
||||
// if err != nil {
|
||||
// log.ZError(c, "Marshal failed ", err, "tips", tips.String())
|
||||
// }
|
||||
//}
|
||||
return &pbData
|
||||
}
|
||||
|
||||
@ -180,15 +167,13 @@ func (m *MessageApi) getSendMsgReq(c *gin.Context, req apistruct.SendMsg) (sendM
|
||||
data = apistruct.FileElem{}
|
||||
case constant.Custom:
|
||||
data = apistruct.CustomElem{}
|
||||
case constant.Revoke:
|
||||
data = apistruct.RevokeElem{}
|
||||
case constant.OANotification:
|
||||
data = apistruct.OANotificationElem{}
|
||||
req.SessionType = constant.NotificationChatType
|
||||
case constant.CustomNotTriggerConversation:
|
||||
data = apistruct.CustomElem{}
|
||||
case constant.CustomOnlineOnly:
|
||||
data = apistruct.CustomElem{}
|
||||
if !authverify.IsManagerUserID(req.SendID) {
|
||||
return nil, errs.ErrNoPermission.
|
||||
Wrap("only app manager can as sender send OANotificationElem")
|
||||
}
|
||||
default:
|
||||
return nil, errs.ErrArgs.WithDetail("not support err contentType")
|
||||
}
|
||||
@ -212,7 +197,6 @@ func (m *MessageApi) SendMessage(c *gin.Context) {
|
||||
apiresp.GinError(c, errs.ErrNoPermission.Wrap("only app manager can send message"))
|
||||
return
|
||||
}
|
||||
|
||||
sendMsgReq, err := m.getSendMsgReq(c, req.SendMsg)
|
||||
if err != nil {
|
||||
log.ZError(c, "decodeData failed", err)
|
||||
|
||||
@ -83,7 +83,6 @@ func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.Unive
|
||||
userRouterGroup.POST("/get_users_online_status", ParseToken, u.GetUsersOnlineStatus)
|
||||
userRouterGroup.POST("/get_users_online_token_detail", ParseToken, u.GetUsersOnlineTokenDetail)
|
||||
userRouterGroup.POST("/subscribe_users_status", ParseToken, u.SubscriberStatus)
|
||||
userRouterGroup.POST("/unsubscribe_users_status", ParseToken, u.UnSubscriberStatus)
|
||||
userRouterGroup.POST("/get_users_status", ParseToken, u.GetUserStatus)
|
||||
userRouterGroup.POST("/get_subscribe_users_status", ParseToken, u.GetSubscribeUsersStatus)
|
||||
}
|
||||
|
||||
@ -190,11 +190,6 @@ func (u *UserApi) SubscriberStatus(c *gin.Context) {
|
||||
a2r.Call(user.UserClient.SubscribeOrCancelUsersStatus, u.Client, c)
|
||||
}
|
||||
|
||||
// UnSubscriberStatus Unsubscribe a user's presence.
|
||||
func (u *UserApi) UnSubscriberStatus(c *gin.Context) {
|
||||
a2r.Call(user.UserClient.SubscribeOrCancelUsersStatus, u.Client, c)
|
||||
}
|
||||
|
||||
// GetUserStatus Get the online status of the user.
|
||||
func (u *UserApi) GetUserStatus(c *gin.Context) {
|
||||
a2r.Call(user.UserClient.GetUserStatus, u.Client, c)
|
||||
|
||||
@ -18,9 +18,12 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OpenIMSDK/tools/utils"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
)
|
||||
|
||||
// RunWsAndServer run ws server
|
||||
func RunWsAndServer(rpcPort, wsPort, prometheusPort int) error {
|
||||
fmt.Println(
|
||||
"start rpc/msg_gateway server, port: ",
|
||||
@ -42,7 +45,7 @@ func RunWsAndServer(rpcPort, wsPort, prometheusPort int) error {
|
||||
go func() {
|
||||
err := hubServer.Start()
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
panic(utils.Wrap1(err))
|
||||
}
|
||||
}()
|
||||
return hubServer.LongConnServer.Run()
|
||||
|
||||
@ -283,20 +283,30 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(
|
||||
return
|
||||
}
|
||||
if isNewConversation {
|
||||
if storageList[0].SessionType == constant.SuperGroupChatType {
|
||||
log.ZInfo(ctx, "group chat first create conversation", "conversationID", conversationID)
|
||||
switch storageList[0].SessionType {
|
||||
case constant.SuperGroupChatType:
|
||||
log.ZInfo(ctx, "group chat first create conversation", "conversationID",
|
||||
conversationID)
|
||||
userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, storageList[0].GroupID)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "get group member ids error", err, "conversationID", conversationID)
|
||||
log.ZWarn(ctx, "get group member ids error", err, "conversationID",
|
||||
conversationID)
|
||||
} else {
|
||||
if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, storageList[0].GroupID, userIDs); err != nil {
|
||||
log.ZWarn(ctx, "single chat first create conversation error", err, "conversationID", conversationID)
|
||||
if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx,
|
||||
storageList[0].GroupID, userIDs); err != nil {
|
||||
log.ZWarn(ctx, "single chat first create conversation error", err,
|
||||
"conversationID", conversationID)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := och.conversationRpcClient.SingleChatFirstCreateConversation(ctx, storageList[0].RecvID, storageList[0].SendID); err != nil {
|
||||
log.ZWarn(ctx, "single chat first create conversation error", err, "conversationID", conversationID)
|
||||
case constant.SingleChatType, constant.NotificationChatType:
|
||||
if err := och.conversationRpcClient.SingleChatFirstCreateConversation(ctx, storageList[0].RecvID,
|
||||
storageList[0].SendID, conversationID, storageList[0].SessionType); err != nil {
|
||||
log.ZWarn(ctx, "single chat or notification first create conversation error", err,
|
||||
"conversationID", conversationID, "sessionType", storageList[0].SessionType)
|
||||
}
|
||||
default:
|
||||
log.ZWarn(ctx, "unknown session type", nil, "sessionType",
|
||||
storageList[0].SessionType)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -62,7 +62,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(
|
||||
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
|
||||
return
|
||||
}
|
||||
log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.MsgData)
|
||||
log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
|
||||
err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq)
|
||||
if err != nil {
|
||||
log.ZError(
|
||||
|
||||
@ -18,8 +18,6 @@ import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
|
||||
firebase "firebase.google.com/go"
|
||||
"firebase.google.com/go/messaging"
|
||||
"github.com/redis/go-redis/v9"
|
||||
@ -42,20 +40,17 @@ type Fcm struct {
|
||||
}
|
||||
|
||||
func NewClient(cache cache.MsgModel) *Fcm {
|
||||
opt := option.WithCredentialsFile(filepath.Join(config2.Root, "config", config.Config.Push.Fcm.ServiceAccount))
|
||||
projectRoot := config.GetProjectRoot()
|
||||
credentialsFilePath := filepath.Join(projectRoot, "config", config.Config.Push.Fcm.ServiceAccount)
|
||||
opt := option.WithCredentialsFile(credentialsFilePath)
|
||||
fcmApp, err := firebase.NewApp(context.Background(), nil, opt)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
// auth
|
||||
// fcmClient, err := fcmApp.Auth(context.Background())
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
|
||||
ctx := context.Background()
|
||||
fcmMsgClient, err := fcmApp.Messaging(ctx)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
return nil
|
||||
}
|
||||
return &Fcm{fcmMsgCli: fcmMsgClient, cache: cache}
|
||||
|
||||
@ -58,6 +58,7 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
|
||||
}
|
||||
sec := msgFromMQ.MsgData.SendTime / 1000
|
||||
nowSec := utils.GetCurrentTimestampBySecond()
|
||||
log.ZDebug(ctx, "push msg", "msg", pbData.String(), "sec", sec, "nowSec", nowSec)
|
||||
if nowSec-sec > 10 {
|
||||
return
|
||||
}
|
||||
|
||||
@ -126,13 +126,12 @@ func (p *Pusher) Push2User(ctx context.Context, userIDs []string, msg *sdkws.Msg
|
||||
}
|
||||
|
||||
func (p *Pusher) UnmarshalNotificationElem(bytes []byte, t interface{}) error {
|
||||
var notificationElem struct {
|
||||
Detail string `json:"detail,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal(bytes, ¬ificationElem); err != nil {
|
||||
var notification sdkws.NotificationElem
|
||||
if err := json.Unmarshal(bytes, ¬ification); err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal([]byte(notificationElem.Detail), t)
|
||||
|
||||
return json.Unmarshal([]byte(notification.Detail), t)
|
||||
}
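
The hunk above replaces the locally declared anonymous struct with the shared `sdkws.NotificationElem` type while keeping the same two-step decode: first the envelope, then the JSON carried in its `Detail` field. A self-contained sketch of that pattern, with a hypothetical `groupCreatedTips` payload used purely for illustration, might look like this:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// notificationElem mirrors the shape of sdkws.NotificationElem used above.
type notificationElem struct {
	Detail string `json:"detail,omitempty"`
}

// groupCreatedTips is a hypothetical payload type for this example only.
type groupCreatedTips struct {
	GroupID string `json:"groupID"`
}

func main() {
	raw := []byte(`{"detail":"{\"groupID\":\"g-100\"}"}`)

	// Step 1: decode the envelope.
	var elem notificationElem
	if err := json.Unmarshal(raw, &elem); err != nil {
		panic(err)
	}

	// Step 2: decode the JSON string carried in Detail into the target type.
	var tips groupCreatedTips
	if err := json.Unmarshal([]byte(elem.Detail), &tips); err != nil {
		panic(err)
	}
	fmt.Println(tips.GroupID) // prints: g-100
}
```

The indirection lets a single notification envelope carry arbitrarily typed tips without the pusher needing to know their concrete schema.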
|
||||
|
||||
func (p *Pusher) Push2SuperGroup(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
|
||||
|
||||
@ -17,8 +17,6 @@ package conversation
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/OpenIMSDK/protocol/constant"
|
||||
@ -114,7 +112,10 @@ func (c *conversationServer) SetConversation(ctx context.Context, req *pbconvers
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *conversationServer) SetConversations(ctx context.Context, req *pbconversation.SetConversationsReq) (*pbconversation.SetConversationsResp, error) {
|
||||
//nolint
|
||||
func (c *conversationServer) SetConversations(ctx context.Context,
|
||||
req *pbconversation.SetConversationsReq,
|
||||
) (*pbconversation.SetConversationsResp, error) {
|
||||
if req.Conversation == nil {
|
||||
return nil, errs.ErrArgs.Wrap("conversation must not be nil")
|
||||
}
|
||||
@ -124,14 +125,8 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
|
||||
return nil, err
|
||||
}
|
||||
if groupInfo.Status == constant.GroupStatusDismissed {
|
||||
return nil, err
|
||||
return nil, errs.ErrDismissedAlready.Wrap("group dismissed")
|
||||
}
|
||||
// for _, userID := range req.UserIDs {
|
||||
// if _, err := c.groupRpcClient.GetGroupMemberCache(ctx, req.Conversation.GroupID, userID); err != nil {
|
||||
// log.ZError(ctx, "user not in group", err, "userID", userID, "groupID", req.Conversation.GroupID)
|
||||
// return nil, err
|
||||
// }
|
||||
// }
|
||||
}
|
||||
var unequal int
|
||||
var conv tablerelation.ConversationModel
|
||||
@ -205,7 +200,14 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
|
||||
return nil, err
|
||||
}
|
||||
for _, userID := range req.UserIDs {
|
||||
c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID, req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID)
|
||||
err := c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID,
|
||||
req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "send conversation set private notification failed", err,
|
||||
"userID", userID, "conversationID", req.Conversation.ConversationID)
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if req.Conversation.BurnDuration != nil {
|
||||
@ -235,24 +237,40 @@ func (c *conversationServer) GetRecvMsgNotNotifyUserIDs(ctx context.Context, req
|
||||
}
|
||||
|
||||
// create conversation without notification for msg redis transfer.
|
||||
func (c *conversationServer) CreateSingleChatConversations(ctx context.Context, req *pbconversation.CreateSingleChatConversationsReq) (*pbconversation.CreateSingleChatConversationsResp, error) {
|
||||
var conversation tablerelation.ConversationModel
|
||||
conversation.ConversationID = msgprocessor.GetConversationIDBySessionType(constant.SingleChatType, req.RecvID, req.SendID)
|
||||
conversation.ConversationType = constant.SingleChatType
|
||||
conversation.OwnerUserID = req.SendID
|
||||
conversation.UserID = req.RecvID
|
||||
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation})
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "create conversation failed", err, "conversation", conversation)
|
||||
func (c *conversationServer) CreateSingleChatConversations(ctx context.Context,
|
||||
req *pbconversation.CreateSingleChatConversationsReq,
|
||||
) (*pbconversation.CreateSingleChatConversationsResp, error) {
|
||||
switch req.ConversationType {
|
||||
case constant.SingleChatType:
|
||||
var conversation tablerelation.ConversationModel
|
||||
conversation.ConversationID = req.ConversationID
|
||||
conversation.ConversationType = req.ConversationType
|
||||
conversation.OwnerUserID = req.SendID
|
||||
conversation.UserID = req.RecvID
|
||||
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation})
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "create conversation failed", err, "conversation", conversation)
|
||||
}
|
||||
|
||||
conversation2 := conversation
|
||||
conversation2.OwnerUserID = req.RecvID
|
||||
conversation2.UserID = req.SendID
|
||||
err = c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation2})
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation)
|
||||
}
|
||||
case constant.NotificationChatType:
|
||||
var conversation tablerelation.ConversationModel
|
||||
conversation.ConversationID = req.ConversationID
|
||||
conversation.ConversationType = req.ConversationType
|
||||
conversation.OwnerUserID = req.RecvID
|
||||
conversation.UserID = req.SendID
|
||||
err := c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation})
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation)
|
||||
}
|
||||
}
|
||||
|
||||
conversation2 := conversation
|
||||
conversation2.OwnerUserID = req.RecvID
|
||||
conversation2.UserID = req.SendID
|
||||
err = c.conversationDatabase.CreateConversation(ctx, []*tablerelation.ConversationModel{&conversation2})
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "create conversation failed", err, "conversation2", conversation)
|
||||
}
|
||||
return &pbconversation.CreateSingleChatConversationsResp{}, nil
|
||||
}
|
||||
|
||||
|
||||
@ -252,7 +252,8 @@ func (s *friendServer) GetDesignatedFriends(
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context, req *pbfriend.GetDesignatedFriendsApplyReq) (resp *pbfriend.GetDesignatedFriendsApplyResp, err error) {
|
||||
func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context,
|
||||
req *pbfriend.GetDesignatedFriendsApplyReq) (resp *pbfriend.GetDesignatedFriendsApplyResp, err error) {
|
||||
friendRequests, err := s.friendDatabase.FindBothFriendRequests(ctx, req.FromUserID, req.ToUserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@ -154,16 +154,16 @@ func (s *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *groupServer) GetUsernameMap(ctx context.Context, userIDs []string, complete bool) (map[string]string, error) {
|
||||
func (s *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string, complete bool) (map[string]*sdkws.PublicUserInfo, error) {
|
||||
if len(userIDs) == 0 {
|
||||
return map[string]string{}, nil
|
||||
return map[string]*sdkws.PublicUserInfo{}, nil
|
||||
}
|
||||
users, err := s.User.GetPublicUserInfos(ctx, userIDs, complete)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return utils.SliceToMapAny(users, func(e *sdkws.PublicUserInfo) (string, string) {
|
||||
return e.UserID, e.Nickname
|
||||
return utils.SliceToMapAny(users, func(e *sdkws.PublicUserInfo) (string, *sdkws.PublicUserInfo) {
|
||||
return e.UserID, e
|
||||
}), nil
|
||||
}
|
||||
|
||||
@ -468,15 +468,18 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == ""
|
||||
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == "" || e.FaceURL == ""
|
||||
}), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = nameMap[e.UserID]
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -616,15 +619,18 @@ func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == ""
|
||||
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == "" || e.FaceURL == ""
|
||||
}), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = nameMap[e.UserID]
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -852,32 +858,40 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq)
|
||||
|
||||
func (s *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) (*pbgroup.QuitGroupResp, error) {
|
||||
resp := &pbgroup.QuitGroupResp{}
|
||||
if req.UserID == "" {
|
||||
req.UserID = mcontext.GetOpUserID(ctx)
|
||||
} else {
|
||||
if err := authverify.CheckAccessV3(ctx, req.UserID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
group, err := s.GroupDatabase.TakeGroup(ctx, req.GroupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if group.GroupType == constant.SuperGroup {
|
||||
if err := s.GroupDatabase.DeleteSuperGroupMember(ctx, req.GroupID, []string{mcontext.GetOpUserID(ctx)}); err != nil {
|
||||
if err := s.GroupDatabase.DeleteSuperGroupMember(ctx, req.GroupID, []string{req.UserID}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.Notification.SuperGroupNotification(ctx, mcontext.GetOpUserID(ctx), mcontext.GetOpUserID(ctx))
|
||||
_ = s.Notification.SuperGroupNotification(ctx, req.UserID, req.UserID)
|
||||
} else {
|
||||
info, err := s.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx))
|
||||
info, err := s.TakeGroupMember(ctx, req.GroupID, req.UserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if info.RoleLevel == constant.GroupOwner {
|
||||
return nil, errs.ErrNoPermission.Wrap("group owner can't quit")
|
||||
}
|
||||
err = s.GroupDatabase.DeleteGroupMember(ctx, req.GroupID, []string{mcontext.GetOpUserID(ctx)})
|
||||
err = s.GroupDatabase.DeleteGroupMember(ctx, req.GroupID, []string{req.UserID})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.Notification.MemberQuitNotification(ctx, s.groupMemberDB2PB(info, 0))
|
||||
_ = s.Notification.MemberQuitNotification(ctx, s.groupMemberDB2PB(info, 0))
|
||||
}
|
||||
if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{mcontext.GetOpUserID(ctx)}); err != nil {
|
||||
if err := s.deleteMemberAndSetConversationSeq(ctx, req.GroupID, []string{req.UserID}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
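Note: QuitGroup now resolves the member to remove from req.UserID, falling back to the operator when the field is empty and requiring an access check when acting on someone else, rather than always using the operator ID. A minimal sketch of that resolution; the checkAccess callback is a hypothetical stand-in for authverify.CheckAccessV3.

package main

import (
	"errors"
	"fmt"
)

// resolveTargetUser mirrors the new QuitGroup behavior: an empty request
// user ID means the operator quits on their own behalf; a non-empty one must
// pass an access check before it is used.
func resolveTargetUser(reqUserID, opUserID string, checkAccess func(userID string) error) (string, error) {
	if reqUserID == "" {
		return opUserID, nil
	}
	if err := checkAccess(reqUserID); err != nil {
		return "", err
	}
	return reqUserID, nil
}

func main() {
	allowSelfOnly := func(op string) func(string) error {
		return func(userID string) error {
			if userID != op {
				return errors.New("no permission")
			}
			return nil
		}
	}
	fmt.Println(resolveTargetUser("", "op1", allowSelfOnly("op1")))   // op1, no error
	fmt.Println(resolveTargetUser("u2", "op1", allowSelfOnly("op1"))) // rejected
}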
@ -1059,15 +1073,18 @@ func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr
|
||||
return nil, err
|
||||
}
|
||||
resp.Total = total
|
||||
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == ""
|
||||
nameMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == "" || e.FaceURL == ""
|
||||
}), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = nameMap[e.UserID]
|
||||
e.Nickname = nameMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = nameMap[e.UserID].FaceURL
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -1453,7 +1470,7 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == ""
|
||||
}), true)
|
||||
if err != nil {
|
||||
@ -1461,7 +1478,10 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = nameMap[e.UserID]
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -1486,15 +1506,18 @@ func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nameMap, err := s.GetUsernameMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == ""
|
||||
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == "" || e.FaceURL == ""
|
||||
}), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = nameMap[e.UserID]
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
|
||||
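Note: the member hunks above index publicUserInfoMap[e.UserID] directly, which assumes every filtered user is returned by GetPublicUserInfos. Below is a small self-contained sketch of the same backfill pattern with an explicit missing-key guard; the types are local stand-ins for sdkws.PublicUserInfo and the group member model.

package main

import "fmt"

type PublicUserInfo struct {
	UserID   string
	Nickname string
	FaceURL  string
}

type GroupMember struct {
	UserID   string
	Nickname string
	FaceURL  string
}

// fillMemberPublicInfo backfills empty nickname/face URL fields from the
// public-user-info map, skipping members that are absent from the map.
func fillMemberPublicInfo(infos map[string]*PublicUserInfo, m *GroupMember) {
	info, ok := infos[m.UserID]
	if !ok {
		return
	}
	if m.Nickname == "" {
		m.Nickname = info.Nickname
	}
	if m.FaceURL == "" {
		m.FaceURL = info.FaceURL
	}
}

func main() {
	infos := map[string]*PublicUserInfo{"u1": {UserID: "u1", Nickname: "Alice", FaceURL: "https://example.com/a.png"}}
	member := &GroupMember{UserID: "u1"}
	fillMemberPublicInfo(infos, member)
	fmt.Println(member.Nickname, member.FaceURL)
}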
@ -165,8 +165,8 @@ func (m *msgServer) MarkConversationAsRead(
|
||||
m.conversationAndGetRecvID(conversation, req.UserID), seqs, hasReadSeq); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else if conversation.ConversationType == constant.SuperGroupChatType {
|
||||
} else if conversation.ConversationType == constant.SuperGroupChatType ||
|
||||
conversation.ConversationType == constant.NotificationChatType {
|
||||
if req.HasReadSeq > hasReadSeq {
|
||||
err = m.MsgDatabase.SetHasReadSeq(ctx, req.UserID, req.ConversationID, req.HasReadSeq)
|
||||
if err != nil {
|
||||
|
||||
@ -62,6 +62,11 @@ func (m *msgServer) PullMessageBySeqs(
|
||||
case sdkws.PullOrder_PullOrderDesc:
|
||||
isEnd = seq.Begin <= minSeq
|
||||
}
|
||||
if len(msgs) == 0 {
|
||||
log.ZWarn(ctx, "not have msgs", nil, "conversationID", seq.ConversationID, "seq", seq)
|
||||
|
||||
continue
|
||||
}
|
||||
resp.Msgs[seq.ConversationID] = &sdkws.PullMsgs{Msgs: msgs, IsEnd: isEnd}
|
||||
} else {
|
||||
var seqs []int64
|
||||
@ -71,6 +76,7 @@ func (m *msgServer) PullMessageBySeqs(
|
||||
minSeq, maxSeq, notificationMsgs, err := m.MsgDatabase.GetMsgBySeqs(ctx, req.UserID, seq.ConversationID, seqs)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "GetMsgBySeqs error", err, "conversationID", seq.ConversationID, "seq", seq)
|
||||
|
||||
continue
|
||||
}
|
||||
var isEnd bool
|
||||
@ -80,6 +86,11 @@ func (m *msgServer) PullMessageBySeqs(
|
||||
case sdkws.PullOrder_PullOrderDesc:
|
||||
isEnd = seq.Begin <= minSeq
|
||||
}
|
||||
if len(notificationMsgs) == 0 {
|
||||
log.ZWarn(ctx, "not have notificationMsgs", nil, "conversationID", seq.ConversationID, "seq", seq)
|
||||
|
||||
continue
|
||||
}
|
||||
resp.NotificationMsgs[seq.ConversationID] = &sdkws.PullMsgs{Msgs: notificationMsgs, IsEnd: isEnd}
|
||||
}
|
||||
}
|
||||
|
||||
@ -41,7 +41,7 @@ type SendMsgReq struct {
|
||||
type BatchSendMsgReq struct {
|
||||
SendMsg
|
||||
IsSendAll bool `json:"isSendAll"`
|
||||
RecvIDs []string `json:"recvIDs"`
|
||||
RecvIDs []string `json:"recvIDs" binding:"required"`
|
||||
}
|
||||
|
||||
type BatchSendMsgResp struct {
|
||||
|
||||
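Note: marking RecvIDs with binding:"required" makes gin reject batch-send requests that omit the field at bind time. A hedged, stand-alone sketch follows; the route path and response shape are illustrative only, not OpenIM's actual API, and a present-but-empty recvIDs array still passes this particular check.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/gin-gonic/gin"
)

type BatchSendMsgReq struct {
	IsSendAll bool     `json:"isSendAll"`
	RecvIDs   []string `json:"recvIDs" binding:"required"`
}

func main() {
	gin.SetMode(gin.TestMode)
	r := gin.New()
	r.POST("/batch_send_msg", func(c *gin.Context) {
		var req BatchSendMsgReq
		if err := c.ShouldBindJSON(&req); err != nil {
			// A request without recvIDs now fails here instead of reaching the RPC layer.
			c.JSON(http.StatusBadRequest, gin.H{"errMsg": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"recvCount": len(req.RecvIDs)})
	})

	w := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodPost, "/batch_send_msg", strings.NewReader(`{"isSendAll":false}`))
	req.Header.Set("Content-Type", "application/json")
	r.ServeHTTP(w, req)
	fmt.Println(w.Code) // 400: recvIDs is required
}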
@ -28,6 +28,7 @@ type ApiCmd struct {
|
||||
func NewApiCmd() *ApiCmd {
|
||||
ret := &ApiCmd{NewRootCmd("api")}
|
||||
ret.SetRootCmdPt(ret)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
@ -36,11 +37,13 @@ func (a *ApiCmd) AddApi(f func(port int) error) {
|
||||
return f(a.getPortFlag(cmd))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ApiCmd) GetPortFromConfig(portType string) int {
|
||||
fmt.Println("GetPortFromConfig:", portType)
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.Api.OpenImApiPort[0]
|
||||
} else {
|
||||
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
@ -23,6 +23,7 @@ import (
|
||||
|
||||
"github.com/OpenIMSDK/protocol/constant"
|
||||
"github.com/OpenIMSDK/tools/log"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
)
|
||||
|
||||
@ -53,38 +54,76 @@ func WithLogName(logName string) func(*CmdOpts) {
|
||||
}
|
||||
}
|
||||
|
||||
func NewRootCmd(name string, opts ...func(*CmdOpts)) (rootCmd *RootCmd) {
|
||||
rootCmd = &RootCmd{Name: name}
|
||||
c := cobra.Command{
|
||||
Use: "start openIM application",
|
||||
func NewRootCmd(name string, opts ...func(*CmdOpts)) *RootCmd {
|
||||
rootCmd := &RootCmd{Name: name}
|
||||
cmd := cobra.Command{
|
||||
Use: "Start openIM application",
|
||||
Short: fmt.Sprintf(`Start %s `, name),
|
||||
Long: fmt.Sprintf(`Start %s `, name),
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := rootCmd.getConfFromCmdAndInit(cmd); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cmdOpts := &CmdOpts{}
|
||||
for _, opt := range opts {
|
||||
opt(cmdOpts)
|
||||
}
|
||||
if cmdOpts.loggerPrefixName == "" {
|
||||
cmdOpts.loggerPrefixName = "OpenIM.log.all"
|
||||
}
|
||||
if err := log.InitFromConfig(cmdOpts.loggerPrefixName, name, config.Config.Log.RemainLogLevel, config.Config.Log.IsStdout, config.Config.Log.IsJson, config.Config.Log.StorageLocation, config.Config.Log.RemainRotationCount, config.Config.Log.RotationTime); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nil
|
||||
return rootCmd.persistentPreRun(cmd, opts...)
|
||||
},
|
||||
}
|
||||
rootCmd.Command = c
|
||||
rootCmd.Command = cmd
|
||||
rootCmd.addConfFlag()
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
func (rc *RootCmd) persistentPreRun(cmd *cobra.Command, opts ...func(*CmdOpts)) error {
|
||||
if err := rc.initializeConfiguration(cmd); err != nil {
|
||||
return fmt.Errorf("failed to get configuration from command: %w", err)
|
||||
}
|
||||
|
||||
cmdOpts := rc.applyOptions(opts...)
|
||||
|
||||
if err := rc.initializeLogger(cmdOpts); err != nil {
|
||||
return fmt.Errorf("failed to initialize from config: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rc *RootCmd) initializeConfiguration(cmd *cobra.Command) error {
|
||||
return rc.getConfFromCmdAndInit(cmd)
|
||||
}
|
||||
|
||||
func (rc *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
|
||||
cmdOpts := defaultCmdOpts()
|
||||
for _, opt := range opts {
|
||||
opt(cmdOpts)
|
||||
}
|
||||
|
||||
return cmdOpts
|
||||
}
|
||||
|
||||
func (rc *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
|
||||
logConfig := config.Config.Log
|
||||
|
||||
return log.InitFromConfig(
|
||||
|
||||
cmdOpts.loggerPrefixName,
|
||||
rc.Name,
|
||||
logConfig.RemainLogLevel,
|
||||
logConfig.IsStdout,
|
||||
logConfig.IsJson,
|
||||
logConfig.StorageLocation,
|
||||
logConfig.RemainRotationCount,
|
||||
logConfig.RotationTime,
|
||||
)
|
||||
}
|
||||
|
||||
func defaultCmdOpts() *CmdOpts {
|
||||
return &CmdOpts{
|
||||
loggerPrefixName: "OpenIM.log.all",
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RootCmd) SetRootCmdPt(cmdItf RootCmdPt) {
|
||||
r.cmdItf = cmdItf
|
||||
}
|
||||
|
||||
func (r *RootCmd) addConfFlag() {
|
||||
r.Command.Flags().StringP(constant.FlagConf, "c", "", "Path to config file folder")
|
||||
r.Command.Flags().StringP(constant.FlagConf, "c", "", "path to config file folder")
|
||||
}
|
||||
|
||||
func (r *RootCmd) AddPortFlag() {
|
||||
|
||||
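Note: the NewRootCmd refactor splits the PersistentPreRunE body into persistentPreRun, applyOptions, and initializeLogger, and errors now propagate instead of panicking. The option-defaulting piece in isolation is sketched below; only WithLogName and the "OpenIM.log.all" default come from this diff, the rest is a minimal reproduction.

package main

import "fmt"

// CmdOpts and WithLogName mirror the functional options in pkg/common/cmd;
// defaultCmdOpts/applyOptions reproduce the refactor's defaulting step, where
// "OpenIM.log.all" is used when no WithLogName option is supplied.
type CmdOpts struct {
	loggerPrefixName string
}

func WithLogName(logName string) func(*CmdOpts) {
	return func(opts *CmdOpts) { opts.loggerPrefixName = logName }
}

func defaultCmdOpts() *CmdOpts {
	return &CmdOpts{loggerPrefixName: "OpenIM.log.all"}
}

func applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
	cmdOpts := defaultCmdOpts()
	for _, opt := range opts {
		opt(cmdOpts)
	}
	return cmdOpts
}

func main() {
	fmt.Println(applyOptions().loggerPrefixName)                              // OpenIM.log.all
	fmt.Println(applyOptions(WithLogName("OpenIM.log.api")).loggerPrefixName) // OpenIM.log.api
}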
@ -19,32 +19,31 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/OpenIMSDK/protocol/constant"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
//go:embed version
|
||||
var Version string
|
||||
|
||||
var (
|
||||
_, b, _, _ = runtime.Caller(0)
|
||||
// Root folder of this project.
|
||||
Root = filepath.Join(filepath.Dir(b), "../../..")
|
||||
)
|
||||
|
||||
const (
|
||||
FileName = "config.yaml"
|
||||
NotificationFileName = "notification.yaml"
|
||||
DefaultFolderPath = "../config/"
|
||||
)
|
||||
|
||||
// getProjectRoot returns the absolute path of the project root directory
|
||||
func GetProjectRoot() string {
|
||||
b, _ := filepath.Abs(os.Args[0])
|
||||
|
||||
return filepath.Join(filepath.Dir(b), "../../..")
|
||||
}
|
||||
|
||||
func GetOptionsByNotification(cfg NotificationConf) msgprocessor.Options {
|
||||
opts := msgprocessor.NewOptions()
|
||||
|
||||
if cfg.UnreadCount {
|
||||
opts = msgprocessor.WithOptions(opts, msgprocessor.WithUnreadCount(true))
|
||||
}
|
||||
@ -61,40 +60,38 @@ func GetOptionsByNotification(cfg NotificationConf) msgprocessor.Options {
|
||||
}
|
||||
|
||||
func initConfig(config interface{}, configName, configFolderPath string) error {
|
||||
if configFolderPath == "" {
|
||||
configFolderPath = DefaultFolderPath
|
||||
}
|
||||
configPath := filepath.Join(configFolderPath, configName)
|
||||
defer func() {
|
||||
fmt.Println("use config", configPath)
|
||||
}()
|
||||
_, err := os.Stat(configPath)
|
||||
configFolderPath = filepath.Join(configFolderPath, configName)
|
||||
_, err := os.Stat(configFolderPath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
return fmt.Errorf("stat config path error: %w", err)
|
||||
}
|
||||
configPath = filepath.Join(Root, "config", configName)
|
||||
} else {
|
||||
Root = filepath.Dir(configPath)
|
||||
configFolderPath = filepath.Join(GetProjectRoot(), "config", configName)
|
||||
}
|
||||
data, err := os.ReadFile(configPath)
|
||||
data, err := os.ReadFile(configFolderPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("read file error: %w", err)
|
||||
}
|
||||
if err = yaml.Unmarshal(data, config); err != nil {
|
||||
return err
|
||||
return fmt.Errorf("unmarshal yaml error: %w", err)
|
||||
}
|
||||
fmt.Println("use config", configFolderPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func InitConfig(configFolderPath string) error {
|
||||
err := initConfig(&Config, FileName, configFolderPath)
|
||||
if err != nil {
|
||||
if configFolderPath == "" {
|
||||
envConfigPath := os.Getenv("OPENIMCONFIG")
|
||||
if envConfigPath != "" {
|
||||
configFolderPath = envConfigPath
|
||||
} else {
|
||||
configFolderPath = DefaultFolderPath
|
||||
}
|
||||
}
|
||||
|
||||
if err := initConfig(&Config, FileName, configFolderPath); err != nil {
|
||||
return err
|
||||
}
|
||||
err = initConfig(&Config.Notification, NotificationFileName, configFolderPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
return initConfig(&Config.Notification, NotificationFileName, configFolderPath)
|
||||
}
|
||||
|
||||
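Note: InitConfig now resolves the config folder as the explicit flag value first, then the OPENIMCONFIG environment variable, then DefaultFolderPath, and initConfig still falls back to the project-root config directory when the file is missing. A minimal sketch of the lookup order; resolveConfigDir is a hypothetical helper, not part of the commit.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveConfigDir mirrors the lookup order now used by InitConfig: an
// explicit folder wins, then the OPENIMCONFIG environment variable, then
// DefaultFolderPath ("../config/").
func resolveConfigDir(explicit string) string {
	if explicit != "" {
		return explicit
	}
	if env := os.Getenv("OPENIMCONFIG"); env != "" {
		return env
	}
	return "../config/"
}

func main() {
	dir := resolveConfigDir("")
	fmt.Println("would load:", filepath.Join(dir, "config.yaml"), "and", filepath.Join(dir, "notification.yaml"))
}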
@ -1 +1 @@
|
||||
v3.2.0
|
||||
v3.3.0
|
||||
2 pkg/common/db/cache/black.go vendored
@ -52,6 +52,7 @@ func NewBlackCacheRedis(
|
||||
options rockscache.Options,
|
||||
) BlackCache {
|
||||
rcClient := rockscache.NewClient(rdb, options)
|
||||
|
||||
return &BlackCacheRedis{
|
||||
expireTime: blackExpireTime,
|
||||
rcClient: rcClient,
|
||||
@ -88,5 +89,6 @@ func (b *BlackCacheRedis) GetBlackIDs(ctx context.Context, userID string) (black
|
||||
func (b *BlackCacheRedis) DelBlackIDs(ctx context.Context, userID string) BlackCache {
|
||||
cache := b.NewCache()
|
||||
cache.AddKeys(b.getBlackIDsKey(userID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
268 pkg/common/db/cache/conversation.go vendored
@ -73,7 +73,7 @@ type ConversationCache interface {
|
||||
GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error)
|
||||
DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache
|
||||
|
||||
GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
|
||||
//GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
|
||||
DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache
|
||||
|
||||
GetConversationsByConversationID(ctx context.Context,
|
||||
@ -83,12 +83,9 @@ type ConversationCache interface {
|
||||
DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache
|
||||
}
|
||||
|
||||
func NewConversationRedis(
|
||||
rdb redis.UniversalClient,
|
||||
opts rockscache.Options,
|
||||
db relationtb.ConversationModelInterface,
|
||||
) ConversationCache {
|
||||
func NewConversationRedis(rdb redis.UniversalClient, opts rockscache.Options, db relationtb.ConversationModelInterface) ConversationCache {
|
||||
rcClient := rockscache.NewClient(rdb, opts)
|
||||
|
||||
return &ConversationRedisCache{
|
||||
rcClient: rcClient,
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
@ -110,6 +107,7 @@ func NewNewConversationRedis(
|
||||
options rockscache.Options,
|
||||
) ConversationCache {
|
||||
rcClient := rockscache.NewClient(rdb, options)
|
||||
|
||||
return &ConversationRedisCache{
|
||||
rcClient: rcClient,
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
@ -156,24 +154,19 @@ func (c *ConversationRedisCache) getConversationNotReceiveMessageUserIDsKey(conv
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getConversationIDsKey(ownerUserID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
|
||||
},
|
||||
)
|
||||
return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) ConversationCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, c.getConversationIDsKey(userID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
@ -181,10 +174,7 @@ func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID strin
|
||||
return conversationIDsHashKey + ownerUserID
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserConversationIDsHash(
|
||||
ctx context.Context,
|
||||
ownerUserID string,
|
||||
) (hash uint64, err error) {
|
||||
func (c *ConversationRedisCache) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
@ -204,229 +194,180 @@ func (c *ConversationRedisCache) GetUserConversationIDsHash(
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversation(
|
||||
ctx context.Context,
|
||||
ownerUserID, conversationID string,
|
||||
) (*relationtb.ConversationModel, error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getConversationKey(ownerUserID, conversationID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) (*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
|
||||
},
|
||||
)
|
||||
func (c *ConversationRedisCache) GetConversation(ctx context.Context, ownerUserID, conversationID string) (*relationtb.ConversationModel, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(conversationIDs))
|
||||
for _, conversationID := range conversationIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationIndex(
|
||||
convsation *relationtb.ConversationModel,
|
||||
keys []string,
|
||||
) (int, error) {
|
||||
func (c *ConversationRedisCache) getConversationIndex(convsation *relationtb.ConversationModel, keys []string) (int, error) {
|
||||
key := c.getConversationKey(convsation.OwnerUserID, convsation.ConversationID)
|
||||
for _i, _key := range keys {
|
||||
if _key == key {
|
||||
return _i, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errors.New("not found key:" + key + " in keys")
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversations(
|
||||
ctx context.Context,
|
||||
ownerUserID string,
|
||||
conversationIDs []string,
|
||||
) ([]*relationtb.ConversationModel, error) {
|
||||
var keys []string
|
||||
for _, conversarionID := range conversationIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
|
||||
}
|
||||
return batchGetCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
keys,
|
||||
c.expireTime,
|
||||
c.getConversationIndex,
|
||||
func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
|
||||
},
|
||||
)
|
||||
func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
|
||||
//var keys []string
|
||||
//for _, conversarionID := range conversationIDs {
|
||||
// keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
|
||||
//}
|
||||
//return batchGetCache(
|
||||
// ctx,
|
||||
// c.rcClient,
|
||||
// keys,
|
||||
// c.expireTime,
|
||||
// c.getConversationIndex,
|
||||
// func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
|
||||
// return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
|
||||
// },
|
||||
//)
|
||||
return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
|
||||
return c.getConversationKey(ownerUserID, conversationID)
|
||||
}, func(ctx context.Context, conversationID string) (*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserAllConversations(
|
||||
ctx context.Context,
|
||||
ownerUserID string,
|
||||
) ([]*relationtb.ConversationModel, error) {
|
||||
func (c *ConversationRedisCache) GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*relationtb.ConversationModel, error) {
|
||||
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keys []string
|
||||
for _, conversarionID := range conversationIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
|
||||
}
|
||||
return batchGetCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
keys,
|
||||
c.expireTime,
|
||||
c.getConversationIndex,
|
||||
func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.FindUserIDAllConversations(ctx, ownerUserID)
|
||||
},
|
||||
)
|
||||
//var keys []string
|
||||
//for _, conversarionID := range conversationIDs {
|
||||
// keys = append(keys, c.getConversationKey(ownerUserID, conversarionID))
|
||||
//}
|
||||
//return batchGetCache(
|
||||
// ctx,
|
||||
// c.rcClient,
|
||||
// keys,
|
||||
// c.expireTime,
|
||||
// c.getConversationIndex,
|
||||
// func(ctx context.Context) ([]*relationtb.ConversationModel, error) {
|
||||
// return c.conversationDB.FindUserIDAllConversations(ctx, ownerUserID)
|
||||
// },
|
||||
//)
|
||||
return c.GetConversations(ctx, ownerUserID, conversationIDs)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserRecvMsgOpt(
|
||||
ctx context.Context,
|
||||
ownerUserID, conversationID string,
|
||||
) (opt int, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getRecvMsgOptKey(ownerUserID, conversationID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) (opt int, err error) {
|
||||
return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID)
|
||||
},
|
||||
)
|
||||
func (c *ConversationRedisCache) GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error) {
|
||||
return getCache(ctx, c.rcClient, c.getRecvMsgOptKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (opt int, err error) {
|
||||
return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDs(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
) (userIDs []string, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getSuperGroupRecvNotNotifyUserIDsKey(groupID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) (userIDs []string, err error) {
|
||||
return c.conversationDB.FindSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
|
||||
},
|
||||
)
|
||||
func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) (userIDs []string, err error) {
|
||||
return getCache(ctx, c.rcClient, c.getSuperGroupRecvNotNotifyUserIDsKey(groupID), c.expireTime, func(ctx context.Context) (userIDs []string, err error) {
|
||||
return c.conversationDB.FindSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
|
||||
}
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
) (hash uint64, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) (hash uint64, err error) {
|
||||
userIDs, err := c.GetSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
utils.Sort(userIDs, true)
|
||||
bi := big.NewInt(0)
|
||||
bi.SetString(utils.Md5(strings.Join(userIDs, ";"))[0:8], 16)
|
||||
return bi.Uint64(), nil
|
||||
},
|
||||
func (c *ConversationRedisCache) GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error) {
|
||||
return getCache(ctx, c.rcClient, c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID), c.expireTime, func(ctx context.Context) (hash uint64, err error) {
|
||||
userIDs, err := c.GetSuperGroupRecvMsgNotNotifyUserIDs(ctx, groupID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
utils.Sort(userIDs, true)
|
||||
bi := big.NewInt(0)
|
||||
bi.SetString(utils.Md5(strings.Join(userIDs, ";"))[0:8], 16)
|
||||
return bi.Uint64(), nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex(
|
||||
conversationID string,
|
||||
conversationIDs []string,
|
||||
) (int, error) {
|
||||
func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex(conversationID string, conversationIDs []string) (int, error) {
|
||||
for _i, _conversationID := range conversationIDs {
|
||||
if _conversationID == conversationID {
|
||||
return _i, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errors.New("not found key:" + conversationID + " in keys")
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserAllHasReadSeqs(
|
||||
ctx context.Context,
|
||||
ownerUserID string,
|
||||
) (map[string]int64, error) {
|
||||
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keys []string
|
||||
for _, conversarionID := range conversationIDs {
|
||||
keys = append(keys, c.getConversationHasReadSeqKey(ownerUserID, conversarionID))
|
||||
}
|
||||
return batchGetCacheMap(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
keys,
|
||||
conversationIDs,
|
||||
c.expireTime,
|
||||
c.getUserAllHasReadSeqsIndex,
|
||||
func(ctx context.Context) (map[string]int64, error) {
|
||||
return c.conversationDB.GetUserAllHasReadSeqs(ctx, ownerUserID)
|
||||
},
|
||||
)
|
||||
}
|
||||
//func (c *ConversationRedisCache) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
|
||||
// conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// var keys []string
|
||||
// for _, conversarionID := range conversationIDs {
|
||||
// keys = append(keys, c.getConversationHasReadSeqKey(ownerUserID, conversarionID))
|
||||
// }
|
||||
// return batchGetCacheMap(ctx, c.rcClient, keys, conversationIDs, c.expireTime, c.getUserAllHasReadSeqsIndex, func(ctx context.Context) (map[string]int64, error) {
|
||||
// return c.conversationDB.GetUserAllHasReadSeqs(ctx, ownerUserID)
|
||||
// })
|
||||
//}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string,
|
||||
conversationIDs ...string,
|
||||
) ConversationCache {
|
||||
func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache {
|
||||
cache := c.NewCache()
|
||||
for _, conversationID := range conversationIDs {
|
||||
cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID))
|
||||
}
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversationsByConversationID(
|
||||
ctx context.Context,
|
||||
conversationIDs []string,
|
||||
) ([]*relationtb.ConversationModel, error) {
|
||||
func (c *ConversationRedisCache) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
@ -435,15 +376,9 @@ func (c *ConversationRedisCache) DelConversationByConversationID(conversationIDs
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getConversationNotReceiveMessageUserIDsKey(conversationID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
|
||||
},
|
||||
)
|
||||
return getCache(ctx, c.rcClient, c.getConversationNotReceiveMessageUserIDsKey(conversationID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache {
|
||||
@ -451,5 +386,6 @@ func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(convers
|
||||
for _, conversationID := range conversationIDs {
|
||||
cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
|
||||
}
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
81 pkg/common/db/cache/friend.go vendored
@ -53,11 +53,8 @@ type FriendCacheRedis struct {
|
||||
rcClient *rockscache.Client
|
||||
}
|
||||
|
||||
func NewFriendCacheRedis(
|
||||
rdb redis.UniversalClient,
|
||||
friendDB relationtb.FriendModelInterface,
|
||||
options rockscache.Options,
|
||||
) FriendCache {
|
||||
func NewFriendCacheRedis(rdb redis.UniversalClient, friendDB relationtb.FriendModelInterface,
|
||||
options rockscache.Options) FriendCache {
|
||||
rcClient := rockscache.NewClient(rdb, options)
|
||||
return &FriendCacheRedis{
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
@ -67,12 +64,12 @@ func NewFriendCacheRedis(
|
||||
}
|
||||
}
|
||||
|
||||
func (c *FriendCacheRedis) NewCache() FriendCache {
|
||||
func (f *FriendCacheRedis) NewCache() FriendCache {
|
||||
return &FriendCacheRedis{
|
||||
rcClient: c.rcClient,
|
||||
metaCache: NewMetaCacheRedis(c.rcClient, c.metaCache.GetPreDelKeys()...),
|
||||
friendDB: c.friendDB,
|
||||
expireTime: c.expireTime,
|
||||
rcClient: f.rcClient,
|
||||
metaCache: NewMetaCacheRedis(f.rcClient, f.metaCache.GetPreDelKeys()...),
|
||||
friendDB: f.friendDB,
|
||||
expireTime: f.expireTime,
|
||||
}
|
||||
}
|
||||
|
||||
@ -89,32 +86,24 @@ func (f *FriendCacheRedis) getFriendKey(ownerUserID, friendUserID string) string
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
f.rcClient,
|
||||
f.getFriendIDsKey(ownerUserID),
|
||||
f.expireTime,
|
||||
func(ctx context.Context) ([]string, error) {
|
||||
return f.friendDB.FindFriendUserIDs(ctx, ownerUserID)
|
||||
},
|
||||
)
|
||||
return getCache(ctx, f.rcClient, f.getFriendIDsKey(ownerUserID), f.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return f.friendDB.FindFriendUserIDs(ctx, ownerUserID)
|
||||
})
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) DelFriendIDs(ownerUserID ...string) FriendCache {
|
||||
new := f.NewCache()
|
||||
var keys []string
|
||||
for _, userID := range ownerUserID {
|
||||
func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) FriendCache {
|
||||
newGroupCache := f.NewCache()
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, userID := range ownerUserIDs {
|
||||
keys = append(keys, f.getFriendIDsKey(userID))
|
||||
}
|
||||
new.AddKeys(keys...)
|
||||
return new
|
||||
newGroupCache.AddKeys(keys...)
|
||||
|
||||
return newGroupCache
|
||||
}
|
||||
|
||||
// todo.
|
||||
func (f *FriendCacheRedis) GetTwoWayFriendIDs(
|
||||
ctx context.Context,
|
||||
ownerUserID string,
|
||||
) (twoWayFriendIDs []string, err error) {
|
||||
func (f *FriendCacheRedis) GetTwoWayFriendIDs(ctx context.Context, ownerUserID string) (twoWayFriendIDs []string, err error) {
|
||||
friendIDs, err := f.GetFriendIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -128,32 +117,28 @@ func (f *FriendCacheRedis) GetTwoWayFriendIDs(
|
||||
twoWayFriendIDs = append(twoWayFriendIDs, ownerUserID)
|
||||
}
|
||||
}
|
||||
|
||||
return twoWayFriendIDs, nil
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) FriendCache {
|
||||
new := f.NewCache()
|
||||
new.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))
|
||||
return new
|
||||
newFriendCache := f.NewCache()
|
||||
newFriendCache.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) GetFriend(
|
||||
ctx context.Context,
|
||||
ownerUserID, friendUserID string,
|
||||
) (friend *relationtb.FriendModel, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
f.rcClient,
|
||||
f.getFriendKey(ownerUserID, friendUserID),
|
||||
f.expireTime,
|
||||
func(ctx context.Context) (*relationtb.FriendModel, error) {
|
||||
return f.friendDB.Take(ctx, ownerUserID, friendUserID)
|
||||
},
|
||||
)
|
||||
func (f *FriendCacheRedis) GetFriend(ctx context.Context, ownerUserID,
|
||||
friendUserID string) (friend *relationtb.FriendModel, err error) {
|
||||
return getCache(ctx, f.rcClient, f.getFriendKey(ownerUserID,
|
||||
friendUserID), f.expireTime, func(ctx context.Context) (*relationtb.FriendModel, error) {
|
||||
return f.friendDB.Take(ctx, ownerUserID, friendUserID)
|
||||
})
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) FriendCache {
|
||||
new := f.NewCache()
|
||||
new.AddKeys(f.getFriendKey(ownerUserID, friendUserID))
|
||||
return new
|
||||
newFriendCache := f.NewCache()
|
||||
newFriendCache.AddKeys(f.getFriendKey(ownerUserID, friendUserID))
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
298 pkg/common/db/cache/group.go vendored
@ -65,22 +65,10 @@ type GroupCache interface {
|
||||
GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error)
|
||||
DelJoinedGroupID(userID ...string) GroupCache
|
||||
|
||||
GetGroupMemberInfo(
|
||||
ctx context.Context,
|
||||
groupID, userID string,
|
||||
) (groupMember *relationtb.GroupMemberModel, err error)
|
||||
GetGroupMembersInfo(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
userID []string,
|
||||
) (groupMembers []*relationtb.GroupMemberModel, err error)
|
||||
GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *relationtb.GroupMemberModel, err error)
|
||||
GetGroupMembersInfo(ctx context.Context, groupID string, userID []string) (groupMembers []*relationtb.GroupMemberModel, err error)
|
||||
GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error)
|
||||
GetGroupMembersPage(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
userID []string,
|
||||
showNumber, pageNumber int32,
|
||||
) (total uint32, groupMembers []*relationtb.GroupMemberModel, err error)
|
||||
GetGroupMembersPage(ctx context.Context, groupID string, userID []string, showNumber, pageNumber int32) (total uint32, groupMembers []*relationtb.GroupMemberModel, err error)
|
||||
|
||||
DelGroupMembersInfo(groupID string, userID ...string) GroupCache
|
||||
|
||||
@ -109,6 +97,7 @@ func NewGroupCacheRedis(
|
||||
opts rockscache.Options,
|
||||
) GroupCache {
|
||||
rcClient := rockscache.NewClient(rdb, opts)
|
||||
|
||||
return &GroupCacheRedis{
|
||||
rcClient: rcClient, expireTime: groupExpireTime,
|
||||
groupDB: groupDB, groupMemberDB: groupMemberDB, groupRequestDB: groupRequestDB,
|
||||
@ -169,6 +158,7 @@ func (g *GroupCacheRedis) GetGroupIndex(group *relationtb.GroupModel, keys []str
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errIndex
|
||||
}
|
||||
|
||||
@ -179,117 +169,98 @@ func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *relationtb.GroupMembe
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errIndex
|
||||
}
|
||||
|
||||
// / groupInfo.
|
||||
func (g *GroupCacheRedis) GetGroupsInfo(
|
||||
ctx context.Context,
|
||||
groupIDs []string,
|
||||
) (groups []*relationtb.GroupModel, err error) {
|
||||
var keys []string
|
||||
for _, group := range groupIDs {
|
||||
keys = append(keys, g.getGroupInfoKey(group))
|
||||
}
|
||||
return batchGetCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
keys,
|
||||
g.expireTime,
|
||||
g.GetGroupIndex,
|
||||
func(ctx context.Context) ([]*relationtb.GroupModel, error) {
|
||||
return g.groupDB.Find(ctx, groupIDs)
|
||||
},
|
||||
)
|
||||
func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*relationtb.GroupModel, err error) {
|
||||
//var keys []string
|
||||
//for _, group := range groupIDs {
|
||||
// keys = append(keys, g.getGroupInfoKey(group))
|
||||
//}
|
||||
//return batchGetCache(ctx, g.rcClient, keys, g.expireTime, g.GetGroupIndex, func(ctx context.Context) ([]*relationtb.GroupModel, error) {
|
||||
// return g.groupDB.Find(ctx, groupIDs)
|
||||
//})
|
||||
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
|
||||
return g.getGroupInfoKey(groupID)
|
||||
}, func(ctx context.Context, groupID string) (*relationtb.GroupModel, error) {
|
||||
return g.groupDB.Take(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *relationtb.GroupModel, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
g.getGroupInfoKey(groupID),
|
||||
g.expireTime,
|
||||
func(ctx context.Context) (*relationtb.GroupModel, error) {
|
||||
return g.groupDB.Take(ctx, groupID)
|
||||
},
|
||||
)
|
||||
return getCache(ctx, g.rcClient, g.getGroupInfoKey(groupID), g.expireTime, func(ctx context.Context) (*relationtb.GroupModel, error) {
|
||||
return g.groupDB.Take(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) GroupCache {
|
||||
new := g.NewCache()
|
||||
var keys []string
|
||||
newGroupCache := g.NewCache()
|
||||
keys := make([]string, 0, len(groupIDs))
|
||||
for _, groupID := range groupIDs {
|
||||
keys = append(keys, g.getGroupInfoKey(groupID))
|
||||
}
|
||||
new.AddKeys(keys...)
|
||||
return new
|
||||
newGroupCache.AddKeys(keys...)
|
||||
|
||||
return newGroupCache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetJoinedSuperGroupIDs(
|
||||
ctx context.Context,
|
||||
userID string,
|
||||
) (joinedSuperGroupIDs []string, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
g.getJoinedSuperGroupsIDKey(userID),
|
||||
g.expireTime,
|
||||
func(ctx context.Context) ([]string, error) {
|
||||
userGroup, err := g.mongoDB.GetSuperGroupByUserID(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return userGroup.GroupIDs, nil
|
||||
},
|
||||
func (g *GroupCacheRedis) GetJoinedSuperGroupIDs(ctx context.Context, userID string) (joinedSuperGroupIDs []string, err error) {
|
||||
return getCache(ctx, g.rcClient, g.getJoinedSuperGroupsIDKey(userID), g.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
userGroup, err := g.mongoDB.GetSuperGroupByUserID(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return userGroup.GroupIDs, nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetSuperGroupMemberIDs(
|
||||
ctx context.Context,
|
||||
groupIDs ...string,
|
||||
) (models []*unrelationtb.SuperGroupModel, err error) {
|
||||
var keys []string
|
||||
for _, group := range groupIDs {
|
||||
keys = append(keys, g.getSuperGroupMemberIDsKey(group))
|
||||
}
|
||||
return batchGetCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
keys,
|
||||
g.expireTime,
|
||||
func(model *unrelationtb.SuperGroupModel, keys []string) (int, error) {
|
||||
for i, key := range keys {
|
||||
if g.getSuperGroupMemberIDsKey(model.GroupID) == key {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return 0, errIndex
|
||||
},
|
||||
func(ctx context.Context) ([]*unrelationtb.SuperGroupModel, error) {
|
||||
return g.mongoDB.FindSuperGroup(ctx, groupIDs)
|
||||
},
|
||||
)
|
||||
func (g *GroupCacheRedis) GetSuperGroupMemberIDs(ctx context.Context, groupIDs ...string) (models []*unrelationtb.SuperGroupModel, err error) {
|
||||
//var keys []string
|
||||
//for _, group := range groupIDs {
|
||||
// keys = append(keys, g.getSuperGroupMemberIDsKey(group))
|
||||
//}
|
||||
//return batchGetCache(ctx, g.rcClient, keys, g.expireTime, func(model *unrelationtb.SuperGroupModel, keys []string) (int, error) {
|
||||
// for i, key := range keys {
|
||||
// if g.getSuperGroupMemberIDsKey(model.GroupID) == key {
|
||||
// return i, nil
|
||||
// }
|
||||
// }
|
||||
// return 0, errIndex
|
||||
//},
|
||||
// func(ctx context.Context) ([]*unrelationtb.SuperGroupModel, error) {
|
||||
// return g.mongoDB.FindSuperGroup(ctx, groupIDs)
|
||||
// })
|
||||
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
|
||||
return g.getSuperGroupMemberIDsKey(groupID)
|
||||
}, func(ctx context.Context, groupID string) (*unrelationtb.SuperGroupModel, error) {
|
||||
return g.mongoDB.TakeSuperGroup(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
// userJoinSuperGroup.
|
||||
func (g *GroupCacheRedis) DelJoinedSuperGroupIDs(userIDs ...string) GroupCache {
|
||||
new := g.NewCache()
|
||||
var keys []string
|
||||
newGroupCache := g.NewCache()
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, g.getJoinedSuperGroupsIDKey(userID))
|
||||
}
|
||||
new.AddKeys(keys...)
|
||||
return new
|
||||
newGroupCache.AddKeys(keys...)
|
||||
|
||||
return newGroupCache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelSuperGroupMemberIDs(groupIDs ...string) GroupCache {
|
||||
new := g.NewCache()
|
||||
var keys []string
|
||||
newGroupCache := g.NewCache()
|
||||
keys := make([]string, 0, len(groupIDs))
|
||||
for _, groupID := range groupIDs {
|
||||
keys = append(keys, g.getSuperGroupMemberIDsKey(groupID))
|
||||
}
|
||||
new.AddKeys(keys...)
|
||||
return new
|
||||
newGroupCache.AddKeys(keys...)
|
||||
|
||||
return newGroupCache
|
||||
}
|
||||
|
||||
// groupMembersHash.
|
||||
@ -351,10 +322,7 @@ func (g *GroupCacheRedis) GetGroupMembersHash(ctx context.Context, groupID strin
|
||||
//)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMemberHashMap(
|
||||
ctx context.Context,
|
||||
groupIDs []string,
|
||||
) (map[string]*relationtb.GroupSimpleUserID, error) {
|
||||
func (g *GroupCacheRedis) GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*relationtb.GroupSimpleUserID, error) {
|
||||
res := make(map[string]*relationtb.GroupSimpleUserID)
|
||||
for _, groupID := range groupIDs {
|
||||
hash, err := g.GetGroupMembersHash(ctx, groupID)
|
||||
@ -368,26 +336,22 @@ func (g *GroupCacheRedis) GetGroupMemberHashMap(
|
||||
}
|
||||
res[groupID] = &relationtb.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) GroupCache {
|
||||
cache := g.NewCache()
|
||||
cache.AddKeys(g.getGroupMembersHashKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
// groupMemberIDs.
|
||||
func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
g.getGroupMemberIDsKey(groupID),
|
||||
g.expireTime,
|
||||
func(ctx context.Context) ([]string, error) {
|
||||
return g.groupMemberDB.FindMemberUserID(ctx, groupID)
|
||||
},
|
||||
)
|
||||
return getCache(ctx, g.rcClient, g.getGroupMemberIDsKey(groupID), g.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return g.groupMemberDB.FindMemberUserID(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (map[string][]string, error) {
|
||||
@ -399,71 +363,53 @@ func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []str
|
||||
}
|
||||
m[groupID] = userIDs
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) GroupCache {
|
||||
cache := g.NewCache()
|
||||
cache.AddKeys(g.getGroupMemberIDsKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
g.getJoinedGroupsKey(userID),
|
||||
g.expireTime,
|
||||
func(ctx context.Context) ([]string, error) {
|
||||
return g.groupMemberDB.FindUserJoinedGroupID(ctx, userID)
|
||||
},
|
||||
)
|
||||
return getCache(ctx, g.rcClient, g.getJoinedGroupsKey(userID), g.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return g.groupMemberDB.FindUserJoinedGroupID(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) GroupCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, g.getJoinedGroupsKey(userID))
|
||||
}
|
||||
cache := g.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMemberInfo(
|
||||
ctx context.Context,
|
||||
groupID, userID string,
|
||||
) (groupMember *relationtb.GroupMemberModel, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
g.getGroupMemberInfoKey(groupID, userID),
|
||||
g.expireTime,
|
||||
func(ctx context.Context) (*relationtb.GroupMemberModel, error) {
|
||||
return g.groupMemberDB.Take(ctx, groupID, userID)
|
||||
},
|
||||
)
|
||||
func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *relationtb.GroupMemberModel, err error) {
|
||||
return getCache(ctx, g.rcClient, g.getGroupMemberInfoKey(groupID, userID), g.expireTime, func(ctx context.Context) (*relationtb.GroupMemberModel, error) {
|
||||
return g.groupMemberDB.Take(ctx, groupID, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMembersInfo(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
userIDs []string,
|
||||
) ([]*relationtb.GroupMemberModel, error) {
|
||||
var keys []string
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
|
||||
}
|
||||
return batchGetCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
keys,
|
||||
g.expireTime,
|
||||
g.GetGroupMemberIndex,
|
||||
func(ctx context.Context) ([]*relationtb.GroupMemberModel, error) {
|
||||
return g.groupMemberDB.Find(ctx, []string{groupID}, userIDs, nil)
|
||||
},
|
||||
)
|
||||
func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*relationtb.GroupMemberModel, error) {
|
||||
//var keys []string
|
||||
//for _, userID := range userIDs {
|
||||
// keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
|
||||
//}
|
||||
//return batchGetCache(ctx, g.rcClient, keys, g.expireTime, g.GetGroupMemberIndex, func(ctx context.Context) ([]*relationtb.GroupMemberModel, error) {
|
||||
// return g.groupMemberDB.Find(ctx, []string{groupID}, userIDs, nil)
|
||||
//})
|
||||
return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
|
||||
return g.getGroupMemberInfoKey(groupID, userID)
|
||||
}, func(ctx context.Context, userID string) (*relationtb.GroupMemberModel, error) {
|
||||
return g.groupMemberDB.Take(ctx, groupID, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMembersPage(
|
||||
@ -482,72 +428,58 @@ func (g *GroupCacheRedis) GetGroupMembersPage(
|
||||
userIDs = groupMemberIDs
|
||||
}
|
||||
groupMembers, err = g.GetGroupMembersInfo(ctx, groupID, utils.Paginate(userIDs, int(showNumber), int(showNumber)))
|
||||
|
||||
return uint32(len(userIDs)), groupMembers, err
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetAllGroupMembersInfo(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
) (groupMembers []*relationtb.GroupMemberModel, err error) {
|
||||
func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*relationtb.GroupMemberModel, err error) {
|
||||
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetAllGroupMemberInfo(
|
||||
ctx context.Context,
|
||||
groupID string,
|
||||
) ([]*relationtb.GroupMemberModel, error) {
|
||||
func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID string) ([]*relationtb.GroupMemberModel, error) {
|
||||
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keys []string
|
||||
for _, groupMemberID := range groupMemberIDs {
|
||||
keys = append(keys, g.getGroupMemberInfoKey(groupID, groupMemberID))
|
||||
}
|
||||
return batchGetCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
keys,
|
||||
g.expireTime,
|
||||
g.GetGroupMemberIndex,
|
||||
func(ctx context.Context) ([]*relationtb.GroupMemberModel, error) {
|
||||
return g.groupMemberDB.Find(ctx, []string{groupID}, groupMemberIDs, nil)
|
||||
},
|
||||
)
|
||||
//var keys []string
|
||||
//for _, groupMemberID := range groupMemberIDs {
|
||||
// keys = append(keys, g.getGroupMemberInfoKey(groupID, groupMemberID))
|
||||
//}
|
||||
//return batchGetCache(ctx, g.rcClient, keys, g.expireTime, g.GetGroupMemberIndex, func(ctx context.Context) ([]*relationtb.GroupMemberModel, error) {
|
||||
// return g.groupMemberDB.Find(ctx, []string{groupID}, groupMemberIDs, nil)
|
||||
//})
|
||||
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) GroupCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
|
||||
}
|
||||
cache := g.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
g.rcClient,
|
||||
g.getGroupMemberNumKey(groupID),
|
||||
g.expireTime,
|
||||
func(ctx context.Context) (int64, error) {
|
||||
return g.groupMemberDB.TakeGroupMemberNum(ctx, groupID)
|
||||
},
|
||||
)
|
||||
return getCache(ctx, g.rcClient, g.getGroupMemberNumKey(groupID), g.expireTime, func(ctx context.Context) (int64, error) {
|
||||
return g.groupMemberDB.TakeGroupMemberNum(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupsMemberNum(groupID ...string) GroupCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(groupID))
|
||||
for _, groupID := range groupID {
|
||||
keys = append(keys, g.getGroupMemberNumKey(groupID))
|
||||
}
|
||||
cache := g.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
231 pkg/common/db/cache/meta_cache.go vendored
@ -18,7 +18,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/dtm-labs/rockscache"
|
||||
@ -59,27 +58,40 @@ type metaCacheRedis struct {
|
||||
func (m *metaCacheRedis) ExecDel(ctx context.Context) error {
|
||||
if len(m.keys) > 0 {
|
||||
log.ZDebug(ctx, "delete cache", "keys", m.keys)
|
||||
retryTimes := 0
|
||||
for {
|
||||
if err := m.rcClient.TagAsDeletedBatch2(ctx, m.keys); err != nil {
|
||||
if retryTimes >= m.maxRetryTimes {
|
||||
err = errs.ErrInternalServer.Wrap(
|
||||
fmt.Sprintf(
|
||||
"delete cache error: %v, keys: %v, retry times %d, please check redis server",
|
||||
err,
|
||||
m.keys,
|
||||
retryTimes,
|
||||
),
|
||||
)
|
||||
log.ZWarn(ctx, "delete cache failed, please handle keys", err, "keys", m.keys)
|
||||
return err
|
||||
for _, key := range m.keys {
|
||||
for i := 0; i < m.maxRetryTimes; i++ {
|
||||
if err := m.rcClient.TagAsDeleted(key); err != nil {
|
||||
log.ZError(ctx, "delete cache failed", err, "key", key)
|
||||
time.Sleep(m.retryInterval)
|
||||
continue
|
||||
}
|
||||
retryTimes++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
|
||||
//retryTimes := 0
|
||||
//for {
|
||||
// m.rcClient.TagAsDeleted()
|
||||
// if err := m.rcClient.TagAsDeletedBatch2(ctx, []string{key}); err != nil {
|
||||
// if retryTimes >= m.maxRetryTimes {
|
||||
// err = errs.ErrInternalServer.Wrap(
|
||||
// fmt.Sprintf(
|
||||
// "delete cache error: %v, keys: %v, retry times %d, please check redis server",
|
||||
// err,
|
||||
// key,
|
||||
// retryTimes,
|
||||
// ),
|
||||
// )
|
||||
// log.ZWarn(ctx, "delete cache failed, please handle keys", err, "keys", key)
|
||||
// return err
|
||||
// }
|
||||
// retryTimes++
|
||||
// } else {
|
||||
// break
|
||||
// }
|
||||
//}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
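Note: ExecDel now tags each key as deleted individually, retrying up to maxRetryTimes with retryInterval between attempts, instead of a single TagAsDeletedBatch2 call; in the committed loop exhausted retries are only logged. In the sketch below the helper returns an error after the last attempt purely so it can be exercised directly, and tagAsDeleted stands in for rockscache's (*Client).TagAsDeleted.

package main

import (
	"errors"
	"fmt"
	"time"
)

// deleteWithRetry illustrates the per-key invalidation loop: retry the tag
// operation up to maxRetry times, pausing between failed attempts.
func deleteWithRetry(key string, maxRetry int, interval time.Duration, tagAsDeleted func(string) error) error {
	var lastErr error
	for i := 0; i < maxRetry; i++ {
		if lastErr = tagAsDeleted(key); lastErr == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("delete cache key %q failed after %d attempts: %w", key, maxRetry, lastErr)
}

func main() {
	calls := 0
	err := deleteWithRetry("cache:conversation_ids:u1", 3, 10*time.Millisecond, func(string) error {
		calls++
		if calls < 2 {
			return errors.New("redis busy")
		}
		return nil
	})
	fmt.Println("attempts:", calls, "err:", err) // succeeds on the second attempt
}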
@ -103,16 +115,11 @@ func GetDefaultOpt() rockscache.Options {
	opts := rockscache.NewDefaultOptions()
	opts.StrongConsistency = true
	opts.RandomExpireAdjustment = 0.2

	return opts
}

func getCache[T any](
	ctx context.Context,
	rcClient *rockscache.Client,
	key string,
	expire time.Duration,
	fn func(ctx context.Context) (T, error),
) (T, error) {
func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
	var t T
	var write bool
	v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
@ -125,6 +132,7 @@ func getCache[T any](
			return "", utils.Wrap(err, "")
		}
		write = true

		return string(bs), nil
	})
	if err != nil {
@ -139,95 +147,116 @@ func getCache[T any](
	err = json.Unmarshal([]byte(v), &t)
	if err != nil {
		log.ZError(ctx, "cache json.Unmarshal failed", err, "key", key, "value", v, "expire", expire)

		return t, utils.Wrap(err, "")
	}

	return t, nil
}

func batchGetCache[T any](
// func batchGetCache[T any](ctx context.Context, rcClient *rockscache.Client, keys []string, expire time.Duration, keyIndexFn func(t T, keys []string) (int, error), fn func(ctx context.Context) ([]T,
// error)) ([]T, error) {
//	batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
//		values := make(map[int]string)
//		tArrays, err := fn(ctx)
//		if err != nil {
//			return nil, err
//		}
//		for _, v := range tArrays {
//			index, err := keyIndexFn(v, keys)
//			if err != nil {
//				continue
//			}
//			bs, err := json.Marshal(v)
//			if err != nil {
//				return nil, utils.Wrap(err, "marshal failed")
//			}
//			values[index] = string(bs)
//		}
//		return values, nil
//	})
//	if err != nil {
//		return nil, err
//	}
//	var tArrays []T
//	for _, v := range batchMap {
//		if v != "" {
//			var t T
//			err = json.Unmarshal([]byte(v), &t)
//			if err != nil {
//				return nil, utils.Wrap(err, "unmarshal failed")
//			}
//			tArrays = append(tArrays, t)
//		}
//	}
//	return tArrays, nil
//}

func batchGetCache2[T any, K comparable](
	ctx context.Context,
	rcClient *rockscache.Client,
	keys []string,
	expire time.Duration,
	keyIndexFn func(t T, keys []string) (int, error),
	fn func(ctx context.Context) ([]T, error),
	keys []K,
	keyFn func(key K) string,
	fns func(ctx context.Context, key K) (T, error),
) ([]T, error) {
	batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
		values := make(map[int]string)
		tArrays, err := fn(ctx)
	if len(keys) == 0 {
		return nil, nil
	}
	res := make([]T, 0, len(keys))
	for _, key := range keys {
		val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
			return fns(ctx, key)
		})
		if err != nil {
			return nil, err
		}
		for _, v := range tArrays {
			index, err := keyIndexFn(v, keys)
			if err != nil {
				continue
			}
			bs, err := json.Marshal(v)
			if err != nil {
				return nil, utils.Wrap(err, "marshal failed")
			}
			values[index] = string(bs)
		}
		return values, nil
	})
	if err != nil {
		return nil, err
		res = append(res, val)
	}
	var tArrays []T
	for _, v := range batchMap {
		if v != "" {
			var t T
			err = json.Unmarshal([]byte(v), &t)
			if err != nil {
				return nil, utils.Wrap(err, "unmarshal failed")
			}
			tArrays = append(tArrays, t)
		}
	}
	return tArrays, nil

	return res, nil
}
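batchGetCache2 now walks the keys one by one through getCache instead of calling FetchBatch2; keys are typed (K comparable) and mapped to Redis keys by keyFn. A minimal caller sketch (illustrative names and key prefix; a real call site appears further down in this diff in GetUsersInfo):

	// Fetch several user models through the cache, falling back to the DB per key.
	users, err := batchGetCache2(ctx, rcClient, expireTime, userIDs,
		func(userID string) string { return "USER_INFO:" + userID }, // hypothetical key prefix
		func(ctx context.Context, userID string) (*relationtb.UserModel, error) {
			return userDB.Take(ctx, userID)
		})
	if err != nil {
		return nil, err
	}
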

func batchGetCacheMap[T any](
	ctx context.Context,
	rcClient *rockscache.Client,
	keys, originKeys []string,
	expire time.Duration,
	keyIndexFn func(s string, keys []string) (int, error),
	fn func(ctx context.Context) (map[string]T, error),
) (map[string]T, error) {
	batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
		tArrays, err := fn(ctx)
		if err != nil {
			return nil, err
		}
		values := make(map[int]string)
		for k, v := range tArrays {
			index, err := keyIndexFn(k, originKeys)
			if err != nil {
				continue
			}
			bs, err := json.Marshal(v)
			if err != nil {
				return nil, utils.Wrap(err, "marshal failed")
			}
			values[index] = string(bs)
		}
		return values, nil
	})
	if err != nil {
		return nil, err
	}
	tMap := make(map[string]T)
	for i, v := range batchMap {
		if v != "" {
			var t T
			err = json.Unmarshal([]byte(v), &t)
			if err != nil {
				return nil, utils.Wrap(err, "unmarshal failed")
			}
			tMap[originKeys[i]] = t
		}
	}
	return tMap, nil
}
//func batchGetCacheMap[T any](
//	ctx context.Context,
//	rcClient *rockscache.Client,
//	keys, originKeys []string,
//	expire time.Duration,
//	keyIndexFn func(s string, keys []string) (int, error),
//	fn func(ctx context.Context) (map[string]T, error),
//) (map[string]T, error) {
//	batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
//		tArrays, err := fn(ctx)
//		if err != nil {
//			return nil, err
//		}
//		values := make(map[int]string)
//		for k, v := range tArrays {
//			index, err := keyIndexFn(k, originKeys)
//			if err != nil {
//				continue
//			}
//			bs, err := json.Marshal(v)
//			if err != nil {
//				return nil, utils.Wrap(err, "marshal failed")
//			}
//			values[index] = string(bs)
//		}
//		return values, nil
//	})
//	if err != nil {
//		return nil, err
//	}
//	tMap := make(map[string]T)
//	for i, v := range batchMap {
//		if v != "" {
//			var t T
//			err = json.Unmarshal([]byte(v), &t)
//			if err != nil {
//				return nil, utils.Wrap(err, "unmarshal failed")
//			}
//			tMap[originKeys[i]] = t
//		}
//	}
//	return tMap, nil
//}

399
pkg/common/db/cache/msg.go
vendored
@ -16,13 +16,16 @@ package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
|
||||
"github.com/dtm-labs/rockscache"
|
||||
|
||||
unrelationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/unrelation"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
|
||||
"github.com/OpenIMSDK/tools/errs"
|
||||
|
||||
"github.com/gogo/protobuf/jsonpb"
|
||||
@ -33,7 +36,6 @@ import (
|
||||
"github.com/OpenIMSDK/tools/utils"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
unrelationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/unrelation"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
@ -105,11 +107,7 @@ type MsgModel interface {
|
||||
GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error)
|
||||
SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
|
||||
DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
|
||||
GetMessagesBySeq(
|
||||
ctx context.Context,
|
||||
conversationID string,
|
||||
seqs []int64,
|
||||
) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error)
|
||||
GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error)
|
||||
SetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error)
|
||||
UserDeleteMsgs(ctx context.Context, conversationID string, seqs []int64, userID string) error
|
||||
DelUserDeleteMsgsList(ctx context.Context, conversationID string, seqs []int64)
|
||||
@ -122,12 +120,7 @@ type MsgModel interface {
|
||||
JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error)
|
||||
GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error)
|
||||
DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error
|
||||
SetMessageReactionExpire(
|
||||
ctx context.Context,
|
||||
clientMsgID string,
|
||||
sessionType int32,
|
||||
expiration time.Duration,
|
||||
) (bool, error)
|
||||
SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error)
|
||||
GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error)
|
||||
SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error
|
||||
LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
|
||||
@ -158,50 +151,51 @@ func (c *msgCache) getHasReadSeqKey(conversationID string, userID string) string
|
||||
return hasReadSeq + userID + ":" + conversationID
|
||||
}
|
||||
|
||||
func (c *msgCache) setSeq(
|
||||
ctx context.Context,
|
||||
conversationID string,
|
||||
seq int64,
|
||||
getkey func(conversationID string) string,
|
||||
) error {
|
||||
func (c *msgCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error {
|
||||
return utils.Wrap1(c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) getSeq(
|
||||
ctx context.Context,
|
||||
conversationID string,
|
||||
getkey func(conversationID string) string,
|
||||
) (int64, error) {
|
||||
func (c *msgCache) getSeq(ctx context.Context, conversationID string, getkey func(conversationID string) string) (int64, error) {
|
||||
return utils.Wrap2(c.rdb.Get(ctx, getkey(conversationID)).Int64())
|
||||
}
|
||||
|
||||
func (c *msgCache) getSeqs(
|
||||
ctx context.Context,
|
||||
items []string,
|
||||
getkey func(s string) string,
|
||||
) (m map[string]int64, err error) {
|
||||
pipe := c.rdb.Pipeline()
|
||||
for _, v := range items {
|
||||
if err := pipe.Get(ctx, getkey(v)).Err(); err != nil && err != redis.Nil {
|
||||
func (c *msgCache) getSeqs(ctx context.Context, items []string, getkey func(s string) string) (m map[string]int64, err error) {
|
||||
m = make(map[string]int64, len(items))
|
||||
for i, v := range items {
|
||||
res, err := c.rdb.Get(ctx, getkey(v)).Result()
|
||||
if err != nil && err != redis.Nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
result, err := pipe.Exec(ctx)
|
||||
if err != nil && err != redis.Nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
m = make(map[string]int64, len(items))
|
||||
for i, v := range result {
|
||||
seq := v.(*redis.StringCmd)
|
||||
if seq.Err() != nil && seq.Err() != redis.Nil {
|
||||
return nil, errs.Wrap(v.Err())
|
||||
}
|
||||
val := utils.StringToInt64(seq.Val())
|
||||
val := utils.StringToInt64(res)
|
||||
if val != 0 {
|
||||
m[items[i]] = val
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
//pipe := c.rdb.Pipeline()
|
||||
//for _, v := range items {
|
||||
// if err := pipe.Get(ctx, getkey(v)).Err(); err != nil && err != redis.Nil {
|
||||
// return nil, errs.Wrap(err)
|
||||
// }
|
||||
//}
|
||||
//result, err := pipe.Exec(ctx)
|
||||
//if err != nil && err != redis.Nil {
|
||||
// return nil, errs.Wrap(err)
|
||||
//}
|
||||
//m = make(map[string]int64, len(items))
|
||||
//for i, v := range result {
|
||||
// seq := v.(*redis.StringCmd)
|
||||
// if seq.Err() != nil && seq.Err() != redis.Nil {
|
||||
// return nil, errs.Wrap(v.Err())
|
||||
// }
|
||||
// val := utils.StringToInt64(seq.Val())
|
||||
// if val != 0 {
|
||||
// m[items[i]] = val
|
||||
// }
|
||||
//}
|
||||
//return m, nil
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
|
||||
@ -221,15 +215,21 @@ func (c *msgCache) SetMinSeq(ctx context.Context, conversationID string, minSeq
|
||||
}
|
||||
|
||||
func (c *msgCache) setSeqs(ctx context.Context, seqs map[string]int64, getkey func(key string) string) error {
|
||||
pipe := c.rdb.Pipeline()
|
||||
for k, seq := range seqs {
|
||||
err := pipe.Set(ctx, getkey(k), seq, 0).Err()
|
||||
if err != nil {
|
||||
for conversationID, seq := range seqs {
|
||||
if err := c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
_, err := pipe.Exec(ctx)
|
||||
return err
|
||||
return nil
|
||||
//pipe := c.rdb.Pipeline()
|
||||
//for k, seq := range seqs {
|
||||
// err := pipe.Set(ctx, getkey(k), seq, 0).Err()
|
||||
// if err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
//}
|
||||
//_, err := pipe.Exec(ctx)
|
||||
//return err
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
|
||||
@ -252,30 +252,17 @@ func (c *msgCache) GetConversationUserMinSeq(ctx context.Context, conversationID
|
||||
return utils.Wrap2(c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64())
|
||||
}
|
||||
|
||||
func (c *msgCache) GetConversationUserMinSeqs(
|
||||
ctx context.Context,
|
||||
conversationID string,
|
||||
userIDs []string,
|
||||
) (m map[string]int64, err error) {
|
||||
func (c *msgCache) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (m map[string]int64, err error) {
|
||||
return c.getSeqs(ctx, userIDs, func(userID string) string {
|
||||
return c.getConversationUserMinSeqKey(conversationID, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *msgCache) SetConversationUserMinSeq(
|
||||
ctx context.Context,
|
||||
conversationID string,
|
||||
userID string,
|
||||
minSeq int64,
|
||||
) error {
|
||||
func (c *msgCache) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
|
||||
return utils.Wrap1(c.rdb.Set(ctx, c.getConversationUserMinSeqKey(conversationID, userID), minSeq, 0).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) SetConversationUserMinSeqs(
|
||||
ctx context.Context,
|
||||
conversationID string,
|
||||
seqs map[string]int64,
|
||||
) (err error) {
|
||||
func (c *msgCache) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
|
||||
return c.setSeqs(ctx, seqs, func(userID string) string {
|
||||
return c.getConversationUserMinSeqKey(conversationID, userID)
|
||||
})
|
||||
@ -303,11 +290,7 @@ func (c *msgCache) UserSetHasReadSeqs(ctx context.Context, userID string, hasRea
|
||||
})
|
||||
}
|
||||
|
||||
func (c *msgCache) GetHasReadSeqs(
|
||||
ctx context.Context,
|
||||
userID string,
|
||||
conversationIDs []string,
|
||||
) (map[string]int64, error) {
|
||||
func (c *msgCache) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
|
||||
return c.getSeqs(ctx, conversationIDs, func(conversationID string) string {
|
||||
return c.getHasReadSeqKey(conversationID, userID)
|
||||
})
|
||||
@ -319,6 +302,7 @@ func (c *msgCache) GetHasReadSeq(ctx context.Context, userID string, conversatio
|
||||
|
||||
func (c *msgCache) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
|
||||
key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
|
||||
|
||||
return errs.Wrap(c.rdb.HSet(ctx, key, token, flag).Err())
|
||||
}
|
||||
|
||||
@ -332,6 +316,7 @@ func (c *msgCache) GetTokensWithoutError(ctx context.Context, userID string, pla
|
||||
for k, v := range m {
|
||||
mm[k] = utils.StringToInt(v)
|
||||
}
|
||||
|
||||
return mm, nil
|
||||
}
|
||||
|
||||
@ -341,11 +326,13 @@ func (c *msgCache) SetTokenMapByUidPid(ctx context.Context, userID string, platf
|
||||
for k, v := range m {
|
||||
mm[k] = v
|
||||
}
|
||||
|
||||
return errs.Wrap(c.rdb.HSet(ctx, key, mm).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) DeleteTokenByUidPid(ctx context.Context, userID string, platform int, fields []string) error {
|
||||
key := uidPidToken + userID + ":" + constant.PlatformIDToName(platform)
|
||||
|
||||
return errs.Wrap(c.rdb.HDel(ctx, key, fields...).Err())
|
||||
}
|
||||
|
||||
@ -357,58 +344,86 @@ func (c *msgCache) allMessageCacheKey(conversationID string) string {
|
||||
return messageCache + conversationID + "_*"
|
||||
}
|
||||
|
||||
func (c *msgCache) GetMessagesBySeq(
|
||||
ctx context.Context,
|
||||
conversationID string,
|
||||
seqs []int64,
|
||||
) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) {
|
||||
pipe := c.rdb.Pipeline()
|
||||
for _, v := range seqs {
|
||||
// MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1
|
||||
key := c.getMessageCacheKey(conversationID, v)
|
||||
if err := pipe.Get(ctx, key).Err(); err != nil && err != redis.Nil {
|
||||
return nil, nil, err
|
||||
func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) {
|
||||
for _, seq := range seqs {
|
||||
res, err := c.rdb.Get(ctx, c.getMessageCacheKey(conversationID, seq)).Result()
|
||||
if err != nil {
|
||||
log.ZError(ctx, "GetMessagesBySeq failed", err, "conversationID", conversationID, "seq", seq)
|
||||
failedSeqs = append(failedSeqs, seq)
|
||||
continue
|
||||
}
|
||||
}
|
||||
result, err := pipe.Exec(ctx)
|
||||
for i, v := range result {
|
||||
cmd := v.(*redis.StringCmd)
|
||||
if cmd.Err() != nil {
|
||||
failedSeqs = append(failedSeqs, seqs[i])
|
||||
} else {
|
||||
msg := sdkws.MsgData{}
|
||||
err = msgprocessor.String2Pb(cmd.Val(), &msg)
|
||||
if err == nil {
|
||||
if msg.Status != constant.MsgDeleted {
|
||||
seqMsgs = append(seqMsgs, &msg)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
log.ZWarn(ctx, "UnmarshalString failed", err, "conversationID", conversationID, "seq", seqs[i], "msg", cmd.Val())
|
||||
}
|
||||
failedSeqs = append(failedSeqs, seqs[i])
|
||||
msg := sdkws.MsgData{}
|
||||
if err = msgprocessor.String2Pb(res, &msg); err != nil {
|
||||
log.ZError(ctx, "GetMessagesBySeq Unmarshal failed", err, "res", res, "conversationID", conversationID, "seq", seq)
|
||||
failedSeqs = append(failedSeqs, seq)
|
||||
continue
|
||||
}
|
||||
if msg.Status == constant.MsgDeleted {
|
||||
failedSeqs = append(failedSeqs, seq)
|
||||
continue
|
||||
}
|
||||
seqMsgs = append(seqMsgs, &msg)
|
||||
}
|
||||
return seqMsgs, failedSeqs, err
|
||||
|
||||
return
|
||||
//pipe := c.rdb.Pipeline()
|
||||
//for _, v := range seqs {
|
||||
// // MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1
|
||||
// key := c.getMessageCacheKey(conversationID, v)
|
||||
// if err := pipe.Get(ctx, key).Err(); err != nil && err != redis.Nil {
|
||||
// return nil, nil, err
|
||||
// }
|
||||
//}
|
||||
//result, err := pipe.Exec(ctx)
|
||||
//for i, v := range result {
|
||||
// cmd := v.(*redis.StringCmd)
|
||||
// if cmd.Err() != nil {
|
||||
// failedSeqs = append(failedSeqs, seqs[i])
|
||||
// } else {
|
||||
// msg := sdkws.MsgData{}
|
||||
// err = msgprocessor.String2Pb(cmd.Val(), &msg)
|
||||
// if err == nil {
|
||||
// if msg.Status != constant.MsgDeleted {
|
||||
// seqMsgs = append(seqMsgs, &msg)
|
||||
// continue
|
||||
// }
|
||||
// } else {
|
||||
// log.ZWarn(ctx, "UnmarshalString failed", err, "conversationID", conversationID, "seq", seqs[i], "msg", cmd.Val())
|
||||
// }
|
||||
// failedSeqs = append(failedSeqs, seqs[i])
|
||||
// }
|
||||
//}
|
||||
//return seqMsgs, failedSeqs, err
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMessageToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) {
|
||||
pipe := c.rdb.Pipeline()
|
||||
var failedMsgs []*sdkws.MsgData
|
||||
for _, msg := range msgs {
|
||||
key := c.getMessageCacheKey(conversationID, msg.Seq)
|
||||
s, err := msgprocessor.Pb2String(msg)
|
||||
if err != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
err = pipe.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err()
|
||||
if err != nil {
|
||||
failedMsgs = append(failedMsgs, msg)
|
||||
log.ZWarn(ctx, "set msg 2 cache failed", err, "msg", failedMsgs)
|
||||
key := c.getMessageCacheKey(conversationID, msg.Seq)
|
||||
if err := c.rdb.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
_, err := pipe.Exec(ctx)
|
||||
return len(failedMsgs), err
|
||||
return len(msgs), nil
|
||||
//pipe := c.rdb.Pipeline()
|
||||
//var failedMsgs []*sdkws.MsgData
|
||||
//for _, msg := range msgs {
|
||||
// key := c.getMessageCacheKey(conversationID, msg.Seq)
|
||||
// s, err := msgprocessor.Pb2String(msg)
|
||||
// if err != nil {
|
||||
// return 0, errs.Wrap(err)
|
||||
// }
|
||||
// err = pipe.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err()
|
||||
// if err != nil {
|
||||
// failedMsgs = append(failedMsgs, msg)
|
||||
// log.ZWarn(ctx, "set msg 2 cache failed", err, "msg", failedMsgs)
|
||||
// }
|
||||
//}
|
||||
//_, err := pipe.Exec(ctx)
|
||||
//return len(failedMsgs), err
|
||||
}
|
||||
|
||||
func (c *msgCache) getMessageDelUserListKey(conversationID string, seq int64) string {
|
||||
@ -420,27 +435,47 @@ func (c *msgCache) getUserDelList(conversationID, userID string) string {
|
||||
}
|
||||
|
||||
func (c *msgCache) UserDeleteMsgs(ctx context.Context, conversationID string, seqs []int64, userID string) error {
|
||||
pipe := c.rdb.Pipeline()
|
||||
for _, seq := range seqs {
|
||||
delUserListKey := c.getMessageDelUserListKey(conversationID, seq)
|
||||
userDelListKey := c.getUserDelList(conversationID, userID)
|
||||
err := pipe.SAdd(ctx, delUserListKey, userID).Err()
|
||||
err := c.rdb.SAdd(ctx, delUserListKey, userID).Err()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
err = pipe.SAdd(ctx, userDelListKey, seq).Err()
|
||||
err = c.rdb.SAdd(ctx, userDelListKey, seq).Err()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := pipe.Expire(ctx, delUserListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
if err := c.rdb.Expire(ctx, delUserListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := pipe.Expire(ctx, userDelListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
if err := c.rdb.Expire(ctx, userDelListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
_, err := pipe.Exec(ctx)
|
||||
return errs.Wrap(err)
|
||||
|
||||
return nil
|
||||
//pipe := c.rdb.Pipeline()
|
||||
//for _, seq := range seqs {
|
||||
// delUserListKey := c.getMessageDelUserListKey(conversationID, seq)
|
||||
// userDelListKey := c.getUserDelList(conversationID, userID)
|
||||
// err := pipe.SAdd(ctx, delUserListKey, userID).Err()
|
||||
// if err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
// err = pipe.SAdd(ctx, userDelListKey, seq).Err()
|
||||
// if err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
// if err := pipe.Expire(ctx, delUserListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
// if err := pipe.Expire(ctx, userDelListKey, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
//}
|
||||
//_, err := pipe.Exec(ctx)
|
||||
//return errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) GetUserDelList(ctx context.Context, userID, conversationID string) (seqs []int64, err error) {
|
||||
@ -452,6 +487,7 @@ func (c *msgCache) GetUserDelList(ctx context.Context, userID, conversationID st
|
||||
for i, v := range result {
|
||||
seqs[i] = utils.StringToInt64(v)
|
||||
}
|
||||
|
||||
return seqs, nil
|
||||
}
|
||||
|
||||
@ -460,67 +496,102 @@ func (c *msgCache) DelUserDeleteMsgsList(ctx context.Context, conversationID str
|
||||
delUsers, err := c.rdb.SMembers(ctx, c.getMessageDelUserListKey(conversationID, seq)).Result()
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
|
||||
continue
|
||||
}
|
||||
if len(delUsers) > 0 {
|
||||
pipe := c.rdb.Pipeline()
|
||||
var failedFlag bool
|
||||
for _, userID := range delUsers {
|
||||
err = pipe.SRem(ctx, c.getUserDelList(conversationID, userID), seq).Err()
|
||||
err = c.rdb.SRem(ctx, c.getUserDelList(conversationID, userID), seq).Err()
|
||||
if err != nil {
|
||||
failedFlag = true
|
||||
log.ZWarn(
|
||||
ctx,
|
||||
"DelUserDeleteMsgsList failed",
|
||||
err,
|
||||
"conversationID",
|
||||
conversationID,
|
||||
"seq",
|
||||
seq,
|
||||
"userID",
|
||||
userID,
|
||||
)
|
||||
log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq, "userID", userID)
|
||||
}
|
||||
}
|
||||
if !failedFlag {
|
||||
if err := pipe.Del(ctx, c.getMessageDelUserListKey(conversationID, seq)).Err(); err != nil {
|
||||
if err := c.rdb.Del(ctx, c.getMessageDelUserListKey(conversationID, seq)).Err(); err != nil {
|
||||
log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
}
|
||||
}
|
||||
if _, err := pipe.Exec(ctx); err != nil {
|
||||
log.ZError(ctx, "pipe exec failed", err, "conversationID", conversationID, "seq", seq)
|
||||
}
|
||||
}
|
||||
}
|
||||
//for _, seq := range seqs {
|
||||
// delUsers, err := c.rdb.SMembers(ctx, c.getMessageDelUserListKey(conversationID, seq)).Result()
|
||||
// if err != nil {
|
||||
// log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
// continue
|
||||
// }
|
||||
// if len(delUsers) > 0 {
|
||||
// pipe := c.rdb.Pipeline()
|
||||
// var failedFlag bool
|
||||
// for _, userID := range delUsers {
|
||||
// err = pipe.SRem(ctx, c.getUserDelList(conversationID, userID), seq).Err()
|
||||
// if err != nil {
|
||||
// failedFlag = true
|
||||
// log.ZWarn(
|
||||
// ctx,
|
||||
// "DelUserDeleteMsgsList failed",
|
||||
// err,
|
||||
// "conversationID",
|
||||
// conversationID,
|
||||
// "seq",
|
||||
// seq,
|
||||
// "userID",
|
||||
// userID,
|
||||
// )
|
||||
// }
|
||||
// }
|
||||
// if !failedFlag {
|
||||
// if err := pipe.Del(ctx, c.getMessageDelUserListKey(conversationID, seq)).Err(); err != nil {
|
||||
// log.ZWarn(ctx, "DelUserDeleteMsgsList failed", err, "conversationID", conversationID, "seq", seq)
|
||||
// }
|
||||
// }
|
||||
// if _, err := pipe.Exec(ctx); err != nil {
|
||||
// log.ZError(ctx, "pipe exec failed", err, "conversationID", conversationID, "seq", seq)
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
}
|
||||
|
||||
func (c *msgCache) DeleteMessages(ctx context.Context, conversationID string, seqs []int64) error {
|
||||
pipe := c.rdb.Pipeline()
|
||||
for _, seq := range seqs {
|
||||
if err := pipe.Del(ctx, c.getMessageCacheKey(conversationID, seq)).Err(); err != nil {
|
||||
if err := c.rdb.Del(ctx, c.getMessageCacheKey(conversationID, seq)).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
_, err := pipe.Exec(ctx)
|
||||
return errs.Wrap(err)
|
||||
return nil
|
||||
//pipe := c.rdb.Pipeline()
|
||||
//for _, seq := range seqs {
|
||||
// if err := pipe.Del(ctx, c.getMessageCacheKey(conversationID, seq)).Err(); err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
//}
|
||||
//_, err := pipe.Exec(ctx)
|
||||
//return errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) CleanUpOneConversationAllMsg(ctx context.Context, conversationID string) error {
|
||||
vals, err := c.rdb.Keys(ctx, c.allMessageCacheKey(conversationID)).Result()
|
||||
if err == redis.Nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
pipe := c.rdb.Pipeline()
|
||||
for _, v := range vals {
|
||||
if err := pipe.Del(ctx, v).Err(); err != nil {
|
||||
if err := c.rdb.Del(ctx, v).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
_, err = pipe.Exec(ctx)
|
||||
return errs.Wrap(err)
|
||||
return nil
|
||||
//pipe := c.rdb.Pipeline()
|
||||
//for _, v := range vals {
|
||||
// if err := pipe.Del(ctx, v).Err(); err != nil {
|
||||
// return errs.Wrap(err)
|
||||
// }
|
||||
//}
|
||||
//_, err = pipe.Exec(ctx)
|
||||
//return errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []int64) error {
|
||||
@ -528,13 +599,15 @@ func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []in
|
||||
key := c.getMessageCacheKey(userID, seq)
|
||||
result, err := c.rdb.Get(ctx, key).Result()
|
||||
if err != nil {
|
||||
if err == redis.Nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
continue
|
||||
}
|
||||
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
var msg sdkws.MsgData
|
||||
if err := jsonpb.UnmarshalString(result, &msg); err != nil {
|
||||
err = jsonpb.UnmarshalString(result, &msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg.Status = constant.MsgDeleted
|
||||
@ -546,6 +619,7 @@ func (c *msgCache) DelMsgFromCache(ctx context.Context, userID string, seqs []in
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -571,20 +645,12 @@ func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32
|
||||
|
||||
func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
|
||||
result, err := c.rdb.Get(ctx, sendMsgFailedFlag+id).Int()
|
||||
|
||||
return int32(result), errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) SetFcmToken(
|
||||
ctx context.Context,
|
||||
account string,
|
||||
platformID int,
|
||||
fcmToken string,
|
||||
expireTime int64,
|
||||
) (err error) {
|
||||
return errs.Wrap(
|
||||
c.rdb.Set(ctx, fcmToken+account+":"+strconv.Itoa(platformID), fcmToken, time.Duration(expireTime)*time.Second).
|
||||
Err(),
|
||||
)
|
||||
func (c *msgCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
|
||||
return errs.Wrap(c.rdb.Set(ctx, fcmToken+account+":"+strconv.Itoa(platformID), fcmToken, time.Duration(expireTime)*time.Second).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
|
||||
@ -597,6 +663,7 @@ func (c *msgCache) DelFcmToken(ctx context.Context, account string, platformID i
|
||||
|
||||
func (c *msgCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
|
||||
seq, err := c.rdb.Incr(ctx, userBadgeUnreadCountSum+userID).Result()
|
||||
|
||||
return int(seq), errs.Wrap(err)
|
||||
}
|
||||
|
||||
@ -610,11 +677,13 @@ func (c *msgCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string
|
||||
|
||||
func (c *msgCache) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
|
||||
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
|
||||
|
||||
return errs.Wrap(c.rdb.SetNX(ctx, key, 1, time.Minute).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
|
||||
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
|
||||
|
||||
return errs.Wrap(c.rdb.Del(ctx, key).Err())
|
||||
}
|
||||
|
||||
@ -629,6 +698,7 @@ func (c *msgCache) getMessageReactionExPrefix(clientMsgID string, sessionType in
|
||||
case constant.NotificationChatType:
|
||||
return "EX_NOTIFICATION" + clientMsgID
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
@ -637,6 +707,7 @@ func (c *msgCache) JudgeMessageReactionExist(ctx context.Context, clientMsgID st
|
||||
if err != nil {
|
||||
return false, utils.Wrap(err, "")
|
||||
}
|
||||
|
||||
return n > 0, nil
|
||||
}
|
||||
|
||||
@ -649,21 +720,11 @@ func (c *msgCache) SetMessageTypeKeyValue(
|
||||
return errs.Wrap(c.rdb.HSet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey, value).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMessageReactionExpire(
|
||||
ctx context.Context,
|
||||
clientMsgID string,
|
||||
sessionType int32,
|
||||
expiration time.Duration,
|
||||
) (bool, error) {
|
||||
func (c *msgCache) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) {
|
||||
return utils.Wrap2(c.rdb.Expire(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), expiration).Result())
|
||||
}
|
||||
|
||||
func (c *msgCache) GetMessageTypeKeyValue(
|
||||
ctx context.Context,
|
||||
clientMsgID string,
|
||||
sessionType int32,
|
||||
typeKey string,
|
||||
) (string, error) {
|
||||
func (c *msgCache) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) {
|
||||
return utils.Wrap2(c.rdb.HGet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey).Result())
|
||||
}
|
||||
|
||||
|
||||
201
pkg/common/db/cache/user.go
vendored
@ -17,6 +17,7 @@ package cache
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"hash/crc32"
|
||||
"strconv"
|
||||
"time"
|
||||
@ -70,6 +71,7 @@ func NewUserCacheRedis(
|
||||
options rockscache.Options,
|
||||
) UserCache {
|
||||
rcClient := rockscache.NewClient(rdb, options)
|
||||
|
||||
return &UserCacheRedis{
|
||||
rdb: rdb,
|
||||
metaCache: NewMetaCacheRedis(rcClient),
|
||||
@ -97,10 +99,6 @@ func (u *UserCacheRedis) getUserGlobalRecvMsgOptKey(userID string) string {
|
||||
return userGlobalRecvMsgOptKey + userID
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) getUserStatusHashKey(userID string, Id int32) string {
|
||||
return userID + "_" + string(Id) + platformID
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *relationtb.UserModel, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
@ -114,36 +112,42 @@ func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userIn
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*relationtb.UserModel, error) {
|
||||
var keys []string
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, u.getUserInfoKey(userID))
|
||||
}
|
||||
return batchGetCache(
|
||||
ctx,
|
||||
u.rcClient,
|
||||
keys,
|
||||
u.expireTime,
|
||||
func(user *relationtb.UserModel, keys []string) (int, error) {
|
||||
for i, key := range keys {
|
||||
if key == u.getUserInfoKey(user.UserID) {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return 0, errIndex
|
||||
},
|
||||
func(ctx context.Context) ([]*relationtb.UserModel, error) {
|
||||
return u.userDB.Find(ctx, userIDs)
|
||||
},
|
||||
)
|
||||
//var keys []string
|
||||
//for _, userID := range userIDs {
|
||||
// keys = append(keys, u.getUserInfoKey(userID))
|
||||
//}
|
||||
//return batchGetCache(
|
||||
// ctx,
|
||||
// u.rcClient,
|
||||
// keys,
|
||||
// u.expireTime,
|
||||
// func(user *relationtb.UserModel, keys []string) (int, error) {
|
||||
// for i, key := range keys {
|
||||
// if key == u.getUserInfoKey(user.UserID) {
|
||||
// return i, nil
|
||||
// }
|
||||
// }
|
||||
// return 0, errIndex
|
||||
// },
|
||||
// func(ctx context.Context) ([]*relationtb.UserModel, error) {
|
||||
// return u.userDB.Find(ctx, userIDs)
|
||||
// },
|
||||
//)
|
||||
return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs, func(userID string) string {
|
||||
return u.getUserInfoKey(userID)
|
||||
}, func(ctx context.Context, userID string) (*relationtb.UserModel, error) {
|
||||
return u.userDB.Take(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) UserCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, u.getUserInfoKey(userID))
|
||||
}
|
||||
cache := u.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
@ -160,22 +164,19 @@ func (u *UserCacheRedis) GetUserGlobalRecvMsgOpt(ctx context.Context, userID str
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache {
|
||||
var keys []string
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, u.getUserGlobalRecvMsgOptKey(userID))
|
||||
}
|
||||
cache := u.NewCache()
|
||||
cache.AddKeys(keys...)
|
||||
return cache
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) getOnlineStatusKey(userID string) string {
|
||||
return olineStatusKey + userID
|
||||
return cache
|
||||
}
|
||||
|
||||
// GetUserStatus get user status.
|
||||
func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
|
||||
var res []*user.OnlineStatus
|
||||
userStatus := make([]*user.OnlineStatus, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
UserIDNum := crc32.ChecksumIEEE([]byte(userID))
|
||||
modKey := strconv.Itoa(int(UserIDNum % statusMod))
|
||||
@ -183,13 +184,14 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
|
||||
key := olineStatusKey + modKey
|
||||
result, err := u.rdb.HGet(ctx, key, userID).Result()
|
||||
if err != nil {
|
||||
if err == redis.Nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
// key or field does not exist
|
||||
res = append(res, &user.OnlineStatus{
|
||||
userStatus = append(userStatus, &user.OnlineStatus{
|
||||
UserID: userID,
|
||||
Status: constant.Offline,
|
||||
PlatformIDs: nil,
|
||||
})
|
||||
|
||||
continue
|
||||
} else {
|
||||
return nil, errs.Wrap(err)
|
||||
@ -201,9 +203,10 @@ func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([
|
||||
}
|
||||
onlineStatus.UserID = userID
|
||||
onlineStatus.Status = constant.Online
|
||||
res = append(res, &onlineStatus)
|
||||
userStatus = append(userStatus, &onlineStatus)
|
||||
}
|
||||
return res, nil
|
||||
|
||||
return userStatus, nil
|
||||
}
|
||||
|
||||
// SetUserStatus Set the user status and save it in redis.
|
||||
@ -224,15 +227,16 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
|
||||
Status: constant.Online,
|
||||
PlatformIDs: []int32{platformID},
|
||||
}
|
||||
jsonData, err := json.Marshal(onlineStatus)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
jsonData, err2 := json.Marshal(&onlineStatus)
|
||||
if err2 != nil {
|
||||
return errs.Wrap(err2)
|
||||
}
|
||||
_, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
_, err2 = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
|
||||
if err2 != nil {
|
||||
return errs.Wrap(err2)
|
||||
}
|
||||
u.rdb.Expire(ctx, key, userOlineStatusExpireTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -240,7 +244,7 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
|
||||
isNil := false
|
||||
result, err := u.rdb.HGet(ctx, key, userID).Result()
|
||||
if err != nil {
|
||||
if err == redis.Nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
isNil = true
|
||||
} else {
|
||||
return errs.Wrap(err)
|
||||
@ -248,49 +252,45 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
|
||||
}
|
||||
|
||||
if status == constant.Offline {
|
||||
if isNil {
|
||||
log.ZWarn(ctx, "this user not online,maybe trigger order not right",
|
||||
err, "userStatus", status)
|
||||
return nil
|
||||
err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var onlineStatus user.OnlineStatus
|
||||
err = json.Unmarshal([]byte(result), &onlineStatus)
|
||||
} else {
|
||||
err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
var newPlatformIDs []int32
|
||||
for _, val := range onlineStatus.PlatformIDs {
|
||||
if val != platformID {
|
||||
newPlatformIDs = append(newPlatformIDs, val)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error {
|
||||
if isNil {
|
||||
log.ZWarn(ctx, "this user not online,maybe trigger order not right",
|
||||
err, "userStatus", status)
|
||||
|
||||
return nil
|
||||
}
|
||||
var onlineStatus user.OnlineStatus
|
||||
err = json.Unmarshal([]byte(result), &onlineStatus)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
var newPlatformIDs []int32
|
||||
for _, val := range onlineStatus.PlatformIDs {
|
||||
if val != platformID {
|
||||
newPlatformIDs = append(newPlatformIDs, val)
|
||||
}
|
||||
if newPlatformIDs == nil {
|
||||
_, err = u.rdb.HDel(ctx, key, userID).Result()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
} else {
|
||||
onlineStatus.PlatformIDs = newPlatformIDs
|
||||
newjsonData, err := json.Marshal(&onlineStatus)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
if newPlatformIDs == nil {
|
||||
_, err = u.rdb.HDel(ctx, key, userID).Result()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
} else {
|
||||
var onlineStatus user.OnlineStatus
|
||||
if !isNil {
|
||||
err = json.Unmarshal([]byte(result), &onlineStatus)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
onlineStatus.Status = constant.Online
|
||||
onlineStatus.UserID = userID
|
||||
onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
|
||||
onlineStatus.PlatformIDs = newPlatformIDs
|
||||
newjsonData, err := json.Marshal(&onlineStatus)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
@ -299,8 +299,49 @@ func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, statu
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *UserCacheRedis) refreshStatusOnline(ctx context.Context, userID string, platformID int32, isNil bool, err error, result, key string) error {
|
||||
var onlineStatus user.OnlineStatus
|
||||
if !isNil {
|
||||
err2 := json.Unmarshal([]byte(result), &onlineStatus)
|
||||
if err != nil {
|
||||
return errs.Wrap(err2)
|
||||
}
|
||||
onlineStatus.PlatformIDs = RemoveRepeatedElementsInList(append(onlineStatus.PlatformIDs, platformID))
|
||||
} else {
|
||||
onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
|
||||
}
|
||||
onlineStatus.Status = constant.Online
|
||||
onlineStatus.UserID = userID
|
||||
newjsonData, err := json.Marshal(&onlineStatus)
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type Comparable interface {
	~int | ~string | ~float64 | ~int32
}

func RemoveRepeatedElementsInList[T Comparable](slc []T) []T {
	var result []T
	tempMap := map[T]struct{}{}
	for _, e := range slc {
		if _, found := tempMap[e]; !found {
			tempMap[e] = struct{}{}
			result = append(result, e)
		}
	}

	return result
}

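RemoveRepeatedElementsInList keeps the first occurrence of each element and preserves input order; refreshStatusOnline above uses it to avoid duplicate platform IDs. A quick illustrative example (not from this diff; assumes fmt is imported in the calling file):

	// []int32{1, 2, 2, 3, 1} -> []int32{1, 2, 3}
	platformIDs := RemoveRepeatedElementsInList([]int32{1, 2, 2, 3, 1})
	fmt.Println(platformIDs)
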
@ -50,7 +50,7 @@ type ConversationDatabase interface {
|
||||
GetConversationIDs(ctx context.Context, userID string) ([]string, error)
|
||||
GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error)
|
||||
GetAllConversationIDs(ctx context.Context) ([]string, error)
|
||||
GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
|
||||
//GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
|
||||
GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error)
|
||||
GetConversationIDsNeedDestruct(ctx context.Context) ([]*relationtb.ConversationModel, error)
|
||||
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
|
||||
@ -295,9 +295,9 @@ func (c *conversationDatabase) GetAllConversationIDs(ctx context.Context) ([]str
|
||||
return c.conversationDB.GetAllConversationIDs(ctx)
|
||||
}
|
||||
|
||||
func (c *conversationDatabase) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
|
||||
return c.cache.GetUserAllHasReadSeqs(ctx, ownerUserID)
|
||||
}
|
||||
//func (c *conversationDatabase) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
|
||||
// return c.cache.GetUserAllHasReadSeqs(ctx, ownerUserID)
|
||||
//}
|
||||
|
||||
func (c *conversationDatabase) GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
|
||||
return c.conversationDB.GetConversationsByConversationID(ctx, conversationIDs)
|
||||
|
||||
@ -60,9 +60,11 @@ func (g *ConversationLocalCache) GetConversationIDs(ctx context.Context, userID
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
hash, ok := g.conversationIDs[userID]
|
||||
g.lock.Unlock()
|
||||
|
||||
if !ok || hash.hash != resp.Hash {
|
||||
conversationIDsResp, err := g.client.Client.GetConversationIDs(ctx, &conversation.GetConversationIDsReq{
|
||||
UserID: userID,
|
||||
@ -70,11 +72,16 @@ func (g *ConversationLocalCache) GetConversationIDs(ctx context.Context, userID
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
g.conversationIDs[userID] = Hash{
|
||||
hash: resp.Hash,
|
||||
ids: conversationIDsResp.ConversationIDs,
|
||||
}
|
||||
|
||||
return conversationIDsResp.ConversationIDs, nil
|
||||
}
|
||||
|
||||
return hash.ids, nil
|
||||
}
|
||||
|
||||
@ -24,6 +24,7 @@ func (l *LogGorm) Search(ctx context.Context, keyword string, start time.Time, e
|
||||
if end.UnixMilli() != 0 {
|
||||
db = l.db.WithContext(ctx).Where("create_time <= ?", end)
|
||||
}
|
||||
db = db.Order("create_time desc")
|
||||
return ormutil.GormSearch[relationtb.Log](db, []string{"user_id"}, keyword, pageNumber, showNumber)
|
||||
}
|
||||
|
||||
|
||||
@ -63,13 +63,12 @@ const (
|
||||
)
|
||||
|
||||
func NewMinio() (s3.Interface, error) {
|
||||
conf := config.Config.Object.Minio
|
||||
u, err := url.Parse(conf.Endpoint)
|
||||
u, err := url.Parse(config.Config.Object.Minio.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts := &minio.Options{
|
||||
Creds: credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
|
||||
Creds: credentials.NewStaticV4(config.Config.Object.Minio.AccessKeyID, config.Config.Object.Minio.SecretAccessKey, config.Config.Object.Minio.SessionToken),
|
||||
Secure: u.Scheme == "https",
|
||||
}
|
||||
client, err := minio.New(u.Host, opts)
|
||||
@ -77,29 +76,35 @@ func NewMinio() (s3.Interface, error) {
|
||||
return nil, err
|
||||
}
|
||||
m := &Minio{
|
||||
bucket: conf.Bucket,
|
||||
bucket: config.Config.Object.Minio.Bucket,
|
||||
core: &minio.Core{Client: client},
|
||||
lock: &sync.Mutex{},
|
||||
init: false,
|
||||
}
|
||||
if conf.SignEndpoint == "" || conf.SignEndpoint == conf.Endpoint {
|
||||
if config.Config.Object.Minio.SignEndpoint == "" || config.Config.Object.Minio.SignEndpoint == config.Config.Object.Minio.Endpoint {
|
||||
m.opts = opts
|
||||
m.sign = m.core.Client
|
||||
m.bucketURL = conf.Endpoint + "/" + conf.Bucket + "/"
|
||||
m.prefix = u.Path
|
||||
u.Path = ""
|
||||
config.Config.Object.Minio.Endpoint = u.String()
|
||||
m.signEndpoint = config.Config.Object.Minio.Endpoint
|
||||
} else {
|
||||
su, err := url.Parse(conf.SignEndpoint)
|
||||
su, err := url.Parse(config.Config.Object.Minio.SignEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.opts = &minio.Options{
|
||||
Creds: credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
|
||||
Creds: credentials.NewStaticV4(config.Config.Object.Minio.AccessKeyID, config.Config.Object.Minio.SecretAccessKey, config.Config.Object.Minio.SessionToken),
|
||||
Secure: su.Scheme == "https",
|
||||
}
|
||||
m.sign, err = minio.New(su.Host, m.opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.bucketURL = conf.SignEndpoint + "/" + conf.Bucket + "/"
|
||||
m.prefix = su.Path
|
||||
su.Path = ""
|
||||
config.Config.Object.Minio.SignEndpoint = su.String()
|
||||
m.signEndpoint = config.Config.Object.Minio.SignEndpoint
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
@ -110,14 +115,15 @@ func NewMinio() (s3.Interface, error) {
|
||||
}
|
||||
|
||||
type Minio struct {
|
||||
bucket string
|
||||
bucketURL string
|
||||
location string
|
||||
opts *minio.Options
|
||||
core *minio.Core
|
||||
sign *minio.Client
|
||||
lock sync.Locker
|
||||
init bool
|
||||
bucket string
|
||||
signEndpoint string
|
||||
location string
|
||||
opts *minio.Options
|
||||
core *minio.Core
|
||||
sign *minio.Client
|
||||
lock sync.Locker
|
||||
init bool
|
||||
prefix string
|
||||
}
|
||||
|
||||
func (m *Minio) initMinio(ctx context.Context) error {
|
||||
@ -255,7 +261,7 @@ func (m *Minio) AuthSign(ctx context.Context, uploadID string, name string, expi
|
||||
return nil, err
|
||||
}
|
||||
result := s3.AuthSignResult{
|
||||
URL: m.bucketURL + name,
|
||||
URL: m.signEndpoint + "/" + m.bucket + "/" + name,
|
||||
Query: url.Values{"uploadId": {uploadID}},
|
||||
Parts: make([]s3.SignPart, len(partNumbers)),
|
||||
}
|
||||
@ -269,11 +275,13 @@ func (m *Minio) AuthSign(ctx context.Context, uploadID string, name string, expi
|
||||
request = signer.SignV4Trailer(*request, creds.AccessKeyID, creds.SecretAccessKey, creds.SessionToken, m.location, nil)
|
||||
result.Parts[i] = s3.SignPart{
|
||||
PartNumber: partNumber,
|
||||
URL: request.URL.String(),
|
||||
Query: url.Values{"partNumber": {strconv.Itoa(partNumber)}},
|
||||
Header: request.Header,
|
||||
}
|
||||
}
|
||||
if m.prefix != "" {
|
||||
result.URL = m.signEndpoint + m.prefix + "/" + m.bucket + "/" + name
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
@ -285,6 +293,9 @@ func (m *Minio) PresignedPutObject(ctx context.Context, name string, expire time
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if m.prefix != "" {
|
||||
rawURL.Path = path.Join(m.prefix, rawURL.Path)
|
||||
}
|
||||
return rawURL.String(), nil
|
||||
}
|
||||
|
||||
@ -396,6 +407,9 @@ func (m *Minio) presignedGetObject(ctx context.Context, name string, expire time
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if m.prefix != "" {
|
||||
rawURL.Path = path.Join(m.prefix, rawURL.Path)
|
||||
}
|
||||
return rawURL.String(), nil
|
||||
}
|
||||
|
||||
|
||||
0
pkg/common/db/s3/oss/oss.go
Normal file → Executable file
0
pkg/common/db/unrelation/mongo.go
Normal file → Executable file
0
pkg/common/db/unrelation/msg.go
Normal file → Executable file
0
pkg/common/db/unrelation/user.go
Normal file → Executable file
@ -86,3 +86,6 @@ func (cli *K8sDR) GetClientLocalConns() map[string][]*grpc.ClientConn {
	fmt.Println("should not call this function!!!!!!!!!!!!!!!!!!!!!!!!!")
	return nil
}
func (cli *K8sDR) Close() {
	return
}

@ -12,4 +12,5 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package startrpc // import "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
// Package startrpc start rpc server.
package startrpc

@ -16,11 +16,12 @@ package startrpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
|
||||
|
||||
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
@ -32,6 +33,7 @@ import (
|
||||
"github.com/OpenIMSDK/tools/utils"
|
||||
)
|
||||
|
||||
// Start rpc server.
|
||||
func Start(
|
||||
rpcPort int,
|
||||
rpcRegisterName string,
|
||||
@ -39,16 +41,8 @@ func Start(
|
||||
rpcFn func(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error,
|
||||
options ...grpc.ServerOption,
|
||||
) error {
|
||||
fmt.Println(
|
||||
"start",
|
||||
rpcRegisterName,
|
||||
"server, port: ",
|
||||
rpcPort,
|
||||
"prometheusPort:",
|
||||
prometheusPort,
|
||||
", OpenIM version: ",
|
||||
config.Version,
|
||||
)
|
||||
fmt.Printf("start %s server, port: %d, prometheusPort: %d, OpenIM version: %s",
|
||||
rpcRegisterName, rpcPort, prometheusPort, config.Version)
|
||||
listener, err := net.Listen(
|
||||
"tcp",
|
||||
net.JoinHostPort(network.GetListenIP(config.Config.Rpc.ListenIP), strconv.Itoa(rpcPort)),
|
||||
@ -57,18 +51,12 @@ func Start(
|
||||
return err
|
||||
}
|
||||
defer listener.Close()
|
||||
zkClient, err := discovery_register.NewDiscoveryRegister(config.Config.Envs.Discovery)
|
||||
/*
|
||||
zkClient, err := zookeeper.NewClient(config.Config.Zookeeper.ZkAddr, config.Config.Zookeeper.Schema,
|
||||
zookeeper.WithFreq(time.Hour), zookeeper.WithUserNameAndPassword(
|
||||
config.Config.Zookeeper.Username,
|
||||
config.Config.Zookeeper.Password,
|
||||
), zookeeper.WithRoundRobin(), zookeeper.WithTimeout(10), zookeeper.WithLogger(log.NewZkLogger()))*/
|
||||
client, err := discovery_register.NewDiscoveryRegister(config.Config.Envs.Discovery)
|
||||
if err != nil {
|
||||
return utils.Wrap1(err)
|
||||
}
|
||||
//defer zkClient.CloseZK()
|
||||
zkClient.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
defer client.Close()
|
||||
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
registerIP, err := network.GetRpcRegisterIP(config.Config.Rpc.RegisterIP)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -88,11 +76,11 @@ func Start(
|
||||
}
|
||||
srv := grpc.NewServer(options...)
|
||||
defer srv.GracefulStop()
|
||||
err = rpcFn(zkClient, srv)
|
||||
err = rpcFn(client, srv)
|
||||
if err != nil {
|
||||
return utils.Wrap1(err)
|
||||
}
|
||||
err = zkClient.Register(
|
||||
err = client.Register(
|
||||
rpcRegisterName,
|
||||
registerIP,
|
||||
rpcPort,
|
||||
@ -108,5 +96,6 @@ func Start(
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return utils.Wrap1(srv.Serve(listener))
|
||||
}
|
||||
|
||||
0
pkg/common/tls/tls.go
Normal file → Executable file
@@ -59,8 +59,13 @@ func (c *ConversationRpcClient) GetSingleConversationRecvMsgOpt(ctx context.Cont
return conversation.GetConversation().RecvMsgOpt, err
}

-func (c *ConversationRpcClient) SingleChatFirstCreateConversation(ctx context.Context, recvID, sendID string) error {
-_, err := c.Client.CreateSingleChatConversations(ctx, &pbconversation.CreateSingleChatConversationsReq{RecvID: recvID, SendID: sendID})
+func (c *ConversationRpcClient) SingleChatFirstCreateConversation(ctx context.Context, recvID, sendID,
+conversationID string, conversationType int32) error {
+_, err := c.Client.CreateSingleChatConversations(ctx,
+&pbconversation.CreateSingleChatConversationsReq{
+RecvID: recvID, SendID: sendID, ConversationID: conversationID,
+ConversationType: conversationType,
+})
return err
}

@@ -228,8 +228,9 @@ func (s *NotificationSender) NotificationWithSesstionType(ctx context.Context, s
}
var req msg.SendMsgReq
var msg sdkws.MsgData
+var userInfo *sdkws.UserInfo
if notificationOpt.WithRpcGetUsername && s.getUserInfo != nil {
-userInfo, err := s.getUserInfo(ctx, sendID)
+userInfo, err = s.getUserInfo(ctx, sendID)
if err != nil {
log.ZWarn(ctx, "getUserInfo failed", err, "sendID", sendID)
} else {
@@ -251,7 +252,7 @@ func (s *NotificationSender) NotificationWithSesstionType(ctx context.Context, s
msg.CreateTime = utils.GetCurrentTimestampByMill()
msg.ClientMsgID = utils.GetMsgID(sendID)
optionsConfig := s.contentTypeConf[contentType]
-if sesstionType == constant.SuperGroupChatType && contentType == constant.HasReadReceipt {
+if sendID == recvID && contentType == constant.HasReadReceipt {
optionsConfig.ReliabilityLevel = constant.UnreliableNotification
}
options := config.GetOptionsByNotification(optionsConfig)

@@ -41,6 +41,7 @@ func (c *ConversationNotificationSender) ConversationSetPrivateNotification(ctx
IsPrivate: isPrivateChat,
+ConversationID: conversationID,
}

return c.Notification(ctx, sendID, recvID, constant.ConversationPrivateChatNotification, tips)
}

@@ -50,6 +51,7 @@ func (c *ConversationNotificationSender) ConversationChangeNotification(ctx cont
UserID: userID,
+ConversationIDList: conversationIDs,
}

return c.Notification(ctx, userID, userID, constant.ConversationChangeNotification, tips)
}

@@ -65,5 +67,6 @@ func (c *ConversationNotificationSender) ConversationUnreadChangeNotification(
HasReadSeq: hasReadSeq,
+UnreadCountTime: unreadCountTime,
}

return c.Notification(ctx, userID, userID, constant.ConversationUnreadNotification, tips)
}
0	pkg/rpcclient/notification/group.go	Normal file → Executable file
0	pkg/rpcclient/third.go	Normal file → Executable file
@@ -64,9 +64,9 @@ set +e

# Later, after discarding Docker, the Docker keyword is unreliable, and Kubepods is used
if grep -qE 'docker|kubepods' /proc/1/cgroup || [ -f /.dockerenv ]; then
-openim::color::echo ${COLOR_BLUE} "Environment in the interior of the container"
+openim::color::echo ${COLOR_CYAN} "Environment in the interior of the container"
else
-openim::color::echo ${COLOR_BLUE} "The environment is outside the container"
+openim::color::echo ${COLOR_CYAN} "The environment is outside the container"
openim::util::check_ports ${OPENIM_DEPENDENCY_PORT_LISTARIES[@]} || return 0
fi

@@ -37,6 +37,8 @@ readonly ipv6regex='(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:
clear
. $(dirname ${BASH_SOURCE})/lib/util.sh
+
+openim::util::ensure-bash-version

trap 'openim::util::onCtrlC' INT

function openim::util::onCtrlC() {

@@ -19,11 +19,7 @@ source "${OPENIM_ROOT}/scripts/install/common.sh"

cd "$OPENIM_ROOT"

-if command -v docker-compose &> /dev/null; then
-docker-compose ps
-else
-docker compose ps
-fi
+openim::util::check_docker_and_compose_versions

progress() {
local _main_pid="$1"

@@ -30,6 +30,8 @@ openim::util::ensure_docker_daemon_connectivity

DOCKER_COMPOSE_COMMAND=
# Check if docker-compose command is available
+openim::util::check_docker_and_compose_versions
+
if command -v docker compose &> /dev/null
then
openim::log::info "docker compose command is available"

@@ -51,17 +51,7 @@ execute_scripts() {
"${OPENIM_ROOT}"/scripts/env_check.sh
}

-# Start docker compose
-start_docker_compose() {
-openim::log::info "Checking if docker-compose command is available"
-if command -v docker-compose &> /dev/null; then
-docker-compose up -d
-else
-docker compose up -d
-fi
-
-"${OPENIM_ROOT}"/scripts/docker-check-service.sh
-}
+openim::util::check_docker_and_compose_versions

main() {
load_env

@@ -250,6 +250,39 @@ openim::util::host_arch() {
echo "${host_arch}"
}

+# Define a bash function to check the versions of Docker and Docker Compose
+openim::util::check_docker_and_compose_versions() {
+# Define the required versions of Docker and Docker Compose
+required_docker_version="20.10.0"
+required_compose_version="2.0"
+
+# Get the currently installed Docker version
+installed_docker_version=$(docker --version | awk '{print $3}' | sed 's/,//')
+
+# Check if the installed Docker version matches the required version
+if [[ "$installed_docker_version" < "$required_docker_version" ]]; then
+echo "Docker version mismatch. Installed: $installed_docker_version, Required: $required_docker_version"
+return 1
+fi
+
+# Check if the docker compose sub-command is available
+if ! docker compose version &> /dev/null; then
+echo "Docker does not support the docker compose sub-command"
+return 1
+fi
+
+# Get the currently installed Docker Compose version
+installed_compose_version=$(docker compose version --short)
+
+# Check if the installed Docker Compose version matches the required version
+if [[ "$installed_compose_version" < "$required_compose_version" ]]; then
+echo "Docker Compose version mismatch. Installed: $installed_compose_version, Required: $required_compose_version"
+return 1
+fi
+
+}
+

# The `openim::util::check_ports` function analyzes the state of processes based on given ports.
# It accepts multiple ports as arguments and prints:
# 1. The state of the process (whether it's running or not).
@@ -1321,3 +1354,7 @@ function openim::util::gen_os_arch() {
exit 1
fi
}
+
+if [[ "$*" =~ openim::util:: ]];then
+eval $*
+fi
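The new `check_docker_and_compose_versions` helper, together with the `eval` dispatcher added at the bottom of `util.sh`, lets other scripts and the Makefile invoke library functions by name. A minimal sketch of that usage, assuming the script is executable and you are in the repository root (the echoed message is illustrative only):

```bash
# Hypothetical manual invocation of the new helper via the util.sh dispatcher
./scripts/lib/util.sh openim::util::check_docker_and_compose_versions \
  && echo "Docker and Docker Compose meet the minimum versions"
```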
@@ -167,6 +167,7 @@ define MAKEFILE_EXAMPLE
# make install-deepcopy-gen Install deepcopy-gen tools if the license is missing.
# make build BINS=openim-api V=1 DEBUG=1 Build debug binaries for only openim-api.
# make multiarch -j PLATFORMS="linux_arm64 linux_amd64" V=1 Build binaries for both platforms.
+# make image
endef
export MAKEFILE_EXAMPLE

@@ -17,7 +17,7 @@
#

GO := go
-GO_SUPPORTED_VERSIONS ?= 1.18|1.19|1.20|1.21|1.22
+GO_SUPPORTED_VERSIONS ?= 1.19|1.20|1.21|1.22

GO_LDFLAGS += -X $(VERSION_PACKAGE).gitVersion=$(GIT_TAG) \
-X $(VERSION_PACKAGE).gitCommit=$(GIT_COMMIT) \
@@ -45,7 +45,7 @@ ifeq ($(origin GOBIN), undefined)
endif

# COMMANDS is Specify all files under ${ROOT_DIR}/cmd/ and ${ROOT_DIR}/tools/ except those ending in.md
-COMMANDS ?= $(filter-out %.md, $(wildcard ${ROOT_DIR}/cmd/* ${ROOT_DIR}/tools/* ${ROOT_DIR}/cmd/openim-rpc/*))
+COMMANDS ?= $(filter-out %.md, $(wildcard ${ROOT_DIR}/cmd/* ${ROOT_DIR}/tools/* ${ROOT_DIR}/tools/data-conversion/chat/cmd/* ${ROOT_DIR}/tools/data-conversion/openim/cmd/* ${ROOT_DIR}/cmd/openim-rpc/*))
ifeq (${COMMANDS},)
$(error Could not determine COMMANDS, set ROOT_DIR or run in source dir)
endif
@@ -136,6 +136,7 @@ ifneq ($(shell $(GO) version | grep -q -E '\bgo($(GO_SUPPORTED_VERSIONS))\b' &&
$(error unsupported go version. Please make install one of the following supported version: '$(GO_SUPPORTED_VERSIONS)')
endif

+## go.build.%: Build binaries for a specific platform
.PHONY: go.build.%
go.build.%:
$(eval COMMAND := $(word 2,$(subst ., ,$*)))
@@ -159,6 +160,14 @@ go.build.%:
CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) $(GO) build $(GO_BUILD_FLAGS) -o \
$(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT) $(ROOT_DIR)/tools/$(COMMAND)/$(COMMAND).go; \
chmod +x $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT); \
+elif [ -f $(ROOT_DIR)/tools/data-conversion/openim/cmd/$(COMMAND)/$(COMMAND).go ]; then \
+CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) $(GO) build $(GO_BUILD_FLAGS) -o \
+$(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT) $(ROOT_DIR)/tools/data-conversion/openim/cmd/$(COMMAND)/$(COMMAND).go; \
+chmod +x $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT); \
+elif [ -f $(ROOT_DIR)/tools/data-conversion/chat/cmd/$(COMMAND)/$(COMMAND).go ]; then \
+CGO_ENABLED=0 GOOS=$(OS) GOARCH=$(ARCH) $(GO) build $(GO_BUILD_FLAGS) -o \
+$(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT) $(ROOT_DIR)/tools/data-conversion/chat/cmd/$(COMMAND)/$(COMMAND).go; \
+chmod +x $(BIN_TOOLS_DIR)/$(OS)/$(ARCH)/$(COMMAND)$(GO_OUT_EXT); \
fi \
fi

@@ -21,7 +21,6 @@
#

DOCKER := docker
-DOCKER_SUPPORTED_API_VERSION ?= 1.32|1.40|1.41|1.42

# read: https://github.com/openimsdk/open-im-server/blob/main/docs/conversions/images.md
REGISTRY_PREFIX ?= ghcr.io/openimsdk
@@ -43,8 +42,8 @@ endif

# Determine image files by looking into build/images/*/Dockerfile
IMAGES_DIR ?= $(wildcard ${ROOT_DIR}/build/images/*)
-# Determine images names by stripping out the dir names
-IMAGES ?= $(filter-out tools,$(foreach image,${IMAGES_DIR},$(notdir ${image})))
+# Determine images names by stripping out the dir names, and filter out the undesired directories
+IMAGES ?= $(filter-out Dockerfile openim-tools openim-cmdutils,$(foreach image,${IMAGES_DIR},$(notdir ${image})))

ifeq (${IMAGES},)
$(error Could not determine IMAGES, set ROOT_DIR or run in source dir)
@@ -75,22 +74,13 @@ image.docker-buildx:
## image.verify: Verify docker version
.PHONY: image.verify
image.verify:
-$(eval API_VERSION := $(shell $(DOCKER) version | grep -E 'API version: {1,6}[0-9]' | head -n1 | awk '{print $$3} END { if (NR==0) print 0}' ))
-$(eval PASS := $(shell echo "$(API_VERSION) > $(DOCKER_SUPPORTED_API_VERSION)" | bc))
-@if [ $(PASS) -ne 1 ]; then \
-$(DOCKER) -v ;\
-echo "Unsupported docker version. Docker API version should be greater than $(DOCKER_SUPPORTED_API_VERSION)"; \
-exit 1; \
-fi
+@$(ROOT_DIR)/scripts/lib/util.sh openim::util::check_docker_and_compose_versions

## image.daemon.verify: Verify docker daemon experimental features
.PHONY: image.daemon.verify
image.daemon.verify:
-$(eval PASS := $(shell $(DOCKER) version | grep -q -E 'Experimental: {1,5}true' && echo 1 || echo 0))
-@if [ $(PASS) -ne 1 ]; then \
-echo "Experimental features of Docker daemon is not enabled. Please add \"experimental\": true in '/etc/docker/daemon.json' and then restart Docker daemon."; \
-exit 1; \
-fi
+@$(ROOT_DIR)/scripts/lib/util.sh openim::util::ensure_docker_daemon_connectivity
+@$(ROOT_DIR)/scripts/lib/util.sh openim::util::ensure-docker-buildx

# If you wish built the manager image targeting other platforms you can use the --platform flag.
# (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it.
@@ -110,10 +100,9 @@ image.build.%: go.build.%
$(eval ARCH := $(word 2,$(subst _, ,$(PLATFORM))))
@echo "===========> Building docker image $(IMAGE) $(VERSION) for $(IMAGE_PLAT)"
@mkdir -p $(TMP_DIR)/$(IMAGE)/$(PLATFORM)
-@awk '/FROM/ {c++; if (c==2) {print; next}} c>=2' $(ROOT_DIR)/build/images/$(IMAGE)/Dockerfile \
-| sed -e "s#BASE_IMAGE#$(BASE_IMAGE)#g" \
--e 's/--from=builder //g' \
--e 's#COPY /openim/openim-server/#COPY ./#g' > $(TMP_DIR)/$(IMAGE)/Dockerfile
+@cat $(ROOT_DIR)/build/images/Dockerfile\
+| sed "s#BASE_IMAGE#$(BASE_IMAGE)#g" \
+| sed "s#BINARY_NAME#$(IMAGE)#g" >$(TMP_DIR)/$(IMAGE)/Dockerfile
@cp $(BIN_DIR)/platforms/$(IMAGE_PLAT)/$(IMAGE) $(TMP_DIR)/$(IMAGE)
$(eval BUILD_SUFFIX := $(_DOCKER_BUILD_EXTRA_ARGS) --pull -t $(REGISTRY_PREFIX)/$(IMAGE)-$(ARCH):$(VERSION) $(TMP_DIR)/$(IMAGE))
@if [ $(shell $(GO) env GOARCH) != $(ARCH) ] ; then \
@@ -124,7 +113,6 @@ image.build.%: go.build.%
fi
@rm -rf $(TMP_DIR)/$(IMAGE)

# https://docs.docker.com/build/building/multi-platform/
# busybox image supports amd64, arm32v5, arm32v6, arm32v7, arm64v8, i386, ppc64le, and s390x
## image.buildx.%: Build docker images with buildx

@@ -1,6 +1,6 @@
module github.com/openimsdk/open-im-server/test/typecheck

-go 1.18
+go 1.19

require golang.org/x/tools v0.12.0

@@ -1,3 +1,3 @@
module github.com/openimsdk/open-im-server/v3/tools/changelog

-go 1.18
+go 1.19

@@ -35,7 +35,6 @@ import (
"github.com/go-zookeeper/zk"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"gorm.io/driver/mysql"
"gorm.io/gorm"

@@ -158,26 +157,38 @@ func checkMysql() error {

func checkMongo() error {
var client *mongo.Client
uri := "mongodb://sample.host:27017/?maxPoolSize=20&w=majority"
defer func() {
if client != nil {
client.Disconnect(context.TODO())
}
}()
mongodbHosts := ""
for i, v := range config.Config.Mongo.Address {
if i == len(config.Config.Mongo.Address)-1 {
mongodbHosts += v
if config.Config.Mongo.Uri != "" {
uri = config.Config.Mongo.Uri
} else {
mongodbHosts := ""
for i, v := range config.Config.Mongo.Address {
if i == len(config.Config.Mongo.Address)-1 {
mongodbHosts += v
} else {
mongodbHosts += v + ","
}
}
if config.Config.Mongo.Password != "" && config.Config.Mongo.Username != "" {
uri = fmt.Sprintf("mongodb://%s:%s@%s/%s?maxPoolSize=%d&authSource=admin",
config.Config.Mongo.Username, config.Config.Mongo.Password, mongodbHosts,
config.Config.Mongo.Database, config.Config.Mongo.MaxPoolSize)
} else {
mongodbHosts += v + ","
uri = fmt.Sprintf("mongodb://%s/%s/?maxPoolSize=%d&authSource=admin",
mongodbHosts, config.Config.Mongo.Database,
config.Config.Mongo.MaxPoolSize)
}
}
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(
fmt.Sprintf("mongodb://%v:%v@%v/?authSource=admin",
config.Config.Mongo.Username, config.Config.Mongo.Password, mongodbHosts)))
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(uri))
if err != nil {
return errs.Wrap(err)
} else {
err = client.Ping(context.TODO(), &readpref.ReadPref{})
err = client.Ping(context.TODO(), nil)
if err != nil {
return errs.Wrap(err)
}

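For reference, the URI shapes the reworked checkMongo builds (either `config.Config.Mongo.Uri` directly or a URI composed from address, credentials and database) can also be exercised by hand. A minimal sketch, assuming `mongosh` is installed; every value below is a placeholder to be replaced with your own deployment's settings:

```bash
# Hypothetical connectivity check against a URI of the same shape checkMongo builds
mongosh "mongodb://root:openIM123@127.0.0.1:27017/openim_v3?maxPoolSize=100&authSource=admin" \
  --eval 'db.runCommand({ ping: 1 })'
```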
@@ -1,3 +1,3 @@
module github.com/openimsdk/open-im-server/v3/tools/component

-go 1.18
+go 1.19

98	tools/data-conversion/README.md	Normal file
@@ -0,0 +1,98 @@
# OpenIM V2 to V3 Data Migration Guide

This guide describes the detailed steps for migrating from OpenIM V2 to V3. Make sure you are familiar with every step before starting the migration, and follow the guide exactly.

+ [OpenIM Chat](https://github.com/OpenIMSDK/chat)
+ [OpenIM Server](https://github.com/OpenIMSDK/Open-IM-Server)


### 1. Back up your data

Before starting the migration, it is strongly recommended to back up all related data to guard against any possible data loss.

### 2. Migrate the OpenIM MySQL data

+ Location: `open-im-server/v3/tools/data-conversion/openim/mysql.go`
+ Configure the database settings in `mysql.go`.
+ Manually create the V3 database and make sure its character set is `utf8mb4` (see the sketch after the configuration block below).

```go
// V2 database configuration
var (
	usernameV2 = "root"
	passwordV2 = "openIM"
	addrV2     = "127.0.0.1:13306"
	databaseV2 = "openIM_v2"
)

// V3 database configuration
var (
	usernameV3 = "root"
	passwordV3 = "openIM123"
	addrV3     = "127.0.0.1:13306"
	databaseV3 = "openIM_v3"
)
```

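One possible way to create that V3 database by hand is shown below; the host, port, user, and database name are taken from the sample configuration above and must be adjusted to your own deployment (this command is an illustration, not part of the migration tooling):

```bash
# Hypothetical example: create the V3 database with the utf8mb4 character set
mysql -h 127.0.0.1 -P 13306 -u root -p \
  -e "CREATE DATABASE IF NOT EXISTS openIM_v3 DEFAULT CHARACTER SET utf8mb4;"
```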
**Run the data migration command:**

```bash
make build BINS="conversion-mysql"
```

The resulting binary is placed under `_output/bin/tools`.

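The conversion tool then has to be executed by hand. A minimal sketch, assuming a linux/amd64 build host and the default output layout `_output/bin/tools/<os>/<arch>/` (adjust the platform directory to your own machine):

```bash
# Hypothetical invocation of the built MySQL conversion tool
./_output/bin/tools/linux/amd64/conversion-mysql
```
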
### 3. Convert chat messages (optional)

+ Only messages stored in Kafka are supported.
+ Location: `open-im-server/v3/tools/data-conversion/openim/msg.go`
+ Configure the message and server settings in `msg.go`.

```go
var (
	topic       = "ws2ms_chat"      // V2 Kafka topic
	kafkaAddr   = "127.0.0.1:9092"  // V2 Kafka address
	rpcAddr     = "127.0.0.1:10130" // V3 RPC address
	adminUserID = "openIM123456"    // V3 admin user ID
	concurrency = 4                 // concurrency level
)
```

**Run the data migration command:**

```bash
make build BINS="conversion-msg"
```

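Before running the converter it can help to confirm that the V2 topic actually exists on the configured broker. A minimal sketch, assuming the standard Kafka CLI tools are on the PATH; the broker address and topic name are the sample values from the configuration above:

```bash
# Hypothetical pre-check; substitute your own broker address and topic
kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --describe --topic ws2ms_chat
```
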
### 4. Convert the business server data

+ Only messages stored in Kafka are supported.
+ Location: `open-im-server/v3/tools/data-conversion/chat/chat.go`
+ You need to create the V3 database manually and make sure its character set is `utf8mb4` (as in the sketch in section 2).
+ Configure the database settings in `main.go`.

```go
// V2 database configuration
var (
	usernameV2 = "root"
	passwordV2 = "openIM"
	addrV2     = "127.0.0.1:13306"
	databaseV2 = "admin_chat"
)

// V3 database configuration
var (
	usernameV3 = "root"
	passwordV3 = "openIM123"
	addrV3     = "127.0.0.1:13306"
	databaseV3 = "openim_enterprise"
)
```

**Run the data migration command:**

```bash
make build BINS="conversion-chat"
```