diff --git a/deployments/Readme.md b/deployments/Readme.md index a7b288130..8da4f90aa 100644 --- a/deployments/Readme.md +++ b/deployments/Readme.md @@ -1,175 +1,188 @@ -# OpenIM Application Containerization Deployment Guide +# Kubernetes Deployment -OpenIM supports a variety of cluster deployment methods, including but not limited to `helm`, `sealos`, `kustomize` +## Resource Requests -Various contributors, as well as previous official releases, have provided some referenceable solutions: +- CPU: 2 cores +- Memory: 4 GiB +- Disk usage: 20 GiB (on Node) -+ [k8s-jenkins Repository](https://github.com/OpenIMSDK/k8s-jenkins) -+ [open-im-server-k8s-deploy Repository](https://github.com/openimsdk/open-im-server-k8s-deploy) -+ [openim-charts Repository](https://github.com/OpenIMSDK/openim-charts) -+ [deploy-openim Repository](https://github.com/showurl/deploy-openim) +## Preconditions -### Dependency Check +Ensure that you have already deployed the following components: -```bash -Kubernetes: >= 1.16.0-0 -Helm: >= 3.0 -``` +- Redis +- MongoDB +- Kafka +- MinIO -### Minimum Configuration +## Origin Deploy -The recommended minimum configuration for a production environment is as follows: +### Enter the target dir + +`cd ./deployments/deploy/` + +### Deploy configs and dependencies + +Update your ConfigMap `openim-config.yml`. **You can check the official docs for more details.** + +In `openim-config.yml`, you need to modify the following configurations: + +**discovery.yml** + +- `kubernetes.namespace`: default is `default`, you can change it to your namespace. + +**mongodb.yml** + +- `address`: set to your existing MongoDB address, or the Mongo Service name and port in your deployment. + +- `database`: set to your MongoDB database name. (The database must already be created.) + +- `authSource`: set to your MongoDB authSource. (authSource specifies the database associated with the user's credentials; the user must be created in this database.) 
+ +**kafka.yml** + +- `address`: set to your existing Kafka address, or the Kafka Service name and port in your deployment. + +**redis.yml** + +- `address`: set to your existing Redis address, or the Redis Service name and port in your deployment. + +**minio.yml** + +- `internalAddress`: set to your MinIO Service name and port in your deployment. +- `externalAddress`: set to your already exposed MinIO external address. + +### Set the secret + +A Secret is an object that contains a small amount of sensitive data, such as a password or token. Secrets are similar to ConfigMaps. + +#### Redis: + +Update the `redis-password` value in `redis-secret.yml` to your Redis password encoded in base64. ```yaml -CPU: 4 -Memory: 8G -Disk: 100G +apiVersion: v1 +kind: Secret +metadata: + name: openim-redis-secret +type: Opaque +data: + redis-password: b3BlbklNMTIz # update to your redis password encoded in base64, if need empty, you can set to "" ``` -## Configuration File Generation +#### Mongo: -We have automated all the files, making the generation of configuration files optional for OpenIM. However, if you desire custom configurations, you can follow the steps below: +Update the `mongo_openim_username`, `mongo_openim_password` values in `mongo-secret.yml` to your Mongo username and password encoded in base64. -```bash -$ make init -# Alternatively, use script: -# ./scripts/init-config.sh +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: openim-mongo-secret +type: Opaque +data: + mongo_openim_username: b3BlbklN # update to your mongo username encoded in base64, if need empty, you can set to "" (these user credentials need to exist in the authSource database). + mongo_openim_password: b3BlbklNMTIz # update to your mongo password encoded in base64, if need empty, you can set to "" ``` -At this point, configuration files will be generated under `deployments/openim/config`, which you can modify as per your requirements. 
+#### Minio: -## Cluster Setup +Update the `minio-root-user` and `minio-root-password` value in `minio-secret.yml` to your MinIO accessKeyID and secretAccessKey encoded in base64. -If you already have a `kubernetes` cluster, or if you wish to build a `kubernetes` cluster from scratch, you can skip this step. - -For a quick start, I used [sealos](https://github.com/labring/sealos) to rapidly set up the cluster, with sealos also being a wrapper for kubeadm at its core: - -```bash -$ SEALOS_VERSION=`curl -s https://api.github.com/repos/labring/sealos/releases/latest | grep -oE '"tag_name": "[^"]+"' | head -n1 | cut -d'"' -f4` && \ - curl -sfL https://raw.githubusercontent.com/labring/sealos/${SEALOS_VERSION}/scripts/install.sh | - sh -s ${SEALOS_VERSION} labring/sealos +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: openim-minio-secret +type: Opaque +data: + minio-root-user: cm9vdA== # update to your minio accessKeyID encoded in base64, if need empty, you can set to "" + minio-root-password: b3BlbklNMTIz # update to your minio secretAccessKey encoded in base64, if need empty, you can set to "" ``` -**Supported Versions:** +#### Kafka: -+ docker: `labring/kubernetes-docker`:(v1.24.0~v1.27.0) -+ containerd: `labring/kubernetes`:(v1.24.0~v1.27.0) +Update the `kafka-password` value in `kafka-secret.yml` to your Kafka password encoded in base64. 
-#### Cluster Installation: - -Cluster details are as follows: - -| Hostname | IP Address | System Info | -| -------- | ---------- | ------------------------------------------------------------ | -| master01 | 10.0.0.9 | `Linux VM-0-9-ubuntu 5.15.0-76-generic #83-Ubuntu SMP Thu Jun 15 19:16:32 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux` | -| node01 | 10.0.0.4 | Similar to master01 | -| node02 | 10.0.0.10 | Similar to master01 | - -```bash -$ export CLUSTER_USERNAME=ubuntu -$ export CLUSTER_PASSWORD=123456 -$ sudo sealos run labring/kubernetes:v1.25.0 labring/helm:v3.8.2 labring/calico:v3.24.1 \ - --masters 10.0.0.9 \ - --nodes 10.0.0.4,10.0.0.10 \ - -u "$CLUSTER_USERNAME" \ - -p "$CLUSTER_PASSWORD" +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: openim-kafka-secret +type: Opaque +data: + kafka-password: b3BlbklNMTIz # update to your kafka password encoded in base64, if need empty, you can set to "" ``` -> **Node** Uninstallation method: using `kubeadm` for uninstallation does not remove `etcd` and `cni` related configurations. Manual clearance or using `sealos` for uninstallation is needed. +### Apply the secrets + +```shell +kubectl apply -f redis-secret.yml -f minio-secret.yml -f mongo-secret.yml -f kafka-secret.yml +``` + +### Apply all config + +`kubectl apply -f ./openim-config.yml` + +> Attention: If you use the `default` namespace, you can execute `clusterRole.yml` to create a cluster role binding for the default service account. > -> ```bash -> $ sealos reset -> ``` +> The namespace is configured in `discovery.yml` within `openim-config.yml`; you can change `kubernetes.namespace` to your namespace. -If you are local, you can also use Kind and Minikube to test, for example, using Kind: + +**Execute `clusterRole.yml`** + +`kubectl apply -f ./clusterRole.yml` + +### Run all deployments and services + +> Note: Ensure that infrastructure services like MinIO, Redis, and Kafka are running before deploying the main applications. 
```bash -$ GO111MODULE="on" go get sigs.k8s.io/kind@v0.11.1 -$ kind create cluster +kubectl apply \ + -f openim-api-deployment.yml \ + -f openim-api-service.yml \ + -f openim-crontask-deployment.yml \ + -f openim-rpc-user-deployment.yml \ + -f openim-rpc-user-service.yml \ + -f openim-msggateway-deployment.yml \ + -f openim-msggateway-service.yml \ + -f openim-push-deployment.yml \ + -f openim-push-service.yml \ + -f openim-msgtransfer-service.yml \ + -f openim-msgtransfer-deployment.yml \ + -f openim-rpc-conversation-deployment.yml \ + -f openim-rpc-conversation-service.yml \ + -f openim-rpc-auth-deployment.yml \ + -f openim-rpc-auth-service.yml \ + -f openim-rpc-group-deployment.yml \ + -f openim-rpc-group-service.yml \ + -f openim-rpc-friend-deployment.yml \ + -f openim-rpc-friend-service.yml \ + -f openim-rpc-msg-deployment.yml \ + -f openim-rpc-msg-service.yml \ + -f openim-rpc-third-deployment.yml \ + -f openim-rpc-third-service.yml ``` -### Installing helm +### Verification -Helm simplifies the deployment and management of Kubernetes applications to a large extent by offering version control and release management through packaging. 
- -**Using Script:** +After deploying the services, verify that everything is running smoothly: ```bash -$ curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash +# Check the status of all pods +kubectl get pods + +# Check the status of services +kubectl get svc + +# Check the status of deployments +kubectl get deployments + +# View all resources +kubectl get all ``` -**Adding Repository:** +### clean all -```bash -$ helm repo add brigade https://openimsdk.github.io/openim-charts -``` +`kubectl delete -f ./` -### OpenIM Image Strategy +### Notes: -Automated offerings include aliyun, ghcr, docker hub: [Image Documentation](https://github.com/openimsdk/open-im-server/blob/main/docs/contrib/images.md) - -**Local Test Build Method:** - -```bash -$ make image -``` - -> This command assists in quickly building the required images locally. For a detailed build strategy, refer to the [Build Documentation](https://github.com/openimsdk/open-im-server/blob/main/build/README.md). 
- -## Installation - -Explore our Helm-Charts repository and read through: [Helm-Charts Repository](https://github.com/openimsdk/helm-charts) - - -Using the helm charts repository, you can ignore the following configuration, but if you want to just use the server and scale on top of it, you can go ahead: - -**Use the Helm template to generate the deployment yaml file: `openim-charts.yaml`** - -**Gen Image:** - -```bash -../scripts/genconfig.sh ../scripts/install/environment.sh ./templates/helm-image.yaml > ./charts/generated-configs/helm-image.yaml -``` - -**Gen Charts:** - -```bash -for chart in ./charts/*/; do - if [[ "$chart" == *"generated-configs"* || "$chart" == *"helmfile.yaml"* ]]; then - continue - fi - - if [ -f "${chart}values.yaml" ]; then - helm template "$chart" -f "./charts/generated-configs/helm-image.yaml" -f "./charts/generated-configs/config.yaml" -f "./charts/generated-configs/notification.yaml" >> openim-charts.yaml - else - helm template "$chart" >> openim-charts.yaml - fi -done -``` - -**Use Helmfile:** - -```bash -GO111MODULE=on go get github.com/roboll/helmfile@latest -``` - -```bash -export MONGO_ADDRESS=im-mongo -export MONGO_PORT=27017 -export REDIS_ADDRESS=im-redis-master -export REDIS_PORT=6379 -export KAFKA_ADDRESS=im-kafka -export KAFKA_PORT=9092 -export OBJECT_APIURL="https://openim.server.com/api" -export MINIO_ENDPOINT="http://im-minio:9000" -export MINIO_SIGN_ENDPOINT="https://openim.server.com/im-minio-api" - -mkdir ./charts/generated-configs -../scripts/genconfig.sh ../scripts/install/environment.sh ./templates/config.yaml > ./charts/generated-configs/config.yaml -cp ../config/notification.yaml ./charts/generated-configs/notification.yaml -../scripts/genconfig.sh ../scripts/install/environment.sh ./templates/helm-image.yaml > ./charts/generated-configs/helm-image.yaml -``` - -```bash -helmfile apply -``` +- If you use a specific namespace for your deployment, be sure to append the -n flag to your kubectl commands. 
diff --git a/deployments/deploy/kafka-secret.yml b/deployments/deploy/kafka-secret.yml new file mode 100644 index 000000000..dcee689c8 --- /dev/null +++ b/deployments/deploy/kafka-secret.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: openim-kafka-secret +type: Opaque +data: + kafka-password: "" diff --git a/deployments/deploy/minio-secret.yml b/deployments/deploy/minio-secret.yml new file mode 100644 index 000000000..3ea09a19f --- /dev/null +++ b/deployments/deploy/minio-secret.yml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: openim-minio-secret +type: Opaque +data: + minio-root-user: cm9vdA== # Base64 encoded "root" + minio-root-password: b3BlbklNMTIz # Base64 encoded "openIM123" diff --git a/deployments/deploy/minio-statefulset.yml b/deployments/deploy/minio-statefulset.yml new file mode 100644 index 000000000..9cf0a42d0 --- /dev/null +++ b/deployments/deploy/minio-statefulset.yml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio + labels: + app: minio +spec: + replicas: 2 + selector: + matchLabels: + app: minio + template: + metadata: + labels: + app: minio + spec: + containers: + - name: minio + image: minio/minio:RELEASE.2024-01-11T07-46-16Z + ports: + - containerPort: 9000 # MinIO service port + - containerPort: 9090 # MinIO console port + volumeMounts: + - name: minio-data + mountPath: /data + - name: minio-config + mountPath: /root/.minio + env: + - name: TZ + value: "Asia/Shanghai" + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: openim-minio-secret + key: minio-root-user + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: openim-minio-secret + key: minio-root-password + command: + - "/bin/sh" + - "-c" + - | + mkdir -p /data && \ + minio server /data --console-address ":9090" + volumes: + - name: minio-data + persistentVolumeClaim: + claimName: minio-pvc + - name: minio-config + persistentVolumeClaim: + claimName: minio-config-pvc + +--- +apiVersion: v1 
+kind: PersistentVolumeClaim +metadata: + name: minio-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: minio-config-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + + diff --git a/deployments/deploy/mongo-secret.yml b/deployments/deploy/mongo-secret.yml new file mode 100644 index 000000000..c3c10af24 --- /dev/null +++ b/deployments/deploy/mongo-secret.yml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: openim-mongo-secret +type: Opaque +data: + mongo_openim_username: b3BlbklN # base64 for "openIM", this user credentials need in authSource database. + mongo_openim_password: b3BlbklNMTIz # base64 for "openIM123" diff --git a/deployments/deploy/mongo-statefulset.yml b/deployments/deploy/mongo-statefulset.yml new file mode 100644 index 000000000..41cd4cb7f --- /dev/null +++ b/deployments/deploy/mongo-statefulset.yml @@ -0,0 +1,108 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mongo-statefulset +spec: + serviceName: "mongo" + replicas: 2 + selector: + matchLabels: + app: mongo + template: + metadata: + labels: + app: mongo + spec: + containers: + - name: mongo + image: mongo:7.0 + command: ["/bin/bash", "-c"] + args: + - > + docker-entrypoint.sh mongod --wiredTigerCacheSizeGB ${wiredTigerCacheSizeGB} --auth & + until mongosh -u ${MONGO_INITDB_ROOT_USERNAME} -p ${MONGO_INITDB_ROOT_PASSWORD} --authenticationDatabase admin --eval "db.runCommand({ ping: 1 })" &>/dev/null; do + echo "Waiting for MongoDB to start..."; + sleep 1; + done && + mongosh -u ${MONGO_INITDB_ROOT_USERNAME} -p ${MONGO_INITDB_ROOT_PASSWORD} --authenticationDatabase admin --eval " + db = db.getSiblingDB(\"${MONGO_INITDB_DATABASE}\"); + if (!db.getUser(\"${MONGO_OPENIM_USERNAME}\")) { + db.createUser({ + user: \"${MONGO_OPENIM_USERNAME}\", + pwd: \"${MONGO_OPENIM_PASSWORD}\", + roles: [{role: \"readWrite\", db: 
\"${MONGO_INITDB_DATABASE}\"}] + }); + print(\"User created successfully: \"); + print(\"Username: ${MONGO_OPENIM_USERNAME}\"); + print(\"Password: ${MONGO_OPENIM_PASSWORD}\"); + print(\"Database: ${MONGO_INITDB_DATABASE}\"); + } else { + print(\"User already exists in database: ${MONGO_INITDB_DATABASE}, Username: ${MONGO_OPENIM_USERNAME}\"); + } + " && + tail -f /dev/null + ports: + - containerPort: 27017 + env: + - name: MONGO_INITDB_ROOT_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-init-secret + key: mongo_initdb_root_username + - name: MONGO_INITDB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-init-secret + key: mongo_initdb_root_password + - name: MONGO_INITDB_DATABASE + valueFrom: + secretKeyRef: + name: openim-mongo-init-secret + key: mongo_initdb_database + - name: MONGO_OPENIM_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-init-secret + key: mongo_openim_username + - name: MONGO_OPENIM_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-init-secret + key: mongo_openim_password + - name: TZ + value: "Asia/Shanghai" + - name: wiredTigerCacheSizeGB + value: "1" + volumeMounts: + - name: mongo-storage + mountPath: /data/db + + volumes: + - name: mongo-storage + persistentVolumeClaim: + claimName: mongo-pvc + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mongo-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + +--- +apiVersion: v1 +kind: Secret +metadata: + name: openim-mongo-init-secret +type: Opaque +data: + mongo_initdb_root_username: cm9vdA== # base64 for "root" + mongo_initdb_root_password: b3BlbklNMTIz # base64 for "openIM123" + mongo_initdb_database: b3BlbmltX3Yz # base64 for "openim_v3" + mongo_openim_username: b3BlbklN # base64 for "openIM" + mongo_openim_password: b3BlbklNMTIz # base64 for "openIM123" diff --git a/deployments/deploy/openim-api-deployment.yml b/deployments/deploy/openim-api-deployment.yml new file mode 100644 index 
000000000..d2d27dc0c --- /dev/null +++ b/deployments/deploy/openim-api-deployment.yml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openim-api +spec: + replicas: 2 + selector: + matchLabels: + app: openim-api + template: + metadata: + labels: + app: openim-api + spec: + containers: + - name: openim-api-container + image: openim/openim-api:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10002 + - containerPort: 12002 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-config.yml b/deployments/deploy/openim-config.yml new file mode 100644 index 000000000..105dd98e3 --- /dev/null +++ b/deployments/deploy/openim-config.yml @@ -0,0 +1,1056 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: openim-config +data: + discovery.yml: | + enable: "kubernetes" # "kubernetes" or "etcd" + kubernetes: + namespace: default + etcd: + rootDirectory: openim + address: [ localhost:12379 ] + username: '' + password: '' + + rpcService: + user: user-rpc-service + friend: friend-rpc-service + msg: msg-rpc-service + push: push-rpc-service + messageGateway: messagegateway-rpc-service + group: group-rpc-service + auth: auth-rpc-service + conversation: conversation-rpc-service + third: third-rpc-service + + log.yml: | + # Log storage path, default is acceptable, change to a full path if modification is needed + storageLocation: ./logs/ + # Log rotation period (in hours), default is acceptable + rotationTime: 24 + # Number of log files to retain, 
default is acceptable + remainRotationCount: 2 + # Log level settings: 3 for production environment; 6 for more verbose logging in debugging environments + remainLogLevel: 6 + # Whether to output to standard output, default is acceptable + isStdout: true + # Whether to log in JSON format, default is acceptable + isJson: false + # output simplify log when KeyAndValues's value len is bigger than 50 in rpc method log + isSimplify: true + + mongodb.yml: | + # URI for database connection, leave empty if using address and credential settings directly + uri: '' + # List of MongoDB server addresses + address: [ mongo-service:37017 ] + # Name of the database + database: openim_v3 + # Username for database authentication + username: '' # openIM + # Password for database authentication + password: '' # openIM123 + # Authentication source for database authentication, if use root user, set it to admin + authSource: openim_v3 + # Maximum number of connections in the connection pool + maxPoolSize: 100 + # Maximum number of retry attempts for a failed database connection + maxRetry: 10 + + local-cache.yml: | + user: + topic: DELETE_CACHE_USER + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 + group: + topic: DELETE_CACHE_GROUP + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 + friend: + topic: DELETE_CACHE_FRIEND + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 + conversation: + topic: DELETE_CACHE_CONVERSATION + slotNum: 100 + slotSize: 2000 + successExpire: 300 + failedExpire: 5 + + openim-api.yml: | + api: + # Listening IP; 0.0.0.0 means both internal and external IPs are listened to, default is recommended + listenIP: 0.0.0.0 + # Listening ports; if multiple are configured, multiple instances will be launched, must be consistent with the number of prometheus.ports + ports: [ 10002 ] + # API compression level; 0: default compression, 1: best compression, 2: best speed, -1: no compression + compressionLevel: 0 + + 
prometheus: + # Whether to enable prometheus + enable: true + # Prometheus listening ports, must match the number of api.ports + ports: [ 12002 ] + # This address can be accessed via a browser + grafanaURL: http://127.0.0.1:13000/ + + openim-rpc-user.yml: | + rpc: + # API or other RPCs can access this RPC through this IP; if left blank, the internal network IP is obtained by default + registerIP: + # Listening IP; 0.0.0.0 means both internal and external IPs are listened to, if blank, the internal network IP is automatically obtained by default + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. + ports: [ 10320 ] + prometheus: + # Whether to enable prometheus + enable: true + # Prometheus listening ports, must be consistent with the number of rpc.ports + ports: [ 12320 ] + + openim-crontask.yml: | + cronExecuteTime: 0 2 * * * + retainChatRecords: 365 + fileExpireTime: 180 + deleteObjectType: ["msg-picture","msg-file", "msg-voice","msg-video","msg-video-snapshot","sdklog"] + + openim-msggateway.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. 
+ ports: [ 10140 ] + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [ 12140 ] + + # IP address that the RPC/WebSocket service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + + longConnSvr: + # WebSocket listening ports, must match the number of rpc.ports + ports: [ 10001 ] + # Maximum number of WebSocket connections + websocketMaxConnNum: 100000 + # Maximum length of the entire WebSocket message packet + websocketMaxMsgLen: 4096 + # WebSocket connection handshake timeout in seconds + websocketTimeout: 10 + + openim-msgtransfer.yml: | + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; each port corresponds to an instance of monitoring. Ensure these are managed accordingly + # Because four instances have been launched, four ports need to be specified + ports: [ 12020 ] + + openim-push.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. 
+ ports: [ 10170 ] + + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [ 12170 ] + + maxConcurrentWorkers: 3 + #Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified. + enable: + geTui: + pushUrl: https://restapi.getui.com/v2/$appId + masterSecret: + appKey: + intent: + channelID: + channelName: + fcm: + # Prioritize using file paths. If the file path is empty, use URL + filePath: # File path is concatenated with the parameters passed in through - c(`mage` default pass in `config/`) and filePath. + authURL: # Must start with https or http. + jpush: + appKey: + masterSecret: + pushURL: + pushIntent: + + # iOS system push sound and badge count + iosPush: + pushSound: xxx + badgeCount: true + production: false + + fullUserCache: true + + openim-rpc-auth.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. 
+ ports: [ 10200 ] + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [12200] + + tokenPolicy: + # Token validity period, in days + expire: 90 + + openim-rpc-conversation.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. + ports: [ 10220 ] + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [ 12200 ] + + tokenPolicy: + # Token validity period, in days + expire: 90 + + openim-rpc-friend.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. 
These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. + ports: [ 10240 ] + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [ 12240 ] + + openim-rpc-group.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. + ports: [ 10260 ] + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [ 12260 ] + + enableHistoryForNewMembers: true + + openim-rpc-msg.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. 
These must match the number of configured prometheus ports + ports: [ 10280 ] + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [ 12280 ] + + + # Does sending messages require friend verification + friendVerify: false + + openim-rpc-third.yml: | + rpc: + # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP + registerIP: + # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP + listenIP: 0.0.0.0 + # autoSetPorts indicates whether to automatically set the ports + # if you use in kubernetes, set it to false + autoSetPorts: false + # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports + # It will only take effect when autoSetPorts is set to false. 
+ ports: [ 10300 ] + + prometheus: + # Enable or disable Prometheus monitoring + enable: true + # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup + ports: [ 12300 ] + + + object: + # Use MinIO as object storage, or set to "cos", "oss", "kodo", "aws", while also configuring the corresponding settings + enable: minio + cos: + bucketURL: https://temp-1252357374.cos.ap-chengdu.myqcloud.com + secretID: + secretKey: + sessionToken: + publicRead: false + oss: + endpoint: https://oss-cn-chengdu.aliyuncs.com + bucket: demo-9999999 + bucketURL: https://demo-9999999.oss-cn-chengdu.aliyuncs.com + accessKeyID: + accessKeySecret: + sessionToken: + publicRead: false + kodo: + endpoint: http://s3.cn-south-1.qiniucs.com + bucket: kodo-bucket-test + bucketURL: http://kodo-bucket-test-oetobfb.qiniudns.com + accessKeyID: + accessKeySecret: + sessionToken: + publicRead: false + aws: + region: ap-southeast-2 + bucket: testdemo832234 + accessKeyID: + secretAccessKey: + sessionToken: + publicRead: false + + share.yml: | + secret: openIM123 + + imAdminUserID: ["imAdmin"] + + # 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time + multiLogin: + policy: 1 + maxNumOneEnd: 30 + + kafka.yml: | + # Username for authentication + username: '' + # Password for authentication + password: '' + # Producer acknowledgment settings + producerAck: + # Compression type to use (e.g., none, gzip, snappy) + compressType: none + # List of Kafka broker addresses + address: [ "kafka-service:19094" ] + # Kafka topic for Redis integration + toRedisTopic: toRedis + # Kafka topic for MongoDB integration + toMongoTopic: toMongo + # Kafka topic for push notifications + toPushTopic: toPush + # Kafka topic for offline push notifications + toOfflinePushTopic: toOfflinePush + # Consumer group ID for Redis topic + toRedisGroupID: redis + # Consumer group ID for MongoDB topic + toMongoGroupID: mongo + # Consumer 
group ID for push notifications topic + toPushGroupID: push + # Consumer group ID for offline push notifications topic + toOfflinePushGroupID: offlinePush + # TLS (Transport Layer Security) configuration + tls: + # Enable or disable TLS + enableTLS: false + # CA certificate file path + caCrt: + # Client certificate file path + clientCrt: + # Client key file path + clientKey: + # Client key password + clientKeyPwd: + # Whether to skip TLS verification (not recommended for production) + insecureSkipVerify: false + + redis.yml: | + address: [ "redis-service:16379" ] + username: + password: # openIM123 + clusterMode: false + db: 0 + maxRetry: 10 + poolSize: 100 + + minio.yml: | + # Name of the bucket in MinIO + bucket: openim + # Access key ID for MinIO authentication + accessKeyID: root + # Secret access key for MinIO authentication + secretAccessKey: # openIM123 + # Session token for MinIO authentication (optional) + sessionToken: + # Internal address of the MinIO server + internalAddress: minio-service:10005 + # External address of the MinIO server, accessible from outside. Supports both HTTP and HTTPS using a domain name + externalAddress: http://minio-service:10005 + # Flag to enable or disable public read access to the bucket + publicRead: "false" + + notification.yml: | + groupCreated: + isSendMsg: true + # Reliability level of the message sending. + # Set to 1 to send only when online, 2 for guaranteed delivery. + reliabilityLevel: 1 + # This setting is effective only when 'isSendMsg' is true. + # It controls whether to count unread messages. + unreadCount: false + # Configuration for offline push notifications. + offlinePush: + # Enables or disables offline push notifications. + enable: false + # Title for the notification when a group is created. + title: create group title + # Description for the notification. + desc: create group desc + # Additional information for the notification. 
+ ext: create group ext + + groupInfoSet: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupInfoSet title + desc: groupInfoSet desc + ext: groupInfoSet ext + + joinGroupApplication: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: joinGroupApplication title + desc: joinGroupApplication desc + ext: joinGroupApplication ext + + memberQuit: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: memberQuit title + desc: memberQuit desc + ext: memberQuit ext + + groupApplicationAccepted: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupApplicationAccepted title + desc: groupApplicationAccepted desc + ext: groupApplicationAccepted ext + + groupApplicationRejected: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupApplicationRejected title + desc: groupApplicationRejected desc + ext: groupApplicationRejected ext + + groupOwnerTransferred: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupOwnerTransferred title + desc: groupOwnerTransferred desc + ext: groupOwnerTransferred ext + + memberKicked: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: memberKicked title + desc: memberKicked desc + ext: memberKicked ext + + memberInvited: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: memberInvited title + desc: memberInvited desc + ext: memberInvited ext + + memberEnter: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: memberEnter title + desc: memberEnter desc + ext: memberEnter ext + + groupDismissed: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: 
groupDismissed title + desc: groupDismissed desc + ext: groupDismissed ext + + groupMuted: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupMuted title + desc: groupMuted desc + ext: groupMuted ext + + groupCancelMuted: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupCancelMuted title + desc: groupCancelMuted desc + ext: groupCancelMuted ext + defaultTips: + tips: group Cancel Muted + + groupMemberMuted: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupMemberMuted title + desc: groupMemberMuted desc + ext: groupMemberMuted ext + + groupMemberCancelMuted: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupMemberCancelMuted title + desc: groupMemberCancelMuted desc + ext: groupMemberCancelMuted ext + + groupMemberInfoSet: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupMemberInfoSet title + desc: groupMemberInfoSet desc + ext: groupMemberInfoSet ext + + groupInfoSetAnnouncement: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupInfoSetAnnouncement title + desc: groupInfoSetAnnouncement desc + ext: groupInfoSetAnnouncement ext + + groupInfoSetName: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: groupInfoSetName title + desc: groupInfoSetName desc + ext: groupInfoSetName ext + + #############################friend################################# + friendApplicationAdded: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: Somebody applies to add you as a friend + desc: Somebody applies to add you as a friend + ext: Somebody applies to add you as a friend + + friendApplicationApproved: + isSendMsg: true + reliabilityLevel: 1 + 
unreadCount: false + offlinePush: + enable: true + title: Someone applies to add your friend application + desc: Someone applies to add your friend application + ext: Someone applies to add your friend application + + friendApplicationRejected: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: Someone rejected your friend application + desc: Someone rejected your friend application + ext: Someone rejected your friend application + + friendAdded: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: We have become friends + desc: We have become friends + ext: We have become friends + + friendDeleted: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: deleted a friend + desc: deleted a friend + ext: deleted a friend + + friendRemarkSet: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: Your friend's profile has been changed + desc: Your friend's profile has been changed + ext: Your friend's profile has been changed + + blackAdded: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: blocked a user + desc: blocked a user + ext: blocked a user + + blackDeleted: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: Remove a blocked user + desc: Remove a blocked user + ext: Remove a blocked user + + friendInfoUpdated: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: friend info updated + desc: friend info updated + ext: friend info updated + + #####################user######################### + userInfoUpdated: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: userInfo updated + desc: userInfo updated + ext: userInfo updated + + userStatusChanged: + isSendMsg: false + 
reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: false + title: user status changed + desc: user status changed + ext: user status changed + + #####################conversation######################### + conversationChanged: + isSendMsg: false + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: conversation changed + desc: conversation changed + ext: conversation changed + + conversationSetPrivate: + isSendMsg: true + reliabilityLevel: 1 + unreadCount: false + offlinePush: + enable: true + title: burn after reading + desc: burn after reading + ext: burn after reading + + webhooks.yml: | + url: http://127.0.0.1:10006/callbackExample + beforeSendSingleMsg: + enable: false + timeout: 5 + failedContinue: true + # Only the contentType in allowedTypes will send the callback. + # Supports two formats: a single type or a range. The range is defined by the lower and upper bounds connected with a hyphen ("-"). + # e.g. allowedTypes: [1, 100, 200-500, 600-700] means that only contentType within the range + # {1, 100} ∪ [200, 500] ∪ [600, 700] will be allowed through the filter. + # If not set, all contentType messages will through this filter. + allowedTypes: [] + # Only the contentType not in deniedTypes will send the callback. + # Supports two formats, same as allowedTypes. + # If not set, all contentType messages will through this filter. + deniedTypes: [] + beforeUpdateUserInfoEx: + enable: false + timeout: 5 + failedContinue: true + afterUpdateUserInfoEx: + enable: false + timeout: 5 + afterSendSingleMsg: + enable: false + timeout: 5 + # Only the senID/recvID specified in attentionIds will send the callback + # if not set, all user messages will be callback + attentionIds: [] + # See beforeSendSingleMsg comment. + allowedTypes: [] + deniedTypes: [] + beforeSendGroupMsg: + enable: false + timeout: 5 + failedContinue: true + # See beforeSendSingleMsg comment. 
+ allowedTypes: [] + deniedTypes: [] + beforeMsgModify: + enable: false + timeout: 5 + failedContinue: true + # See beforeSendSingleMsg comment. + allowedTypes: [] + deniedTypes: [] + afterSendGroupMsg: + enable: false + timeout: 5 + # See beforeSendSingleMsg comment. + allowedTypes: [] + deniedTypes: [] + afterUserOnline: + enable: false + timeout: 5 + afterUserOffline: + enable: false + timeout: 5 + afterUserKickOff: + enable: false + timeout: 5 + beforeOfflinePush: + enable: false + timeout: 5 + failedContinue: true + beforeOnlinePush: + enable: false + timeout: 5 + failedContinue: true + beforeGroupOnlinePush: + enable: false + timeout: 5 + failedContinue: true + beforeAddFriend: + enable: false + timeout: 5 + failedContinue: true + beforeUpdateUserInfo: + enable: false + timeout: 5 + failedContinue: true + afterUpdateUserInfo: + enable: false + timeout: 5 + beforeCreateGroup: + enable: false + timeout: 5 + failedContinue: true + afterCreateGroup: + enable: false + timeout: 5 + beforeMemberJoinGroup: + enable: false + timeout: 5 + failedContinue: true + beforeSetGroupMemberInfo: + enable: false + timeout: 5 + failedContinue: true + afterSetGroupMemberInfo: + enable: false + timeout: 5 + afterQuitGroup: + enable: false + timeout: 5 + afterKickGroupMember: + enable: false + timeout: 5 + afterDismissGroup: + enable: false + timeout: 5 + beforeApplyJoinGroup: + enable: false + timeout: 5 + failedContinue: true + afterGroupMsgRead: + enable: false + timeout: 5 + afterSingleMsgRead: + enable: false + timeout: 5 + beforeUserRegister: + enable: false + timeout: 5 + failedContinue: true + afterUserRegister: + enable: false + timeout: 5 + afterTransferGroupOwner: + enable: false + timeout: 5 + beforeSetFriendRemark: + enable: false + timeout: 5 + failedContinue: true + afterSetFriendRemark: + enable: false + timeout: 5 + afterGroupMsgRevoke: + enable: false + timeout: 5 + afterJoinGroup: + enable: false + timeout: 5 + beforeInviteUserToGroup: + enable: false + timeout: 5 
+ failedContinue: true + afterSetGroupInfo: + enable: false + timeout: 5 + beforeSetGroupInfo: + enable: false + timeout: 5 + failedContinue: true + afterSetGroupInfoEx: + enable: false + timeout: 5 + beforeSetGroupInfoEx: + enable: false + timeout: 5 + failedContinue: true + afterRevokeMsg: + enable: false + timeout: 5 + beforeAddBlack: + enable: false + timeout: 5 + failedContinue: + afterAddFriend: + enable: false + timeout: 5 + beforeAddFriendAgree: + enable: false + timeout: 5 + failedContinue: true + afterAddFriendAgree: + enable: false + timeout: 5 + afterDeleteFriend: + enable: false + timeout: 5 + beforeImportFriends: + enable: false + timeout: 5 + failedContinue: true + afterImportFriends: + enable: false + timeout: 5 + afterRemoveBlack: + enable: false + timeout: 5 + + prometheus.yml: | + # my global config + global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + + # Alertmanager configuration + alerting: + alertmanagers: + - static_configs: + - targets: [internal_ip:19093] + + # Load rules once and periodically evaluate them according to the global evaluation_interval. + rule_files: + - instance-down-rules.yml + # - first_rules.yml + # - second_rules.yml + + # A scrape configuration containing exactly one endpoint to scrape: + # Here it's Prometheus itself. + scrape_configs: + # The job name is added as a label "job=job_name" to any timeseries scraped from this config. 
+ # Monitored information captured by prometheus + + # prometheus fetches application services + - job_name: node_exporter + static_configs: + - targets: [ internal_ip:20500 ] + - job_name: openimserver-openim-api + static_configs: + - targets: [ internal_ip:12002 ] + labels: + namespace: default + - job_name: openimserver-openim-msggateway + static_configs: + - targets: [ internal_ip:12140 ] + # - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ] + labels: + namespace: default + - job_name: openimserver-openim-msgtransfer + static_configs: + - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ] + # - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ] + labels: + namespace: default + - job_name: openimserver-openim-push + static_configs: + - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ] + # - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185, internal_ip:12186 ] + labels: + namespace: default + - job_name: openimserver-openim-rpc-auth + static_configs: + - targets: [ 
internal_ip:12200 ] + labels: + namespace: default + - job_name: openimserver-openim-rpc-conversation + static_configs: + - targets: [ internal_ip:12220 ] + labels: + namespace: default + - job_name: openimserver-openim-rpc-friend + static_configs: + - targets: [ internal_ip:12240 ] + labels: + namespace: default + - job_name: openimserver-openim-rpc-group + static_configs: + - targets: [ internal_ip:12260 ] + labels: + namespace: default + - job_name: openimserver-openim-rpc-msg + static_configs: + - targets: [ internal_ip:12280 ] + labels: + namespace: default + - job_name: openimserver-openim-rpc-third + static_configs: + - targets: [ internal_ip:12300 ] + labels: + namespace: default + - job_name: openimserver-openim-rpc-user + static_configs: + - targets: [ internal_ip:12320 ] + labels: + namespace: default diff --git a/deployments/deploy/openim-msggateway-deployment.yml b/deployments/deploy/openim-msggateway-deployment.yml new file mode 100644 index 000000000..b1a142e23 --- /dev/null +++ b/deployments/deploy/openim-msggateway-deployment.yml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: messagegateway-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: messagegateway-rpc-server + template: + metadata: + labels: + app: messagegateway-rpc-server + spec: + containers: + - name: openim-msggateway-container + image: openim/openim-msggateway:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10140 + - containerPort: 12001 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-msgtransfer-deployment.yml b/deployments/deploy/openim-msgtransfer-deployment.yml new file mode 100644 index 000000000..323ed5660 --- /dev/null +++ 
b/deployments/deploy/openim-msgtransfer-deployment.yml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openim-msgtransfer-server +spec: + replicas: 2 + selector: + matchLabels: + app: openim-msgtransfer-server + template: + metadata: + labels: + app: openim-msgtransfer-server + spec: + containers: + - name: openim-msgtransfer-container + image: openim/openim-msgtransfer:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + - name: IMENV_KAFKA_PASSWORD + valueFrom: + secretKeyRef: + name: openim-kafka-secret + key: kafka-password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 12020 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-push-deployment.yml b/deployments/deploy/openim-push-deployment.yml new file mode 100644 index 000000000..bb36170e9 --- /dev/null +++ b/deployments/deploy/openim-push-deployment.yml @@ -0,0 +1,41 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: push-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: push-rpc-server + template: + metadata: + labels: + app: push-rpc-server + spec: + containers: + - name: push-rpc-server-container + image: openim/openim-push:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_KAFKA_PASSWORD + valueFrom: + secretKeyRef: + name: openim-kafka-secret + key: kafka-password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - 
containerPort: 10170 + - containerPort: 12170 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-rpc-auth-deployment.yml b/deployments/deploy/openim-rpc-auth-deployment.yml new file mode 100644 index 000000000..a15c901f5 --- /dev/null +++ b/deployments/deploy/openim-rpc-auth-deployment.yml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: auth-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: auth-rpc-server + template: + metadata: + labels: + app: auth-rpc-server + spec: + containers: + - name: auth-rpc-server-container + image: openim/openim-rpc-auth:v3.8.3 + imagePullPolicy: Never + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10200 + - containerPort: 12200 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-rpc-conversation-deployment.yml b/deployments/deploy/openim-rpc-conversation-deployment.yml new file mode 100644 index 000000000..2c9bde337 --- /dev/null +++ b/deployments/deploy/openim-rpc-conversation-deployment.yml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: conversation-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: conversation-rpc-server + template: + metadata: + labels: + app: conversation-rpc-server + spec: + containers: + - name: conversation-rpc-server-container + image: openim/openim-rpc-conversation:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + 
secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10220 + - containerPort: 12220 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-rpc-friend-deployment.yml b/deployments/deploy/openim-rpc-friend-deployment.yml new file mode 100644 index 000000000..e01238888 --- /dev/null +++ b/deployments/deploy/openim-rpc-friend-deployment.yml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: friend-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: friend-rpc-server + template: + metadata: + labels: + app: friend-rpc-server + spec: + containers: + - name: friend-rpc-server-container + image: openim/openim-rpc-friend:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10240 + - containerPort: 12240 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-rpc-group-deployment.yml b/deployments/deploy/openim-rpc-group-deployment.yml new file mode 100644 index 000000000..4698d60b5 --- /dev/null +++ b/deployments/deploy/openim-rpc-group-deployment.yml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: group-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: group-rpc-server + template: + metadata: + labels: + app: group-rpc-server + spec: + containers: + - name: group-rpc-server-container + image: 
openim/openim-rpc-group:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10260 + - containerPort: 12260 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-rpc-msg-deployment.yml b/deployments/deploy/openim-rpc-msg-deployment.yml new file mode 100644 index 000000000..26a833342 --- /dev/null +++ b/deployments/deploy/openim-rpc-msg-deployment.yml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: msg-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: msg-rpc-server + template: + metadata: + labels: + app: msg-rpc-server + spec: + containers: + - name: msg-rpc-server-container + image: openim/openim-rpc-msg:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + - name: IMENV_KAFKA_PASSWORD + valueFrom: + secretKeyRef: + name: openim-kafka-secret + key: kafka-password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10280 + - containerPort: 12280 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-rpc-third-deployment.yml 
b/deployments/deploy/openim-rpc-third-deployment.yml new file mode 100644 index 000000000..f6919f510 --- /dev/null +++ b/deployments/deploy/openim-rpc-third-deployment.yml @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: third-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: third-rpc-server + template: + metadata: + labels: + app: third-rpc-server + spec: + containers: + - name: third-rpc-server-container + image: openim/openim-rpc-third:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_MINIO_ACCESSKEYID + valueFrom: + secretKeyRef: + name: openim-minio-secret + key: minio-root-user + - name: IMENV_MINIO_SECRETACCESSKEY + valueFrom: + secretKeyRef: + name: openim-minio-secret + key: minio-root-password + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10300 + - containerPort: 12300 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/openim-rpc-user-deployment.yml b/deployments/deploy/openim-rpc-user-deployment.yml new file mode 100644 index 000000000..c3e36d1be --- /dev/null +++ b/deployments/deploy/openim-rpc-user-deployment.yml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: user-rpc-server +spec: + replicas: 2 + selector: + matchLabels: + app: user-rpc-server + template: + metadata: + labels: + app: user-rpc-server + spec: + containers: + - name: user-rpc-server-container + image: openim/openim-rpc-user:v3.8.3 + env: + - name: CONFIG_PATH + value: "/config" + - name: IMENV_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: 
openim-redis-secret + key: redis-password + - name: IMENV_MONGODB_USERNAME + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_username + - name: IMENV_MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name: openim-mongo-secret + key: mongo_openim_password + - name: IMENV_KAFKA_PASSWORD + valueFrom: + secretKeyRef: + name: openim-kafka-secret + key: kafka-password + volumeMounts: + - name: openim-config + mountPath: "/config" + readOnly: true + ports: + - containerPort: 10320 + - containerPort: 12320 + volumes: + - name: openim-config + configMap: + name: openim-config diff --git a/deployments/deploy/redis-secret.yml b/deployments/deploy/redis-secret.yml new file mode 100644 index 000000000..463ec9545 --- /dev/null +++ b/deployments/deploy/redis-secret.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: openim-redis-secret +type: Opaque +data: + redis-password: b3BlbklNMTIz # "openIM123" in base64 diff --git a/deployments/deploy/redis-statefulset.yml b/deployments/deploy/redis-statefulset.yml new file mode 100644 index 000000000..5668b20cc --- /dev/null +++ b/deployments/deploy/redis-statefulset.yml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: redis-statefulset +spec: + serviceName: "redis" + replicas: 2 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - name: redis + image: redis:7.0.0 + ports: + - containerPort: 6379 + env: + - name: TZ + value: "Asia/Shanghai" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secret + key: redis-password + volumeMounts: + - name: redis-data + mountPath: /data + command: + [ + "/bin/sh", + "-c", + 'redis-server --requirepass "$REDIS_PASSWORD" --appendonly yes', + ] + volumes: + - name: redis-config-volume + configMap: + name: openim-config + - name: redis-data + persistentVolumeClaim: + claimName: redis-pvc +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: 
redis-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi diff --git a/go.mod b/go.mod index ab366e8bf..f0fdbaf7d 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module github.com/openimsdk/open-im-server/v3 go 1.22.7 -toolchain go1.23.2 - require ( firebase.google.com/go/v4 v4.14.1 github.com/dtm-labs/rockscache v0.1.1 @@ -14,8 +12,8 @@ require ( github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/openimsdk/protocol v0.0.72-alpha.63 - github.com/openimsdk/tools v0.0.50-alpha.50 + github.com/openimsdk/protocol v0.0.72-alpha.68 + github.com/openimsdk/tools v0.0.50-alpha.62 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.9.0 @@ -44,7 +42,6 @@ require ( github.com/spf13/viper v1.18.2 github.com/stathat/consistent v1.0.0 go.uber.org/automaxprocs v1.5.3 - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/sync v0.8.0 ) @@ -175,6 +172,7 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.7.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/image v0.15.0 // indirect golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect diff --git a/go.sum b/go.sum index 5d38b8ad4..b91394484 100644 --- a/go.sum +++ b/go.sum @@ -319,10 +319,10 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.72-alpha.63 h1:IyPBibEvwBtTmD8DSrlqcekfEXe74k4+KeeHsgdhGh0= -github.com/openimsdk/protocol v0.0.72-alpha.63/go.mod 
h1:Iet+piS/jaS+kWWyj6EEr36mk4ISzIRYjoMSVA4dq2M= -github.com/openimsdk/tools v0.0.50-alpha.50 h1:+naDlvHcqJDj2NsCGnQd1LLQOET5IRPbrtmWbM/o7JQ= -github.com/openimsdk/tools v0.0.50-alpha.50/go.mod h1:muCtxguNJv8lFwLei27UASu2Nvg4ERSeN0R4K5tivk0= +github.com/openimsdk/protocol v0.0.72-alpha.68 h1:Ekn6S9Ftt12Xs/p9kJ39RDr2gSwIczz+MmSHQE4lAek= +github.com/openimsdk/protocol v0.0.72-alpha.68/go.mod h1:Iet+piS/jaS+kWWyj6EEr36mk4ISzIRYjoMSVA4dq2M= +github.com/openimsdk/tools v0.0.50-alpha.62 h1:e/m1XL7+EXbkOoxr/En/612WcOPKOUHPBj0++gG6MuQ= +github.com/openimsdk/tools v0.0.50-alpha.62/go.mod h1:JowL2jYr8tu4vcQe+5hJh4v3BtSx1T0CIS3pgU/Mw+U= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= diff --git a/internal/api/auth.go b/internal/api/auth.go index f41b530bf..92d911b71 100644 --- a/internal/api/auth.go +++ b/internal/api/auth.go @@ -16,29 +16,30 @@ package api import ( "github.com/gin-gonic/gin" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/auth" "github.com/openimsdk/tools/a2r" ) -type AuthApi rpcclient.Auth +type AuthApi struct { + Client auth.AuthClient +} -func NewAuthApi(client rpcclient.Auth) AuthApi { - return AuthApi(client) +func NewAuthApi(client auth.AuthClient) AuthApi { + return AuthApi{client} } func (o *AuthApi) GetAdminToken(c *gin.Context) { - a2r.Call(auth.AuthClient.GetAdminToken, o.Client, c) + a2r.Call(c, auth.AuthClient.GetAdminToken, o.Client) } func (o *AuthApi) GetUserToken(c *gin.Context) { - a2r.Call(auth.AuthClient.GetUserToken, o.Client, c) + a2r.Call(c, auth.AuthClient.GetUserToken, o.Client) } func (o *AuthApi) ParseToken(c *gin.Context) { - a2r.Call(auth.AuthClient.ParseToken, o.Client, c) + a2r.Call(c, auth.AuthClient.ParseToken, o.Client) } func (o *AuthApi) ForceLogout(c *gin.Context) { - 
a2r.Call(auth.AuthClient.ForceLogout, o.Client, c) + a2r.Call(c, auth.AuthClient.ForceLogout, o.Client) } diff --git a/internal/api/conversation.go b/internal/api/conversation.go index 8e3a3ca82..f7dbc133c 100644 --- a/internal/api/conversation.go +++ b/internal/api/conversation.go @@ -16,57 +16,58 @@ package api import ( "github.com/gin-gonic/gin" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/tools/a2r" ) -type ConversationApi rpcclient.Conversation +type ConversationApi struct { + Client conversation.ConversationClient +} -func NewConversationApi(client rpcclient.Conversation) ConversationApi { - return ConversationApi(client) +func NewConversationApi(client conversation.ConversationClient) ConversationApi { + return ConversationApi{client} } func (o *ConversationApi) GetAllConversations(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetAllConversations, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetAllConversations, o.Client) } func (o *ConversationApi) GetSortedConversationList(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetSortedConversationList, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetSortedConversationList, o.Client) } func (o *ConversationApi) GetConversation(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetConversation, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetConversation, o.Client) } func (o *ConversationApi) GetConversations(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetConversations, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetConversations, o.Client) } func (o *ConversationApi) SetConversations(c *gin.Context) { - a2r.Call(conversation.ConversationClient.SetConversations, o.Client, c) + a2r.Call(c, conversation.ConversationClient.SetConversations, o.Client) } func (o *ConversationApi) GetConversationOfflinePushUserIDs(c *gin.Context) { 
- a2r.Call(conversation.ConversationClient.GetConversationOfflinePushUserIDs, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetConversationOfflinePushUserIDs, o.Client) } func (o *ConversationApi) GetFullOwnerConversationIDs(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetFullOwnerConversationIDs, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetFullOwnerConversationIDs, o.Client) } func (o *ConversationApi) GetIncrementalConversation(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetIncrementalConversation, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetIncrementalConversation, o.Client) } func (o *ConversationApi) GetOwnerConversation(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetOwnerConversation, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetOwnerConversation, o.Client) } func (o *ConversationApi) GetNotNotifyConversationIDs(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetNotNotifyConversationIDs, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetNotNotifyConversationIDs, o.Client) } func (o *ConversationApi) GetPinnedConversationIDs(c *gin.Context) { - a2r.Call(conversation.ConversationClient.GetPinnedConversationIDs, o.Client, c) + a2r.Call(c, conversation.ConversationClient.GetPinnedConversationIDs, o.Client) } diff --git a/internal/api/friend.go b/internal/api/friend.go index d000cccdd..7d84ff0dc 100644 --- a/internal/api/friend.go +++ b/internal/api/friend.go @@ -17,99 +17,100 @@ package api import ( "github.com/gin-gonic/gin" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/relation" "github.com/openimsdk/tools/a2r" ) -type FriendApi rpcclient.Friend +type FriendApi struct { + Client relation.FriendClient +} -func NewFriendApi(client rpcclient.Friend) FriendApi { - return FriendApi(client) +func NewFriendApi(client relation.FriendClient) FriendApi { + return FriendApi{client} } func (o 
*FriendApi) ApplyToAddFriend(c *gin.Context) { - a2r.Call(relation.FriendClient.ApplyToAddFriend, o.Client, c) + a2r.Call(c, relation.FriendClient.ApplyToAddFriend, o.Client) } func (o *FriendApi) RespondFriendApply(c *gin.Context) { - a2r.Call(relation.FriendClient.RespondFriendApply, o.Client, c) + a2r.Call(c, relation.FriendClient.RespondFriendApply, o.Client) } func (o *FriendApi) DeleteFriend(c *gin.Context) { - a2r.Call(relation.FriendClient.DeleteFriend, o.Client, c) + a2r.Call(c, relation.FriendClient.DeleteFriend, o.Client) } func (o *FriendApi) GetFriendApplyList(c *gin.Context) { - a2r.Call(relation.FriendClient.GetPaginationFriendsApplyTo, o.Client, c) + a2r.Call(c, relation.FriendClient.GetPaginationFriendsApplyTo, o.Client) } func (o *FriendApi) GetDesignatedFriendsApply(c *gin.Context) { - a2r.Call(relation.FriendClient.GetDesignatedFriendsApply, o.Client, c) + a2r.Call(c, relation.FriendClient.GetDesignatedFriendsApply, o.Client) } func (o *FriendApi) GetSelfApplyList(c *gin.Context) { - a2r.Call(relation.FriendClient.GetPaginationFriendsApplyFrom, o.Client, c) + a2r.Call(c, relation.FriendClient.GetPaginationFriendsApplyFrom, o.Client) } func (o *FriendApi) GetFriendList(c *gin.Context) { - a2r.Call(relation.FriendClient.GetPaginationFriends, o.Client, c) + a2r.Call(c, relation.FriendClient.GetPaginationFriends, o.Client) } func (o *FriendApi) GetDesignatedFriends(c *gin.Context) { - a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c) + a2r.Call(c, relation.FriendClient.GetDesignatedFriends, o.Client) } func (o *FriendApi) SetFriendRemark(c *gin.Context) { - a2r.Call(relation.FriendClient.SetFriendRemark, o.Client, c) + a2r.Call(c, relation.FriendClient.SetFriendRemark, o.Client) } func (o *FriendApi) AddBlack(c *gin.Context) { - a2r.Call(relation.FriendClient.AddBlack, o.Client, c) + a2r.Call(c, relation.FriendClient.AddBlack, o.Client) } func (o *FriendApi) GetPaginationBlacks(c *gin.Context) { - 
a2r.Call(relation.FriendClient.GetPaginationBlacks, o.Client, c) + a2r.Call(c, relation.FriendClient.GetPaginationBlacks, o.Client) } func (o *FriendApi) GetSpecifiedBlacks(c *gin.Context) { - a2r.Call(relation.FriendClient.GetSpecifiedBlacks, o.Client, c) + a2r.Call(c, relation.FriendClient.GetSpecifiedBlacks, o.Client) } func (o *FriendApi) RemoveBlack(c *gin.Context) { - a2r.Call(relation.FriendClient.RemoveBlack, o.Client, c) + a2r.Call(c, relation.FriendClient.RemoveBlack, o.Client) } func (o *FriendApi) ImportFriends(c *gin.Context) { - a2r.Call(relation.FriendClient.ImportFriends, o.Client, c) + a2r.Call(c, relation.FriendClient.ImportFriends, o.Client) } func (o *FriendApi) IsFriend(c *gin.Context) { - a2r.Call(relation.FriendClient.IsFriend, o.Client, c) + a2r.Call(c, relation.FriendClient.IsFriend, o.Client) } func (o *FriendApi) GetFriendIDs(c *gin.Context) { - a2r.Call(relation.FriendClient.GetFriendIDs, o.Client, c) + a2r.Call(c, relation.FriendClient.GetFriendIDs, o.Client) } func (o *FriendApi) GetSpecifiedFriendsInfo(c *gin.Context) { - a2r.Call(relation.FriendClient.GetSpecifiedFriendsInfo, o.Client, c) + a2r.Call(c, relation.FriendClient.GetSpecifiedFriendsInfo, o.Client) } func (o *FriendApi) UpdateFriends(c *gin.Context) { - a2r.Call(relation.FriendClient.UpdateFriends, o.Client, c) + a2r.Call(c, relation.FriendClient.UpdateFriends, o.Client) } func (o *FriendApi) GetIncrementalFriends(c *gin.Context) { - a2r.Call(relation.FriendClient.GetIncrementalFriends, o.Client, c) + a2r.Call(c, relation.FriendClient.GetIncrementalFriends, o.Client) } // GetIncrementalBlacks is temporarily unused. // Deprecated: This function is currently unused and may be removed in future versions. 
func (o *FriendApi) GetIncrementalBlacks(c *gin.Context) { - a2r.Call(relation.FriendClient.GetIncrementalBlacks, o.Client, c) + a2r.Call(c, relation.FriendClient.GetIncrementalBlacks, o.Client) } func (o *FriendApi) GetFullFriendUserIDs(c *gin.Context) { - a2r.Call(relation.FriendClient.GetFullFriendUserIDs, o.Client, c) + a2r.Call(c, relation.FriendClient.GetFullFriendUserIDs, o.Client) } diff --git a/internal/api/group.go b/internal/api/group.go index 9c35da708..97b6b73f0 100644 --- a/internal/api/group.go +++ b/internal/api/group.go @@ -16,151 +16,152 @@ package api import ( "github.com/gin-gonic/gin" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/group" "github.com/openimsdk/tools/a2r" ) -type GroupApi rpcclient.Group +type GroupApi struct { + Client group.GroupClient +} -func NewGroupApi(client rpcclient.Group) GroupApi { - return GroupApi(client) +func NewGroupApi(client group.GroupClient) GroupApi { + return GroupApi{client} } func (o *GroupApi) CreateGroup(c *gin.Context) { - a2r.Call(group.GroupClient.CreateGroup, o.Client, c) + a2r.Call(c, group.GroupClient.CreateGroup, o.Client) } func (o *GroupApi) SetGroupInfo(c *gin.Context) { - a2r.Call(group.GroupClient.SetGroupInfo, o.Client, c) + a2r.Call(c, group.GroupClient.SetGroupInfo, o.Client) } func (o *GroupApi) SetGroupInfoEx(c *gin.Context) { - a2r.Call(group.GroupClient.SetGroupInfoEx, o.Client, c) + a2r.Call(c, group.GroupClient.SetGroupInfoEx, o.Client) } func (o *GroupApi) JoinGroup(c *gin.Context) { - a2r.Call(group.GroupClient.JoinGroup, o.Client, c) + a2r.Call(c, group.GroupClient.JoinGroup, o.Client) } func (o *GroupApi) QuitGroup(c *gin.Context) { - a2r.Call(group.GroupClient.QuitGroup, o.Client, c) + a2r.Call(c, group.GroupClient.QuitGroup, o.Client) } func (o *GroupApi) ApplicationGroupResponse(c *gin.Context) { - a2r.Call(group.GroupClient.GroupApplicationResponse, o.Client, c) + a2r.Call(c, group.GroupClient.GroupApplicationResponse, o.Client) } func 
(o *GroupApi) TransferGroupOwner(c *gin.Context) { - a2r.Call(group.GroupClient.TransferGroupOwner, o.Client, c) + a2r.Call(c, group.GroupClient.TransferGroupOwner, o.Client) } func (o *GroupApi) GetRecvGroupApplicationList(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroupApplicationList, o.Client, c) + a2r.Call(c, group.GroupClient.GetGroupApplicationList, o.Client) } func (o *GroupApi) GetUserReqGroupApplicationList(c *gin.Context) { - a2r.Call(group.GroupClient.GetUserReqApplicationList, o.Client, c) + a2r.Call(c, group.GroupClient.GetUserReqApplicationList, o.Client) } func (o *GroupApi) GetGroupUsersReqApplicationList(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroupUsersReqApplicationList, o.Client, c) + a2r.Call(c, group.GroupClient.GetGroupUsersReqApplicationList, o.Client) } func (o *GroupApi) GetSpecifiedUserGroupRequestInfo(c *gin.Context) { - a2r.Call(group.GroupClient.GetSpecifiedUserGroupRequestInfo, o.Client, c) + a2r.Call(c, group.GroupClient.GetSpecifiedUserGroupRequestInfo, o.Client) } func (o *GroupApi) GetGroupsInfo(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroupsInfo, o.Client, c) - //a2r.Call(group.GroupClient.GetGroupsInfo, o.Client, c, a2r.NewNilReplaceOption(group.GroupClient.GetGroupsInfo)) + a2r.Call(c, group.GroupClient.GetGroupsInfo, o.Client) + //a2r.Call(c, group.GroupClient.GetGroupsInfo, o.Client, c, a2r.NewNilReplaceOption(group.GroupClient.GetGroupsInfo)) } func (o *GroupApi) KickGroupMember(c *gin.Context) { - a2r.Call(group.GroupClient.KickGroupMember, o.Client, c) + a2r.Call(c, group.GroupClient.KickGroupMember, o.Client) } func (o *GroupApi) GetGroupMembersInfo(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroupMembersInfo, o.Client, c) - //a2r.Call(group.GroupClient.GetGroupMembersInfo, o.Client, c, a2r.NewNilReplaceOption(group.GroupClient.GetGroupMembersInfo)) + a2r.Call(c, group.GroupClient.GetGroupMembersInfo, o.Client) + //a2r.Call(c, group.GroupClient.GetGroupMembersInfo, o.Client, c, 
a2r.NewNilReplaceOption(group.GroupClient.GetGroupMembersInfo)) } func (o *GroupApi) GetGroupMemberList(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroupMemberList, o.Client, c) + a2r.Call(c, group.GroupClient.GetGroupMemberList, o.Client) } func (o *GroupApi) InviteUserToGroup(c *gin.Context) { - a2r.Call(group.GroupClient.InviteUserToGroup, o.Client, c) + a2r.Call(c, group.GroupClient.InviteUserToGroup, o.Client) } func (o *GroupApi) GetJoinedGroupList(c *gin.Context) { - a2r.Call(group.GroupClient.GetJoinedGroupList, o.Client, c) + a2r.Call(c, group.GroupClient.GetJoinedGroupList, o.Client) } func (o *GroupApi) DismissGroup(c *gin.Context) { - a2r.Call(group.GroupClient.DismissGroup, o.Client, c) + a2r.Call(c, group.GroupClient.DismissGroup, o.Client) } func (o *GroupApi) MuteGroupMember(c *gin.Context) { - a2r.Call(group.GroupClient.MuteGroupMember, o.Client, c) + a2r.Call(c, group.GroupClient.MuteGroupMember, o.Client) } func (o *GroupApi) CancelMuteGroupMember(c *gin.Context) { - a2r.Call(group.GroupClient.CancelMuteGroupMember, o.Client, c) + a2r.Call(c, group.GroupClient.CancelMuteGroupMember, o.Client) } func (o *GroupApi) MuteGroup(c *gin.Context) { - a2r.Call(group.GroupClient.MuteGroup, o.Client, c) + a2r.Call(c, group.GroupClient.MuteGroup, o.Client) } func (o *GroupApi) CancelMuteGroup(c *gin.Context) { - a2r.Call(group.GroupClient.CancelMuteGroup, o.Client, c) + a2r.Call(c, group.GroupClient.CancelMuteGroup, o.Client) } func (o *GroupApi) SetGroupMemberInfo(c *gin.Context) { - a2r.Call(group.GroupClient.SetGroupMemberInfo, o.Client, c) + a2r.Call(c, group.GroupClient.SetGroupMemberInfo, o.Client) } func (o *GroupApi) GetGroupAbstractInfo(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroupAbstractInfo, o.Client, c) + a2r.Call(c, group.GroupClient.GetGroupAbstractInfo, o.Client) } // func (g *Group) SetGroupMemberNickname(c *gin.Context) { -// a2r.Call(group.GroupClient.SetGroupMemberNickname, g.userClient, c) +// a2r.Call(c, 
group.GroupClient.SetGroupMemberNickname, g.userClient) //} // // func (g *Group) GetGroupAllMemberList(c *gin.Context) { -// a2r.Call(group.GroupClient.GetGroupAllMember, g.userClient, c) +// a2r.Call(c, group.GroupClient.GetGroupAllMember, g.userClient) //} func (o *GroupApi) GroupCreateCount(c *gin.Context) { - a2r.Call(group.GroupClient.GroupCreateCount, o.Client, c) + a2r.Call(c, group.GroupClient.GroupCreateCount, o.Client) } func (o *GroupApi) GetGroups(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroups, o.Client, c) + a2r.Call(c, group.GroupClient.GetGroups, o.Client) } func (o *GroupApi) GetGroupMemberUserIDs(c *gin.Context) { - a2r.Call(group.GroupClient.GetGroupMemberUserIDs, o.Client, c) + a2r.Call(c, group.GroupClient.GetGroupMemberUserIDs, o.Client) } func (o *GroupApi) GetIncrementalJoinGroup(c *gin.Context) { - a2r.Call(group.GroupClient.GetIncrementalJoinGroup, o.Client, c) + a2r.Call(c, group.GroupClient.GetIncrementalJoinGroup, o.Client) } func (o *GroupApi) GetIncrementalGroupMember(c *gin.Context) { - a2r.Call(group.GroupClient.GetIncrementalGroupMember, o.Client, c) + a2r.Call(c, group.GroupClient.GetIncrementalGroupMember, o.Client) } func (o *GroupApi) GetIncrementalGroupMemberBatch(c *gin.Context) { - a2r.Call(group.GroupClient.BatchGetIncrementalGroupMember, o.Client, c) + a2r.Call(c, group.GroupClient.BatchGetIncrementalGroupMember, o.Client) } func (o *GroupApi) GetFullGroupMemberUserIDs(c *gin.Context) { - a2r.Call(group.GroupClient.GetFullGroupMemberUserIDs, o.Client, c) + a2r.Call(c, group.GroupClient.GetFullGroupMemberUserIDs, o.Client) } func (o *GroupApi) GetFullJoinGroupIDs(c *gin.Context) { - a2r.Call(group.GroupClient.GetFullJoinGroupIDs, o.Client, c) + a2r.Call(c, group.GroupClient.GetFullJoinGroupIDs, o.Client) } diff --git a/internal/api/init.go b/internal/api/init.go index e83dfc2ea..2a388655c 100644 --- a/internal/api/init.go +++ b/internal/api/init.go @@ -62,7 +62,10 @@ func Start(ctx context.Context, index int, 
config *Config) error { prometheusPort int ) - router := newGinRouter(client, config) + router, err := newGinRouter(ctx, client, config) + if err != nil { + return err + } if config.API.Prometheus.Enable { go func() { prometheusPort, err = datautil.GetElemByIndex(config.API.Prometheus.Ports, index) diff --git a/internal/api/jssdk/jssdk.go b/internal/api/jssdk/jssdk.go index 2fcbf5ec6..3c0911207 100644 --- a/internal/api/jssdk/jssdk.go +++ b/internal/api/jssdk/jssdk.go @@ -2,17 +2,17 @@ package jssdk import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "sort" + "github.com/gin-gonic/gin" "github.com/openimsdk/protocol/conversation" - "github.com/openimsdk/protocol/group" "github.com/openimsdk/protocol/jssdk" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/relation" "github.com/openimsdk/protocol/sdkws" - "github.com/openimsdk/protocol/user" "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/utils/datautil" - "sort" ) const ( @@ -20,22 +20,23 @@ const ( defaultGetActiveConversation = 100 ) -func NewJSSdkApi(user user.UserClient, friend relation.FriendClient, group group.GroupClient, msg msg.MsgClient, conv conversation.ConversationClient) *JSSdk { +func NewJSSdkApi(userClient *rpcli.UserClient, relationClient *rpcli.RelationClient, groupClient *rpcli.GroupClient, + conversationClient *rpcli.ConversationClient, msgClient *rpcli.MsgClient) *JSSdk { return &JSSdk{ - user: user, - friend: friend, - group: group, - msg: msg, - conv: conv, + userClient: userClient, + relationClient: relationClient, + groupClient: groupClient, + conversationClient: conversationClient, + msgClient: msgClient, } } type JSSdk struct { - user user.UserClient - friend relation.FriendClient - group group.GroupClient - msg msg.MsgClient - conv conversation.ConversationClient + userClient *rpcli.UserClient + relationClient *rpcli.RelationClient + groupClient *rpcli.GroupClient + conversationClient *rpcli.ConversationClient + msgClient 
*rpcli.MsgClient } func (x *JSSdk) GetActiveConversations(c *gin.Context) { @@ -67,11 +68,11 @@ func (x *JSSdk) fillConversations(ctx context.Context, conversations []*jssdk.Co groupMap map[string]*sdkws.GroupInfo ) if len(userIDs) > 0 { - users, err := field(ctx, x.user.GetDesignateUsers, &user.GetDesignateUsersReq{UserIDs: userIDs}, (*user.GetDesignateUsersResp).GetUsersInfo) + users, err := x.userClient.GetUsersInfo(ctx, userIDs) if err != nil { return err } - friends, err := field(ctx, x.friend.GetFriendInfo, &relation.GetFriendInfoReq{OwnerUserID: conversations[0].Conversation.OwnerUserID, FriendUserIDs: userIDs}, (*relation.GetFriendInfoResp).GetFriendInfos) + friends, err := x.relationClient.GetFriendsInfo(ctx, conversations[0].Conversation.OwnerUserID, userIDs) if err != nil { return err } @@ -79,11 +80,11 @@ func (x *JSSdk) fillConversations(ctx context.Context, conversations []*jssdk.Co friendMap = datautil.SliceToMap(friends, (*relation.FriendInfoOnly).GetFriendUserID) } if len(groupIDs) > 0 { - resp, err := x.group.GetGroupsInfo(ctx, &group.GetGroupsInfoReq{GroupIDs: groupIDs}) + groups, err := x.groupClient.GetGroupsInfo(ctx, groupIDs) if err != nil { return err } - groupMap = datautil.SliceToMap(resp.GroupInfos, (*sdkws.GroupInfo).GetGroupID) + groupMap = datautil.SliceToMap(groups, (*sdkws.GroupInfo).GetGroupID) } for _, c := range conversations { if c.Conversation.GroupID == "" { @@ -101,21 +102,18 @@ func (x *JSSdk) getActiveConversations(ctx context.Context, req *jssdk.GetActive req.Count = defaultGetActiveConversation } req.OwnerUserID = mcontext.GetOpUserID(ctx) - conversationIDs, err := field(ctx, x.conv.GetConversationIDs, - &conversation.GetConversationIDsReq{UserID: req.OwnerUserID}, (*conversation.GetConversationIDsResp).GetConversationIDs) + conversationIDs, err := x.conversationClient.GetConversationIDs(ctx, req.OwnerUserID) if err != nil { return nil, err } if len(conversationIDs) == 0 { return &jssdk.GetActiveConversationsResp{}, nil } 
- readSeq, err := field(ctx, x.msg.GetHasReadSeqs, - &msg.GetHasReadSeqsReq{UserID: req.OwnerUserID, ConversationIDs: conversationIDs}, (*msg.SeqsInfoResp).GetMaxSeqs) + readSeq, err := x.msgClient.GetHasReadSeqs(ctx, conversationIDs, req.OwnerUserID) if err != nil { return nil, err } - activeConversation, err := field(ctx, x.msg.GetActiveConversation, - &msg.GetActiveConversationReq{ConversationIDs: conversationIDs}, (*msg.GetActiveConversationResp).GetConversations) + activeConversation, err := x.msgClient.GetActiveConversation(ctx, conversationIDs) if err != nil { return nil, err } @@ -126,8 +124,7 @@ func (x *JSSdk) getActiveConversations(ctx context.Context, req *jssdk.GetActive Conversation: activeConversation, } if len(activeConversation) > 1 { - pinnedConversationIDs, err := field(ctx, x.conv.GetPinnedConversationIDs, - &conversation.GetPinnedConversationIDsReq{UserID: req.OwnerUserID}, (*conversation.GetPinnedConversationIDsResp).GetConversationIDs) + pinnedConversationIDs, err := x.conversationClient.GetPinnedConversationIDs(ctx, req.OwnerUserID) if err != nil { return nil, err } @@ -135,25 +132,18 @@ func (x *JSSdk) getActiveConversations(ctx context.Context, req *jssdk.GetActive } sort.Sort(&sortConversations) sortList := sortConversations.Top(int(req.Count)) - conversations, err := field(ctx, x.conv.GetConversations, - &conversation.GetConversationsReq{ - OwnerUserID: req.OwnerUserID, - ConversationIDs: datautil.Slice(sortList, func(c *msg.ActiveConversation) string { - return c.ConversationID - })}, (*conversation.GetConversationsResp).GetConversations) + conversations, err := x.conversationClient.GetConversations(ctx, datautil.Slice(sortList, func(c *msg.ActiveConversation) string { + return c.ConversationID + }), req.OwnerUserID) if err != nil { return nil, err } - msgs, err := field(ctx, x.msg.GetSeqMessage, - &msg.GetSeqMessageReq{ - UserID: req.OwnerUserID, - Conversations: datautil.Slice(sortList, func(c *msg.ActiveConversation) 
*msg.ConversationSeqs { - return &msg.ConversationSeqs{ - ConversationID: c.ConversationID, - Seqs: []int64{c.MaxSeq}, - } - }), - }, (*msg.GetSeqMessageResp).GetMsgs) + msgs, err := x.msgClient.GetSeqMessage(ctx, req.OwnerUserID, datautil.Slice(sortList, func(c *msg.ActiveConversation) *msg.ConversationSeqs { + return &msg.ConversationSeqs{ + ConversationID: c.ConversationID, + Seqs: []int64{c.MaxSeq}, + } + })) if err != nil { return nil, err } @@ -195,7 +185,7 @@ func (x *JSSdk) getActiveConversations(ctx context.Context, req *jssdk.GetActive func (x *JSSdk) getConversations(ctx context.Context, req *jssdk.GetConversationsReq) (*jssdk.GetConversationsResp, error) { req.OwnerUserID = mcontext.GetOpUserID(ctx) - conversations, err := field(ctx, x.conv.GetConversations, &conversation.GetConversationsReq{OwnerUserID: req.OwnerUserID, ConversationIDs: req.ConversationIDs}, (*conversation.GetConversationsResp).GetConversations) + conversations, err := x.conversationClient.GetConversations(ctx, req.ConversationIDs, req.OwnerUserID) if err != nil { return nil, err } @@ -205,13 +195,11 @@ func (x *JSSdk) getConversations(ctx context.Context, req *jssdk.GetConversation req.ConversationIDs = datautil.Slice(conversations, func(c *conversation.Conversation) string { return c.ConversationID }) - maxSeqs, err := field(ctx, x.msg.GetMaxSeqs, - &msg.GetMaxSeqsReq{ConversationIDs: req.ConversationIDs}, (*msg.SeqsInfoResp).GetMaxSeqs) + maxSeqs, err := x.msgClient.GetMaxSeqs(ctx, req.ConversationIDs) if err != nil { return nil, err } - readSeqs, err := field(ctx, x.msg.GetHasReadSeqs, - &msg.GetHasReadSeqsReq{UserID: req.OwnerUserID, ConversationIDs: req.ConversationIDs}, (*msg.SeqsInfoResp).GetMaxSeqs) + readSeqs, err := x.msgClient.GetHasReadSeqs(ctx, req.ConversationIDs, req.OwnerUserID) if err != nil { return nil, err } @@ -226,8 +214,7 @@ func (x *JSSdk) getConversations(ctx context.Context, req *jssdk.GetConversation } var msgs map[string]*sdkws.PullMsgs if 
len(conversationSeqs) > 0 { - msgs, err = field(ctx, x.msg.GetSeqMessage, - &msg.GetSeqMessageReq{UserID: req.OwnerUserID, Conversations: conversationSeqs}, (*msg.GetSeqMessageResp).GetMsgs) + msgs, err = x.msgClient.GetSeqMessage(ctx, req.OwnerUserID, conversationSeqs) if err != nil { return nil, err } diff --git a/internal/api/msg.go b/internal/api/msg.go index bf7cb83a4..fc235354c 100644 --- a/internal/api/msg.go +++ b/internal/api/msg.go @@ -21,7 +21,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/apistruct" "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/config" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" @@ -37,16 +37,14 @@ import ( ) type MessageApi struct { - *rpcclient.Message - validate *validator.Validate - userRpcClient *rpcclient.UserRpcClient + Client msg.MsgClient + userClient *rpcli.UserClient imAdminUserID []string + validate *validator.Validate } -func NewMessageApi(msgRpcClient *rpcclient.Message, userRpcClient *rpcclient.User, - imAdminUserID []string) MessageApi { - return MessageApi{Message: msgRpcClient, validate: validator.New(), - userRpcClient: rpcclient.NewUserRpcClientByUser(userRpcClient), imAdminUserID: imAdminUserID} +func NewMessageApi(client msg.MsgClient, userClient *rpcli.UserClient, imAdminUserID []string) MessageApi { + return MessageApi{Client: client, userClient: userClient, imAdminUserID: imAdminUserID, validate: validator.New()} } func (*MessageApi) SetOptions(options map[string]bool, value bool) { @@ -108,51 +106,51 @@ func (m *MessageApi) newUserSendMsgReq(_ *gin.Context, params *apistruct.SendMsg } func (m *MessageApi) GetSeq(c *gin.Context) { - a2r.Call(msg.MsgClient.GetMaxSeq, m.Client, c) + a2r.Call(c, msg.MsgClient.GetMaxSeq, m.Client) } func (m *MessageApi) 
PullMsgBySeqs(c *gin.Context) { - a2r.Call(msg.MsgClient.PullMessageBySeqs, m.Client, c) + a2r.Call(c, msg.MsgClient.PullMessageBySeqs, m.Client) } func (m *MessageApi) RevokeMsg(c *gin.Context) { - a2r.Call(msg.MsgClient.RevokeMsg, m.Client, c) + a2r.Call(c, msg.MsgClient.RevokeMsg, m.Client) } func (m *MessageApi) MarkMsgsAsRead(c *gin.Context) { - a2r.Call(msg.MsgClient.MarkMsgsAsRead, m.Client, c) + a2r.Call(c, msg.MsgClient.MarkMsgsAsRead, m.Client) } func (m *MessageApi) MarkConversationAsRead(c *gin.Context) { - a2r.Call(msg.MsgClient.MarkConversationAsRead, m.Client, c) + a2r.Call(c, msg.MsgClient.MarkConversationAsRead, m.Client) } func (m *MessageApi) GetConversationsHasReadAndMaxSeq(c *gin.Context) { - a2r.Call(msg.MsgClient.GetConversationsHasReadAndMaxSeq, m.Client, c) + a2r.Call(c, msg.MsgClient.GetConversationsHasReadAndMaxSeq, m.Client) } func (m *MessageApi) SetConversationHasReadSeq(c *gin.Context) { - a2r.Call(msg.MsgClient.SetConversationHasReadSeq, m.Client, c) + a2r.Call(c, msg.MsgClient.SetConversationHasReadSeq, m.Client) } func (m *MessageApi) ClearConversationsMsg(c *gin.Context) { - a2r.Call(msg.MsgClient.ClearConversationsMsg, m.Client, c) + a2r.Call(c, msg.MsgClient.ClearConversationsMsg, m.Client) } func (m *MessageApi) UserClearAllMsg(c *gin.Context) { - a2r.Call(msg.MsgClient.UserClearAllMsg, m.Client, c) + a2r.Call(c, msg.MsgClient.UserClearAllMsg, m.Client) } func (m *MessageApi) DeleteMsgs(c *gin.Context) { - a2r.Call(msg.MsgClient.DeleteMsgs, m.Client, c) + a2r.Call(c, msg.MsgClient.DeleteMsgs, m.Client) } func (m *MessageApi) DeleteMsgPhysicalBySeq(c *gin.Context) { - a2r.Call(msg.MsgClient.DeleteMsgPhysicalBySeq, m.Client, c) + a2r.Call(c, msg.MsgClient.DeleteMsgPhysicalBySeq, m.Client) } func (m *MessageApi) DeleteMsgPhysical(c *gin.Context) { - a2r.Call(msg.MsgClient.DeleteMsgPhysical, m.Client, c) + a2r.Call(c, msg.MsgClient.DeleteMsgPhysical, m.Client) } func (m *MessageApi) getSendMsgReq(c *gin.Context, req 
apistruct.SendMsg) (sendMsgReq *msg.SendMsgReq, err error) { @@ -176,7 +174,7 @@ func (m *MessageApi) getSendMsgReq(c *gin.Context, req apistruct.SendMsg) (sendM case constant.OANotification: data = apistruct.OANotificationElem{} req.SessionType = constant.NotificationChatType - if err = m.userRpcClient.GetNotificationByID(c, req.SendID); err != nil { + if err = m.userClient.GetNotificationByID(c, req.SendID); err != nil { return nil, err } default: @@ -310,10 +308,10 @@ func (m *MessageApi) BatchSendMsg(c *gin.Context) { var recvIDs []string if req.IsSendAll { - pageNumber := 1 - showNumber := 500 + var pageNumber int32 = 1 + const showNumber = 500 for { - recvIDsPart, err := m.userRpcClient.GetAllUserIDs(c, int32(pageNumber), int32(showNumber)) + recvIDsPart, err := m.userClient.GetAllUserIDs(c, pageNumber, showNumber) if err != nil { apiresp.GinError(c, err) return @@ -351,25 +349,33 @@ func (m *MessageApi) BatchSendMsg(c *gin.Context) { } func (m *MessageApi) CheckMsgIsSendSuccess(c *gin.Context) { - a2r.Call(msg.MsgClient.GetSendMsgStatus, m.Client, c) + a2r.Call(c, msg.MsgClient.GetSendMsgStatus, m.Client) } func (m *MessageApi) GetUsersOnlineStatus(c *gin.Context) { - a2r.Call(msg.MsgClient.GetSendMsgStatus, m.Client, c) + a2r.Call(c, msg.MsgClient.GetSendMsgStatus, m.Client) } func (m *MessageApi) GetActiveUser(c *gin.Context) { - a2r.Call(msg.MsgClient.GetActiveUser, m.Client, c) + a2r.Call(c, msg.MsgClient.GetActiveUser, m.Client) } func (m *MessageApi) GetActiveGroup(c *gin.Context) { - a2r.Call(msg.MsgClient.GetActiveGroup, m.Client, c) + a2r.Call(c, msg.MsgClient.GetActiveGroup, m.Client) } func (m *MessageApi) SearchMsg(c *gin.Context) { - a2r.Call(msg.MsgClient.SearchMessage, m.Client, c) + a2r.Call(c, msg.MsgClient.SearchMessage, m.Client) } func (m *MessageApi) GetServerTime(c *gin.Context) { - a2r.Call(msg.MsgClient.GetServerTime, m.Client, c) + a2r.Call(c, msg.MsgClient.GetServerTime, m.Client) +} + +func (m *MessageApi) GetStreamMsg(c 
*gin.Context) { + a2r.Call(c, msg.MsgClient.GetServerTime, m.Client) +} + +func (m *MessageApi) AppendStreamMsg(c *gin.Context) { + a2r.Call(c, msg.MsgClient.GetServerTime, m.Client) } diff --git a/internal/api/router.go b/internal/api/router.go index 85c48b284..9b3fac24f 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -1,7 +1,18 @@ package api import ( + "context" "fmt" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + pbAuth "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/conversation" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/protocol/relation" + "github.com/openimsdk/protocol/third" + "github.com/openimsdk/protocol/user" + "net/http" + "strings" "github.com/openimsdk/open-im-server/v3/internal/api/jssdk" @@ -13,12 +24,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "net/http" - "strings" - "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/tools/apiresp" "github.com/openimsdk/tools/discovery" @@ -48,23 +55,42 @@ func prommetricsGin() gin.HandlerFunc { } } -func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.Engine { - disCov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), +func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config) (*gin.Engine, error) { + client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) + authConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Auth) + if err != nil { + return nil, err + } + userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err 
!= nil { + return nil, err + } + groupConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Group) + if err != nil { + return nil, err + } + friendConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Friend) + if err != nil { + return nil, err + } + conversationConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Conversation) + if err != nil { + return nil, err + } + thirdConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third) + if err != nil { + return nil, err + } + msgConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Msg) + if err != nil { + return nil, err + } gin.SetMode(gin.ReleaseMode) r := gin.New() if v, ok := binding.Validator.Engine().(*validator.Validate); ok { _ = v.RegisterValidation("required_if", RequiredIf) } - // init rpc client here - userRpc := rpcclient.NewUser(disCov, config.Share.RpcRegisterName.User, config.Share.RpcRegisterName.MessageGateway, - config.Share.IMAdminUserID) - groupRpc := rpcclient.NewGroup(disCov, config.Share.RpcRegisterName.Group) - friendRpc := rpcclient.NewFriend(disCov, config.Share.RpcRegisterName.Friend) - messageRpc := rpcclient.NewMessage(disCov, config.Share.RpcRegisterName.Msg) - conversationRpc := rpcclient.NewConversation(disCov, config.Share.RpcRegisterName.Conversation) - authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth) - thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL) switch config.API.Api.CompressionLevel { case NoCompression: case DefaultCompression: @@ -74,12 +100,12 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En case BestSpeed: r.Use(gzip.Gzip(gzip.BestSpeed)) } - r.Use(prommetricsGin(), gin.RecoveryWithWriter(gin.DefaultErrorWriter, mw.GinPanicErr), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc)) - u := NewUserApi(*userRpc) - m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID) - j := 
jssdk.NewJSSdkApi(userRpc.Client, friendRpc.Client, groupRpc.Client, messageRpc.Client, conversationRpc.Client) - userRouterGroup := r.Group("/user") + r.Use(prommetricsGin(), gin.RecoveryWithWriter(gin.DefaultErrorWriter, mw.GinPanicErr), mw.CorsHandler(), + mw.GinParseOperationID(), GinParseToken(rpcli.NewAuthClient(authConn))) + + u := NewUserApi(user.NewUserClient(userConn), client, config.Share.RpcRegisterName) { + userRouterGroup := r.Group("/user") userRouterGroup.POST("/user_register", u.UserRegister) userRouterGroup.POST("/update_user_info", u.UpdateUserInfo) userRouterGroup.POST("/update_user_info_ex", u.UpdateUserInfoEx) @@ -105,9 +131,9 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En userRouterGroup.POST("/search_notification_account", u.SearchNotificationAccount) } // friend routing group - friendRouterGroup := r.Group("/friend") { - f := NewFriendApi(*friendRpc) + f := NewFriendApi(relation.NewFriendClient(friendConn)) + friendRouterGroup := r.Group("/friend") friendRouterGroup.POST("/delete_friend", f.DeleteFriend) friendRouterGroup.POST("/get_friend_apply_list", f.GetFriendApplyList) friendRouterGroup.POST("/get_designated_friend_apply", f.GetDesignatedFriendsApply) @@ -130,9 +156,10 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En friendRouterGroup.POST("/get_incremental_friends", f.GetIncrementalFriends) friendRouterGroup.POST("/get_full_friend_user_ids", f.GetFullFriendUserIDs) } - g := NewGroupApi(*groupRpc) - groupRouterGroup := r.Group("/group") + + g := NewGroupApi(group.NewGroupClient(groupConn)) { + groupRouterGroup := r.Group("/group") groupRouterGroup.POST("/create_group", g.CreateGroup) groupRouterGroup.POST("/set_group_info", g.SetGroupInfo) groupRouterGroup.POST("/set_group_info_ex", g.SetGroupInfoEx) @@ -166,18 +193,19 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En groupRouterGroup.POST("/get_full_join_group_ids", 
g.GetFullJoinGroupIDs) } // certificate - authRouterGroup := r.Group("/auth") { - a := NewAuthApi(*authRpc) + a := NewAuthApi(pbAuth.NewAuthClient(authConn)) + authRouterGroup := r.Group("/auth") authRouterGroup.POST("/get_admin_token", a.GetAdminToken) authRouterGroup.POST("/get_user_token", a.GetUserToken) authRouterGroup.POST("/parse_token", a.ParseToken) authRouterGroup.POST("/force_logout", a.ForceLogout) + } // Third service - thirdGroup := r.Group("/third") { - t := NewThirdApi(*thirdRpc) + t := NewThirdApi(third.NewThirdClient(thirdConn), config.API.Prometheus.GrafanaURL) + thirdGroup := r.Group("/third") thirdGroup.GET("/prometheus", t.GetPrometheus) thirdGroup.POST("/fcm_update_token", t.FcmUpdateToken) thirdGroup.POST("/set_app_badge", t.SetAppBadge) @@ -200,8 +228,9 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En objectGroup.GET("/*name", t.ObjectRedirect) } // Message - msgGroup := r.Group("/msg") + m := NewMessageApi(msg.NewMsgClient(msgConn), rpcli.NewUserClient(userConn), config.Share.IMAdminUserID) { + msgGroup := r.Group("/msg") msgGroup.POST("/newest_seq", m.GetSeq) msgGroup.POST("/search_msg", m.SearchMsg) msgGroup.POST("/send_msg", m.SendMessage) @@ -224,9 +253,9 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En msgGroup.POST("/get_server_time", m.GetServerTime) } // Conversation - conversationGroup := r.Group("/conversation") { - c := NewConversationApi(*conversationRpc) + c := NewConversationApi(conversation.NewConversationClient(conversationConn)) + conversationGroup := r.Group("/conversation") conversationGroup.POST("/get_sorted_conversation_list", c.GetSortedConversationList) conversationGroup.POST("/get_all_conversations", c.GetAllConversations) conversationGroup.POST("/get_conversation", c.GetConversation) @@ -240,22 +269,26 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En conversationGroup.POST("/get_pinned_conversation_ids", 
c.GetPinnedConversationIDs) } - statisticsGroup := r.Group("/statistics") { + statisticsGroup := r.Group("/statistics") statisticsGroup.POST("/user/register", u.UserRegisterCount) statisticsGroup.POST("/user/active", m.GetActiveUser) statisticsGroup.POST("/group/create", g.GroupCreateCount) statisticsGroup.POST("/group/active", m.GetActiveGroup) } - jssdk := r.Group("/jssdk") - jssdk.POST("/get_conversations", j.GetConversations) - jssdk.POST("/get_active_conversations", j.GetActiveConversations) + { + j := jssdk.NewJSSdkApi(rpcli.NewUserClient(userConn), rpcli.NewRelationClient(friendConn), + rpcli.NewGroupClient(groupConn), rpcli.NewConversationClient(conversationConn), rpcli.NewMsgClient(msgConn)) + jssdk := r.Group("/jssdk") + jssdk.POST("/get_conversations", j.GetConversations) + jssdk.POST("/get_active_conversations", j.GetActiveConversations) + } - return r + return r, nil } -func GinParseToken(authRPC *rpcclient.Auth) gin.HandlerFunc { +func GinParseToken(authClient *rpcli.AuthClient) gin.HandlerFunc { return func(c *gin.Context) { switch c.Request.Method { case http.MethodPost: @@ -273,7 +306,7 @@ func GinParseToken(authRPC *rpcclient.Auth) gin.HandlerFunc { c.Abort() return } - resp, err := authRPC.ParseToken(c, token) + resp, err := authClient.ParseToken(c, token) if err != nil { apiresp.GinError(c, err) c.Abort() diff --git a/internal/api/statistics.go b/internal/api/statistics.go deleted file mode 100644 index f5ee99f73..000000000 --- a/internal/api/statistics.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "github.com/gin-gonic/gin" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" - "github.com/openimsdk/protocol/user" - "github.com/openimsdk/tools/a2r" -) - -type StatisticsApi rpcclient.User - -func NewStatisticsApi(client rpcclient.User) StatisticsApi { - return StatisticsApi(client) -} - -func (s *StatisticsApi) UserRegister(c *gin.Context) { - a2r.Call(user.UserClient.UserRegisterCount, s.Client, c) -} diff --git a/internal/api/third.go b/internal/api/third.go index 6baa70ee5..7ecb32911 100644 --- a/internal/api/third.go +++ b/internal/api/third.go @@ -24,25 +24,27 @@ import ( "strings" "github.com/gin-gonic/gin" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/third" "github.com/openimsdk/tools/a2r" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/mcontext" ) -type ThirdApi rpcclient.Third +type ThirdApi struct { + GrafanaUrl string + Client third.ThirdClient +} -func NewThirdApi(client rpcclient.Third) ThirdApi { - return ThirdApi(client) +func NewThirdApi(client third.ThirdClient, grafanaUrl string) ThirdApi { + return ThirdApi{Client: client, GrafanaUrl: grafanaUrl} } func (o *ThirdApi) FcmUpdateToken(c *gin.Context) { - a2r.Call(third.ThirdClient.FcmUpdateToken, o.Client, c) + a2r.Call(c, third.ThirdClient.FcmUpdateToken, o.Client) } func (o *ThirdApi) SetAppBadge(c *gin.Context) { - a2r.Call(third.ThirdClient.SetAppBadge, o.Client, c) + a2r.Call(c, third.ThirdClient.SetAppBadge, o.Client) } // #################### s3 #################### @@ -77,44 
+79,44 @@ func setURLPrefix(c *gin.Context, urlPrefix *string) error { } func (o *ThirdApi) PartLimit(c *gin.Context) { - a2r.Call(third.ThirdClient.PartLimit, o.Client, c) + a2r.Call(c, third.ThirdClient.PartLimit, o.Client) } func (o *ThirdApi) PartSize(c *gin.Context) { - a2r.Call(third.ThirdClient.PartSize, o.Client, c) + a2r.Call(c, third.ThirdClient.PartSize, o.Client) } func (o *ThirdApi) InitiateMultipartUpload(c *gin.Context) { opt := setURLPrefixOption(third.ThirdClient.InitiateMultipartUpload, func(req *third.InitiateMultipartUploadReq) error { return setURLPrefix(c, &req.UrlPrefix) }) - a2r.Call(third.ThirdClient.InitiateMultipartUpload, o.Client, c, opt) + a2r.Call(c, third.ThirdClient.InitiateMultipartUpload, o.Client, opt) } func (o *ThirdApi) AuthSign(c *gin.Context) { - a2r.Call(third.ThirdClient.AuthSign, o.Client, c) + a2r.Call(c, third.ThirdClient.AuthSign, o.Client) } func (o *ThirdApi) CompleteMultipartUpload(c *gin.Context) { opt := setURLPrefixOption(third.ThirdClient.CompleteMultipartUpload, func(req *third.CompleteMultipartUploadReq) error { return setURLPrefix(c, &req.UrlPrefix) }) - a2r.Call(third.ThirdClient.CompleteMultipartUpload, o.Client, c, opt) + a2r.Call(c, third.ThirdClient.CompleteMultipartUpload, o.Client, opt) } func (o *ThirdApi) AccessURL(c *gin.Context) { - a2r.Call(third.ThirdClient.AccessURL, o.Client, c) + a2r.Call(c, third.ThirdClient.AccessURL, o.Client) } func (o *ThirdApi) InitiateFormData(c *gin.Context) { - a2r.Call(third.ThirdClient.InitiateFormData, o.Client, c) + a2r.Call(c, third.ThirdClient.InitiateFormData, o.Client) } func (o *ThirdApi) CompleteFormData(c *gin.Context) { opt := setURLPrefixOption(third.ThirdClient.CompleteFormData, func(req *third.CompleteFormDataReq) error { return setURLPrefix(c, &req.UrlPrefix) }) - a2r.Call(third.ThirdClient.CompleteFormData, o.Client, c, opt) + a2r.Call(c, third.ThirdClient.CompleteFormData, o.Client, opt) } func (o *ThirdApi) ObjectRedirect(c *gin.Context) { @@ 
-156,15 +158,15 @@ func (o *ThirdApi) ObjectRedirect(c *gin.Context) { // #################### logs ####################. func (o *ThirdApi) UploadLogs(c *gin.Context) { - a2r.Call(third.ThirdClient.UploadLogs, o.Client, c) + a2r.Call(c, third.ThirdClient.UploadLogs, o.Client) } func (o *ThirdApi) DeleteLogs(c *gin.Context) { - a2r.Call(third.ThirdClient.DeleteLogs, o.Client, c) + a2r.Call(c, third.ThirdClient.DeleteLogs, o.Client) } func (o *ThirdApi) SearchLogs(c *gin.Context) { - a2r.Call(third.ThirdClient.SearchLogs, o.Client, c) + a2r.Call(c, third.ThirdClient.SearchLogs, o.Client) } func (o *ThirdApi) GetPrometheus(c *gin.Context) { diff --git a/internal/api/user.go b/internal/api/user.go index b499f71dc..154e6d908 100644 --- a/internal/api/user.go +++ b/internal/api/user.go @@ -16,52 +16,57 @@ package api import ( "github.com/gin-gonic/gin" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/user" "github.com/openimsdk/tools/a2r" "github.com/openimsdk/tools/apiresp" + "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" ) -type UserApi rpcclient.User +type UserApi struct { + Client user.UserClient + discov discovery.SvcDiscoveryRegistry + config config.RpcRegisterName +} -func NewUserApi(client rpcclient.User) UserApi { - return UserApi(client) +func NewUserApi(client user.UserClient, discov discovery.SvcDiscoveryRegistry, config config.RpcRegisterName) UserApi { + return UserApi{Client: client, discov: discov, config: config} } func (u *UserApi) UserRegister(c *gin.Context) { - a2r.Call(user.UserClient.UserRegister, u.Client, c) + a2r.Call(c, user.UserClient.UserRegister, u.Client) } // UpdateUserInfo is deprecated. 
Use UpdateUserInfoEx func (u *UserApi) UpdateUserInfo(c *gin.Context) { - a2r.Call(user.UserClient.UpdateUserInfo, u.Client, c) + a2r.Call(c, user.UserClient.UpdateUserInfo, u.Client) } func (u *UserApi) UpdateUserInfoEx(c *gin.Context) { - a2r.Call(user.UserClient.UpdateUserInfoEx, u.Client, c) + a2r.Call(c, user.UserClient.UpdateUserInfoEx, u.Client) } func (u *UserApi) SetGlobalRecvMessageOpt(c *gin.Context) { - a2r.Call(user.UserClient.SetGlobalRecvMessageOpt, u.Client, c) + a2r.Call(c, user.UserClient.SetGlobalRecvMessageOpt, u.Client) } func (u *UserApi) GetUsersPublicInfo(c *gin.Context) { - a2r.Call(user.UserClient.GetDesignateUsers, u.Client, c) + a2r.Call(c, user.UserClient.GetDesignateUsers, u.Client) } func (u *UserApi) GetAllUsersID(c *gin.Context) { - a2r.Call(user.UserClient.GetAllUserID, u.Client, c) + a2r.Call(c, user.UserClient.GetAllUserID, u.Client) } func (u *UserApi) AccountCheck(c *gin.Context) { - a2r.Call(user.UserClient.AccountCheck, u.Client, c) + a2r.Call(c, user.UserClient.AccountCheck, u.Client) } func (u *UserApi) GetUsers(c *gin.Context) { - a2r.Call(user.UserClient.GetPaginationUsers, u.Client, c) + a2r.Call(c, user.UserClient.GetPaginationUsers, u.Client) } // GetUsersOnlineStatus Get user online status. @@ -71,7 +76,7 @@ func (u *UserApi) GetUsersOnlineStatus(c *gin.Context) { apiresp.GinError(c, err) return } - conns, err := u.Discov.GetConns(c, u.MessageGateWayRpcName) + conns, err := u.discov.GetConns(c, u.config.MessageGateway) if err != nil { apiresp.GinError(c, err) return @@ -122,7 +127,7 @@ func (u *UserApi) GetUsersOnlineStatus(c *gin.Context) { } func (u *UserApi) UserRegisterCount(c *gin.Context) { - a2r.Call(user.UserClient.UserRegisterCount, u.Client, c) + a2r.Call(c, user.UserClient.UserRegisterCount, u.Client) } // GetUsersOnlineTokenDetail Get user online token details. 
@@ -135,7 +140,7 @@ func (u *UserApi) GetUsersOnlineTokenDetail(c *gin.Context) { apiresp.GinError(c, errs.ErrArgs.WithDetail(err.Error()).Wrap()) return } - conns, err := u.Discov.GetConns(c, u.MessageGateWayRpcName) + conns, err := u.discov.GetConns(c, u.config.MessageGateway) if err != nil { apiresp.GinError(c, err) return @@ -188,52 +193,52 @@ func (u *UserApi) GetUsersOnlineTokenDetail(c *gin.Context) { // SubscriberStatus Presence status of subscribed users. func (u *UserApi) SubscriberStatus(c *gin.Context) { - a2r.Call(user.UserClient.SubscribeOrCancelUsersStatus, u.Client, c) + a2r.Call(c, user.UserClient.SubscribeOrCancelUsersStatus, u.Client) } // GetUserStatus Get the online status of the user. func (u *UserApi) GetUserStatus(c *gin.Context) { - a2r.Call(user.UserClient.GetUserStatus, u.Client, c) + a2r.Call(c, user.UserClient.GetUserStatus, u.Client) } // GetSubscribeUsersStatus Get the online status of subscribers. func (u *UserApi) GetSubscribeUsersStatus(c *gin.Context) { - a2r.Call(user.UserClient.GetSubscribeUsersStatus, u.Client, c) + a2r.Call(c, user.UserClient.GetSubscribeUsersStatus, u.Client) } // ProcessUserCommandAdd user general function add. func (u *UserApi) ProcessUserCommandAdd(c *gin.Context) { - a2r.Call(user.UserClient.ProcessUserCommandAdd, u.Client, c) + a2r.Call(c, user.UserClient.ProcessUserCommandAdd, u.Client) } // ProcessUserCommandDelete user general function delete. func (u *UserApi) ProcessUserCommandDelete(c *gin.Context) { - a2r.Call(user.UserClient.ProcessUserCommandDelete, u.Client, c) + a2r.Call(c, user.UserClient.ProcessUserCommandDelete, u.Client) } // ProcessUserCommandUpdate user general function update. func (u *UserApi) ProcessUserCommandUpdate(c *gin.Context) { - a2r.Call(user.UserClient.ProcessUserCommandUpdate, u.Client, c) + a2r.Call(c, user.UserClient.ProcessUserCommandUpdate, u.Client) } // ProcessUserCommandGet user general function get. 
func (u *UserApi) ProcessUserCommandGet(c *gin.Context) { - a2r.Call(user.UserClient.ProcessUserCommandGet, u.Client, c) + a2r.Call(c, user.UserClient.ProcessUserCommandGet, u.Client) } // ProcessUserCommandGet user general function get all. func (u *UserApi) ProcessUserCommandGetAll(c *gin.Context) { - a2r.Call(user.UserClient.ProcessUserCommandGetAll, u.Client, c) + a2r.Call(c, user.UserClient.ProcessUserCommandGetAll, u.Client) } func (u *UserApi) AddNotificationAccount(c *gin.Context) { - a2r.Call(user.UserClient.AddNotificationAccount, u.Client, c) + a2r.Call(c, user.UserClient.AddNotificationAccount, u.Client) } func (u *UserApi) UpdateNotificationAccountInfo(c *gin.Context) { - a2r.Call(user.UserClient.UpdateNotificationAccountInfo, u.Client, c) + a2r.Call(c, user.UserClient.UpdateNotificationAccountInfo, u.Client) } func (u *UserApi) SearchNotificationAccount(c *gin.Context) { - a2r.Call(user.UserClient.SearchNotificationAccount, u.Client, c) + a2r.Call(c, user.UserClient.SearchNotificationAccount, u.Client) } diff --git a/internal/msggateway/client.go b/internal/msggateway/client.go index 7e1ba6405..1040f2be2 100644 --- a/internal/msggateway/client.go +++ b/internal/msggateway/client.go @@ -18,7 +18,6 @@ import ( "context" "encoding/json" "fmt" - "runtime/debug" "sync" "sync/atomic" "time" @@ -132,7 +131,7 @@ func (c *Client) readMessage() { defer func() { if r := recover(); r != nil { c.closedErr = ErrPanic - fmt.Println("socket have panic err:", r, string(debug.Stack())) + log.ZPanic(c.ctx, "socket have panic err:", errs.ErrPanic(r)) } c.close() }() @@ -377,7 +376,7 @@ func (c *Client) activeHeartbeat(ctx context.Context) { go func() { defer func() { if r := recover(); r != nil { - log.ZPanic(ctx, "activeHeartbeat Panic", r) + log.ZPanic(ctx, "activeHeartbeat Panic", errs.ErrPanic(r)) } }() log.ZDebug(ctx, "server initiative send heartbeat start.") diff --git a/internal/msggateway/hub_server.go b/internal/msggateway/hub_server.go index 
8eff32899..ce63bc1b9 100644 --- a/internal/msggateway/hub_server.go +++ b/internal/msggateway/hub_server.go @@ -16,12 +16,12 @@ package msggateway import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "sync/atomic" "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/sdkws" @@ -35,9 +35,15 @@ import ( ) func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error { - s.LongConnServer.SetDiscoveryRegistry(disCov, config) + userConn, err := disCov.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } + s.userClient = rpcli.NewUserClient(userConn) + if err := s.LongConnServer.SetDiscoveryRegistry(ctx, disCov, config); err != nil { + return err + } msggateway.RegisterMsgGatewayServer(server, s) - s.userRcp = rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) if s.ready != nil { return s.ready(s) } @@ -62,17 +68,16 @@ type Server struct { config *Config pushTerminal map[int]struct{} ready func(srv *Server) error - userRcp rpcclient.UserRpcClient queue *memamq.MemoryQueue + userClient *rpcli.UserClient } func (s *Server) SetLongConnServer(LongConnServer LongConnServer) { s.LongConnServer = LongConnServer } -func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server { +func NewServer(longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server { s := &Server{ - rpcPort: rpcPort, LongConnServer: longConnServer, pushTerminal: make(map[int]struct{}), config: conf, diff --git a/internal/msggateway/init.go b/internal/msggateway/init.go index 
50da06097..ed7e04b7b 100644 --- a/internal/msggateway/init.go +++ b/internal/msggateway/init.go @@ -41,10 +41,6 @@ func Start(ctx context.Context, index int, conf *Config) error { if err != nil { return err } - rpcPort, err := datautil.GetElemByIndex(conf.MsgGateway.RPC.Ports, index) - if err != nil { - return err - } rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build()) if err != nil { return err @@ -57,9 +53,10 @@ func Start(ctx context.Context, index int, conf *Config) error { WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen), ) - hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error { - longServer.online, _ = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges) - return nil + hubServer := NewServer(longServer, conf, func(srv *Server) error { + var err error + longServer.online, err = rpccache.NewOnlineCache(srv.userClient, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges) + return err }) go longServer.ChangeOnlineStatus(4) diff --git a/internal/msggateway/message_handler.go b/internal/msggateway/message_handler.go index 5407ba90c..9b59867d6 100644 --- a/internal/msggateway/message_handler.go +++ b/internal/msggateway/message_handler.go @@ -17,17 +17,15 @@ package msggateway import ( "context" "encoding/json" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "sync" "github.com/go-playground/validator/v10" "google.golang.org/protobuf/proto" - "github.com/openimsdk/open-im-server/v3/pkg/common/config" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/push" "github.com/openimsdk/protocol/sdkws" - "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/utils/jsonutil" ) @@ -102,34 +100,33 @@ func (r *Resp) String() string { } type MessageHandler interface { - GetSeq(context context.Context, data *Req) ([]byte, error) - 
SendMessage(context context.Context, data *Req) ([]byte, error) - SendSignalMessage(context context.Context, data *Req) ([]byte, error) - PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) - GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) - GetSeqMessage(context context.Context, data *Req) ([]byte, error) - UserLogout(context context.Context, data *Req) ([]byte, error) - SetUserDeviceBackground(context context.Context, data *Req) ([]byte, bool, error) + GetSeq(ctx context.Context, data *Req) ([]byte, error) + SendMessage(ctx context.Context, data *Req) ([]byte, error) + SendSignalMessage(ctx context.Context, data *Req) ([]byte, error) + PullMessageBySeqList(ctx context.Context, data *Req) ([]byte, error) + GetConversationsHasReadAndMaxSeq(ctx context.Context, data *Req) ([]byte, error) + GetSeqMessage(ctx context.Context, data *Req) ([]byte, error) + UserLogout(ctx context.Context, data *Req) ([]byte, error) + SetUserDeviceBackground(ctx context.Context, data *Req) ([]byte, bool, error) } var _ MessageHandler = (*GrpcHandler)(nil) type GrpcHandler struct { - msgRpcClient *rpcclient.MessageRpcClient - pushClient *rpcclient.PushRpcClient - validate *validator.Validate + validate *validator.Validate + msgClient *rpcli.MsgClient + pushClient *rpcli.PushMsgServiceClient } -func NewGrpcHandler(validate *validator.Validate, client discovery.SvcDiscoveryRegistry, rpcRegisterName *config.RpcRegisterName) *GrpcHandler { - msgRpcClient := rpcclient.NewMessageRpcClient(client, rpcRegisterName.Msg) - pushRpcClient := rpcclient.NewPushRpcClient(client, rpcRegisterName.Push) +func NewGrpcHandler(validate *validator.Validate, msgClient *rpcli.MsgClient, pushClient *rpcli.PushMsgServiceClient) *GrpcHandler { return &GrpcHandler{ - msgRpcClient: &msgRpcClient, - pushClient: &pushRpcClient, - validate: validate, + validate: validate, + msgClient: msgClient, + pushClient: pushClient, } } -func (g GrpcHandler) GetSeq(ctx 
context.Context, data *Req) ([]byte, error) { +func (g *GrpcHandler) GetSeq(ctx context.Context, data *Req) ([]byte, error) { req := sdkws.GetMaxSeqReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { return nil, errs.WrapMsg(err, "GetSeq: error unmarshaling request", "action", "unmarshal", "dataType", "GetMaxSeqReq") @@ -137,7 +134,7 @@ func (g GrpcHandler) GetSeq(ctx context.Context, data *Req) ([]byte, error) { if err := g.validate.Struct(&req); err != nil { return nil, errs.WrapMsg(err, "GetSeq: validation failed", "action", "validate", "dataType", "GetMaxSeqReq") } - resp, err := g.msgRpcClient.GetMaxSeq(ctx, &req) + resp, err := g.msgClient.MsgClient.GetMaxSeq(ctx, &req) if err != nil { return nil, err } @@ -150,7 +147,7 @@ func (g GrpcHandler) GetSeq(ctx context.Context, data *Req) ([]byte, error) { // SendMessage handles the sending of messages through gRPC. It unmarshals the request data, // validates the message, and then sends it using the message RPC client. -func (g GrpcHandler) SendMessage(ctx context.Context, data *Req) ([]byte, error) { +func (g *GrpcHandler) SendMessage(ctx context.Context, data *Req) ([]byte, error) { var msgData sdkws.MsgData if err := proto.Unmarshal(data.Data, &msgData); err != nil { return nil, errs.WrapMsg(err, "SendMessage: error unmarshaling message data", "action", "unmarshal", "dataType", "MsgData") @@ -161,7 +158,7 @@ func (g GrpcHandler) SendMessage(ctx context.Context, data *Req) ([]byte, error) } req := msg.SendMsgReq{MsgData: &msgData} - resp, err := g.msgRpcClient.SendMsg(ctx, &req) + resp, err := g.msgClient.MsgClient.SendMsg(ctx, &req) if err != nil { return nil, err } @@ -174,8 +171,8 @@ func (g GrpcHandler) SendMessage(ctx context.Context, data *Req) ([]byte, error) return c, nil } -func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]byte, error) { - resp, err := g.msgRpcClient.SendMsg(context, nil) +func (g *GrpcHandler) SendSignalMessage(ctx context.Context, data *Req) 
([]byte, error) { + resp, err := g.msgClient.MsgClient.SendMsg(ctx, nil) if err != nil { return nil, err } @@ -186,7 +183,7 @@ func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]by return c, nil } -func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) { +func (g *GrpcHandler) PullMessageBySeqList(ctx context.Context, data *Req) ([]byte, error) { req := sdkws.PullMessageBySeqsReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "PullMessageBySeqsReq") @@ -194,7 +191,7 @@ func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([ if err := g.validate.Struct(data); err != nil { return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "PullMessageBySeqsReq") } - resp, err := g.msgRpcClient.PullMessageBySeqList(context, &req) + resp, err := g.msgClient.MsgClient.PullMessageBySeqs(ctx, &req) if err != nil { return nil, err } @@ -205,7 +202,7 @@ func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([ return c, nil } -func (g GrpcHandler) GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) { +func (g *GrpcHandler) GetConversationsHasReadAndMaxSeq(ctx context.Context, data *Req) ([]byte, error) { req := msg.GetConversationsHasReadAndMaxSeqReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "GetConversationsHasReadAndMaxSeq") @@ -213,7 +210,7 @@ func (g GrpcHandler) GetConversationsHasReadAndMaxSeq(context context.Context, d if err := g.validate.Struct(data); err != nil { return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetConversationsHasReadAndMaxSeq") } - resp, err := g.msgRpcClient.GetConversationsHasReadAndMaxSeq(context, &req) + resp, err := 
g.msgClient.MsgClient.GetConversationsHasReadAndMaxSeq(ctx, &req) if err != nil { return nil, err } @@ -224,7 +221,7 @@ func (g GrpcHandler) GetConversationsHasReadAndMaxSeq(context context.Context, d return c, nil } -func (g GrpcHandler) GetSeqMessage(context context.Context, data *Req) ([]byte, error) { +func (g *GrpcHandler) GetSeqMessage(ctx context.Context, data *Req) ([]byte, error) { req := msg.GetSeqMessageReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "GetSeqMessage") @@ -232,7 +229,7 @@ func (g GrpcHandler) GetSeqMessage(context context.Context, data *Req) ([]byte, if err := g.validate.Struct(data); err != nil { return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetSeqMessage") } - resp, err := g.msgRpcClient.GetSeqMessage(context, &req) + resp, err := g.msgClient.MsgClient.GetSeqMessage(ctx, &req) if err != nil { return nil, err } @@ -243,12 +240,12 @@ func (g GrpcHandler) GetSeqMessage(context context.Context, data *Req) ([]byte, return c, nil } -func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) { +func (g *GrpcHandler) UserLogout(ctx context.Context, data *Req) ([]byte, error) { req := push.DelUserPushTokenReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "DelUserPushTokenReq") } - resp, err := g.pushClient.DelUserPushToken(context, &req) + resp, err := g.pushClient.PushMsgServiceClient.DelUserPushToken(ctx, &req) if err != nil { return nil, err } @@ -259,7 +256,7 @@ func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, err return c, nil } -func (g GrpcHandler) SetUserDeviceBackground(_ context.Context, data *Req) ([]byte, bool, error) { +func (g *GrpcHandler) SetUserDeviceBackground(ctx context.Context, data *Req) ([]byte, bool, error) { req := 
sdkws.SetAppBackgroundStatusReq{} if err := proto.Unmarshal(data.Data, &req); err != nil { return nil, false, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "SetAppBackgroundStatusReq") diff --git a/internal/msggateway/online.go b/internal/msggateway/online.go index f29869b6e..831ab31d3 100644 --- a/internal/msggateway/online.go +++ b/internal/msggateway/online.go @@ -87,7 +87,7 @@ func (ws *WsServer) ChangeOnlineStatus(concurrent int) { opIdCtx := mcontext.SetOperationID(context.Background(), operationIDPrefix+strconv.FormatInt(count.Add(1), 10)) ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5) defer cancel() - if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil { + if err := ws.userClient.SetUserOnlineStatus(ctx, req); err != nil { log.ZError(ctx, "update user online status", err) } for _, ss := range req.Status { diff --git a/internal/msggateway/ws_server.go b/internal/msggateway/ws_server.go index e6b4f3fa4..208289e9e 100644 --- a/internal/msggateway/ws_server.go +++ b/internal/msggateway/ws_server.go @@ -3,10 +3,7 @@ package msggateway import ( "context" "fmt" - "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" - "github.com/openimsdk/open-im-server/v3/pkg/rpccache" - pbAuth "github.com/openimsdk/protocol/auth" - "github.com/openimsdk/tools/mcontext" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "net/http" "sync" "sync/atomic" @@ -15,12 +12,15 @@ import ( "github.com/go-playground/validator/v10" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" + "github.com/openimsdk/open-im-server/v3/pkg/rpccache" + pbAuth "github.com/openimsdk/protocol/auth" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/tools/discovery" 
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/utils/stringutil" "golang.org/x/sync/errgroup" ) @@ -31,7 +31,7 @@ type LongConnServer interface { GetUserAllCons(userID string) ([]*Client, bool) GetUserPlatformCons(userID string, platform int) ([]*Client, bool, bool) Validate(s any) error - SetDiscoveryRegistry(client discovery.SvcDiscoveryRegistry, config *Config) + SetDiscoveryRegistry(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config) error KickUserConn(client *Client) error UnRegister(c *Client) SetKickHandlerInfo(i *kickHandler) @@ -56,13 +56,13 @@ type WsServer struct { handshakeTimeout time.Duration writeBufferSize int validate *validator.Validate - userClient *rpcclient.UserRpcClient - authClient *rpcclient.Auth disCov discovery.SvcDiscoveryRegistry Compressor //Encoder MessageHandler webhookClient *webhook.Client + userClient *rpcli.UserClient + authClient *rpcli.AuthClient } type kickHandler struct { @@ -71,12 +71,28 @@ type kickHandler struct { newClient *Client } -func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry, config *Config) { - ws.MessageHandler = NewGrpcHandler(ws.validate, disCov, &config.Share.RpcRegisterName) - u := rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) - ws.authClient = rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth) - ws.userClient = &u +func (ws *WsServer) SetDiscoveryRegistry(ctx context.Context, disCov discovery.SvcDiscoveryRegistry, config *Config) error { + userConn, err := disCov.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } + pushConn, err := disCov.GetConn(ctx, config.Share.RpcRegisterName.Push) + if err != nil { + return err + } + authConn, err := disCov.GetConn(ctx, config.Share.RpcRegisterName.Auth) + if err != nil { + return err + } + msgConn, err := disCov.GetConn(ctx, 
config.Share.RpcRegisterName.Msg) + if err != nil { + return err + } + ws.userClient = rpcli.NewUserClient(userConn) + ws.authClient = rpcli.NewAuthClient(authConn) + ws.MessageHandler = NewGrpcHandler(ws.validate, rpcli.NewMsgClient(msgConn), rpcli.NewPushMsgServiceClient(pushConn)) ws.disCov = disCov + return nil } //func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) { @@ -311,7 +327,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien []string{newClient.ctx.GetOperationID(), newClient.ctx.GetUserID(), constant.PlatformIDToName(newClient.PlatformID), newClient.ctx.GetConnID()}, ) - if _, err := ws.authClient.KickTokens(ctx, kickTokens); err != nil { + if err := ws.authClient.KickTokens(ctx, kickTokens); err != nil { log.ZWarn(newClient.ctx, "kickTokens err", err) } } @@ -338,7 +354,12 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien []string{newClient.ctx.GetOperationID(), newClient.ctx.GetUserID(), constant.PlatformIDToName(newClient.PlatformID), newClient.ctx.GetConnID()}, ) - if _, err := ws.authClient.InvalidateToken(ctx, newClient.token, newClient.UserID, newClient.PlatformID); err != nil { + req := &pbAuth.InvalidateTokenReq{ + PreservedToken: newClient.token, + UserID: newClient.UserID, + PlatformID: int32(newClient.PlatformID), + } + if err := ws.authClient.InvalidateToken(ctx, req); err != nil { log.ZWarn(newClient.ctx, "InvalidateToken err", err, "userID", newClient.UserID, "platformID", newClient.PlatformID) } diff --git a/internal/msgtransfer/init.go b/internal/msgtransfer/init.go index 4e2ae29f1..42612e294 100644 --- a/internal/msgtransfer/init.go +++ b/internal/msgtransfer/init.go @@ -33,7 +33,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/config" discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - 
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mw" @@ -82,11 +81,11 @@ func Start(ctx context.Context, index int, config *Config) error { client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) - msgModel := redis.NewMsgCache(rdb) msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB()) if err != nil { return err } + msgModel := redis.NewMsgCache(rdb, msgDocModel) seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB()) if err != nil { return err @@ -101,9 +100,7 @@ func Start(ctx context.Context, index int, config *Config) error { if err != nil { return err } - conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) - groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) - historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgTransferDatabase, &conversationRpcClient, &groupRpcClient) + historyCH, err := NewOnlineHistoryRedisConsumerHandler(ctx, client, config, msgTransferDatabase) if err != nil { return err } @@ -136,11 +133,6 @@ func (m *MsgTransfer) Start(index int, config *Config) error { if config.MsgTransfer.Prometheus.Enable { go func() { - defer func() { - if r := recover(); r != nil { - log.ZPanic(m.ctx, "MsgTransfer Start Panic", r) - } - }() prometheusPort, err := datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index) if err != nil { netErr = err diff --git a/internal/msgtransfer/online_history_msg_handler.go b/internal/msgtransfer/online_history_msg_handler.go index 0104f6633..fce0297ee 100644 --- a/internal/msgtransfer/online_history_msg_handler.go +++ b/internal/msgtransfer/online_history_msg_handler.go @@ -18,6 +18,8 @@ import ( "context" "encoding/json" "errors" + 
"github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "github.com/openimsdk/tools/discovery" "strconv" "strings" "sync" @@ -27,12 +29,11 @@ import ( "github.com/IBM/sarama" "github.com/go-redis/redis" - "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/tools/batcher" "github.com/openimsdk/protocol/constant" + pbconv "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" @@ -69,21 +70,32 @@ type OnlineHistoryRedisConsumerHandler struct { redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage] msgTransferDatabase controller.MsgTransferDatabase - conversationRpcClient *rpcclient.ConversationRpcClient - groupRpcClient *rpcclient.GroupRpcClient conversationUserHasReadChan chan *userHasReadSeq wg sync.WaitGroup + + groupClient *rpcli.GroupClient + conversationClient *rpcli.ConversationClient } -func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase, - conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) { +func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config, database controller.MsgTransferDatabase) (*OnlineHistoryRedisConsumerHandler, error) { + kafkaConf := config.KafkaConfig historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false) if err != nil { return nil, err } + groupConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Group) + if err != nil { + return nil, err + } + conversationConn, err := client.GetConn(ctx, 
config.Share.RpcRegisterName.Conversation) + if err != nil { + return nil, err + } var och OnlineHistoryRedisConsumerHandler och.msgTransferDatabase = database och.conversationUserHasReadChan = make(chan *userHasReadSeq, hasReadChanBuffer) + och.groupClient = rpcli.NewGroupClient(groupConn) + och.conversationClient = rpcli.NewConversationClient(conversationConn) och.wg.Add(1) b := batcher.New[sarama.ConsumerMessage]( @@ -103,25 +115,21 @@ func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database cont } b.Do = och.do och.redisMessageBatches = b - och.conversationRpcClient = conversationRpcClient - och.groupRpcClient = groupRpcClient och.historyConsumerGroup = historyConsumerGroup - return &och, err + return &och, nil } func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) { ctx = mcontext.WithTriggerIDContext(ctx, val.TriggerID()) ctxMessages := och.parseConsumerMessages(ctx, val.Val()) ctx = withAggregationCtx(ctx, ctxMessages) - log.ZInfo(ctx, "msg arrived channel", "channel id", channelID, "msgList length", len(ctxMessages), - "key", val.Key()) + log.ZInfo(ctx, "msg arrived channel", "channel id", channelID, "msgList length", len(ctxMessages), "key", val.Key()) och.doSetReadSeq(ctx, ctxMessages) storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList := och.categorizeMessageLists(ctxMessages) log.ZDebug(ctx, "number of categorized messages", "storageMsgList", len(storageMsgList), "notStorageMsgList", - len(notStorageMsgList), "storageNotificationList", len(storageNotificationList), "notStorageNotificationList", - len(notStorageNotificationList)) + len(notStorageMsgList), "storageNotificationList", len(storageNotificationList), "notStorageNotificationList", len(notStorageNotificationList)) conversationIDMsg := msgprocessor.GetChatConversationIDByMsg(ctxMessages[0].message) conversationIDNotification := 
msgprocessor.GetNotificationConversationIDByMsg(ctxMessages[0].message) @@ -285,22 +293,27 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key case constant.ReadGroupChatType: log.ZDebug(ctx, "group chat first create conversation", "conversationID", conversationID) - userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID) + + userIDs, err := och.groupClient.GetGroupMemberUserIDs(ctx, msg.GroupID) if err != nil { log.ZWarn(ctx, "get group member ids error", err, "conversationID", conversationID) } else { log.ZInfo(ctx, "GetGroupMemberIDs end") - if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, - msg.GroupID, userIDs); err != nil { + if err := och.conversationClient.CreateGroupChatConversations(ctx, msg.GroupID, userIDs); err != nil { log.ZWarn(ctx, "single chat first create conversation error", err, "conversationID", conversationID) } } case constant.SingleChatType, constant.NotificationChatType: - if err := och.conversationRpcClient.SingleChatFirstCreateConversation(ctx, msg.RecvID, - msg.SendID, conversationID, msg.SessionType); err != nil { + req := &pbconv.CreateSingleChatConversationsReq{ + RecvID: msg.RecvID, + SendID: msg.SendID, + ConversationID: conversationID, + ConversationType: msg.SessionType, + } + if err := och.conversationClient.CreateSingleChatConversations(ctx, req); err != nil { log.ZWarn(ctx, "single chat or notification first create conversation error", err, "conversationID", conversationID, "sessionType", msg.SessionType) } @@ -349,7 +362,7 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con func (och *OnlineHistoryRedisConsumerHandler) HandleUserHasReadSeqMessages(ctx context.Context) { defer func() { if r := recover(); r != nil { - log.ZPanic(ctx, "HandleUserHasReadSeqMessages Panic", r) + log.ZPanic(ctx, "HandleUserHasReadSeqMessages Panic", errs.ErrPanic(r)) } }() diff --git a/internal/msgtransfer/online_msg_to_mongo_handler.go 
b/internal/msgtransfer/online_msg_to_mongo_handler.go index 82002c26b..d8836d54e 100644 --- a/internal/msgtransfer/online_msg_to_mongo_handler.go +++ b/internal/msgtransfer/online_msg_to_mongo_handler.go @@ -77,27 +77,13 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont for _, msg := range msgFromMQ.MsgData { seqs = append(seqs, msg.Seq) } - err = mc.msgTransferDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs) - if err != nil { - log.ZError( - ctx, - "remove cache msg from redis err", - err, - "msg", - msgFromMQ.MsgData, - "conversationID", - msgFromMQ.ConversationID, - ) - } } -func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } +func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } + func (*OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } -func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim( - sess sarama.ConsumerGroupSession, - claim sarama.ConsumerGroupClaim, -) error { // an instance in the consumer group +func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset", claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) for msg := range claim.Messages() { diff --git a/internal/push/push.go b/internal/push/push.go index 7f14bced7..6f8067fc8 100644 --- a/internal/push/push.go +++ b/internal/push/push.go @@ -32,11 +32,8 @@ type Config struct { LocalCacheConfig config.LocalCache Discovery config.Discovery FcmConfigPath string -} -func (p pushServer) PushMsg(ctx context.Context, req *pbpush.PushMsgReq) (*pbpush.PushMsgResp, error) { - //todo reserved Interface - return nil, nil + runTimeEnv string } func (p pushServer) DelUserPushToken(ctx 
context.Context, @@ -60,7 +57,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig) - consumer, err := NewConsumerHandler(config, database, offlinePusher, rdb, client) + consumer, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client) if err != nil { return err } diff --git a/internal/push/push_handler.go b/internal/push/push_handler.go index c1d1ac2f9..9defb5969 100644 --- a/internal/push/push_handler.go +++ b/internal/push/push_handler.go @@ -3,7 +3,7 @@ package push import ( "context" "encoding/json" - + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "math/rand" "strconv" "time" @@ -16,7 +16,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/rpccache" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msggateway" @@ -41,14 +40,15 @@ type ConsumerHandler struct { onlineCache *rpccache.OnlineCache groupLocalCache *rpccache.GroupLocalCache conversationLocalCache *rpccache.ConversationLocalCache - msgRpcClient rpcclient.MessageRpcClient - conversationRpcClient rpcclient.ConversationRpcClient - groupRpcClient rpcclient.GroupRpcClient webhookClient *webhook.Client config *Config + userClient *rpcli.UserClient + groupClient *rpcli.GroupClient + msgClient *rpcli.MsgClient + conversationClient *rpcli.ConversationClient } -func NewConsumerHandler(config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, +func NewConsumerHandler(ctx context.Context, config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, 
error) { var consumerHandler ConsumerHandler var err error @@ -57,20 +57,35 @@ func NewConsumerHandler(config *Config, database controller.PushDatabase, offlin if err != nil { return nil, err } - - userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return nil, err + } + groupConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Group) + if err != nil { + return nil, err + } + msgConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Msg) + if err != nil { + return nil, err + } + conversationConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Conversation) + if err != nil { + return nil, err + } + consumerHandler.userClient = rpcli.NewUserClient(userConn) + consumerHandler.groupClient = rpcli.NewGroupClient(groupConn) + consumerHandler.msgClient = rpcli.NewMsgClient(msgConn) + consumerHandler.conversationClient = rpcli.NewConversationClient(conversationConn) consumerHandler.offlinePusher = offlinePusher consumerHandler.onlinePusher = NewOnlinePusher(client, config) - consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) - consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, &config.LocalCacheConfig, rdb) - consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) - consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) - consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb) + consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupClient, &config.LocalCacheConfig, rdb) + consumerHandler.conversationLocalCache = 
rpccache.NewConversationLocalCache(consumerHandler.conversationClient, &config.LocalCacheConfig, rdb) consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) consumerHandler.config = config consumerHandler.pushDatabase = database - consumerHandler.onlineCache, err = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil) + consumerHandler.onlineCache, err = rpccache.NewOnlineCache(consumerHandler.userClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil) if err != nil { return nil, err } @@ -327,7 +342,7 @@ func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID stri ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0]) } defer func(groupID string) { - if err = c.groupRpcClient.DismissGroup(ctx, groupID); err != nil { + if err := c.groupClient.DismissGroup(ctx, groupID, true); err != nil { log.ZError(ctx, "DismissGroup Notification clear members", err, "groupID", groupID) } }(groupID) @@ -353,10 +368,7 @@ func (c *ConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData func (c *ConsumerHandler) filterGroupMessageOfflinePush(ctx context.Context, groupID string, msg *sdkws.MsgData, offlinePushUserIDs []string) (userIDs []string, err error) { - - //todo local cache Obtain the difference set through local comparison. 
- needOfflinePushUserIDs, err := c.conversationRpcClient.GetConversationOfflinePushUserIDs( - ctx, conversationutil.GenGroupConversationID(groupID), offlinePushUserIDs) + needOfflinePushUserIDs, err := c.conversationClient.GetConversationOfflinePushUserIDs(ctx, conversationutil.GenGroupConversationID(groupID), offlinePushUserIDs) if err != nil { return nil, err } @@ -410,12 +422,11 @@ func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, conten func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) - maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) + maxSeq, err := c.msgClient.GetConversationMaxSeq(ctx, conversationID) if err != nil { return err } - - return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq) + return c.conversationClient.SetConversationMaxSeq(ctx, conversationID, userIDs, maxSeq) } func unmarshalNotificationElem(bytes []byte, t any) error { @@ -423,6 +434,5 @@ func unmarshalNotificationElem(bytes []byte, t any) error { if err := json.Unmarshal(bytes, ¬ification); err != nil { return err } - return json.Unmarshal([]byte(notification.Detail), t) } diff --git a/internal/rpc/auth/auth.go b/internal/rpc/auth/auth.go index f0f4a022d..9df012e86 100644 --- a/internal/rpc/auth/auth.go +++ b/internal/rpc/auth/auth.go @@ -17,6 +17,7 @@ package auth import ( "context" "errors" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/common/config" redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" @@ -28,7 +29,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - 
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" pbauth "github.com/openimsdk/protocol/auth" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msggateway" @@ -42,9 +42,9 @@ import ( type authServer struct { pbauth.UnimplementedAuthServer authDatabase controller.AuthDatabase - userRpcClient *rpcclient.UserRpcClient RegisterCenter discovery.SvcDiscoveryRegistry config *Config + userClient *rpcli.UserClient } type Config struct { @@ -59,9 +59,11 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } - userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } pbauth.RegisterAuthServer(server, &authServer{ - userRpcClient: &userRpcClient, RegisterCenter: client, authDatabase: controller.NewAuthDatabase( redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire), @@ -70,7 +72,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg config.Share.MultiLogin, config.Share.IMAdminUserID, ), - config: config, + config: config, + userClient: rpcli.NewUserClient(userConn), }) return nil } @@ -86,7 +89,7 @@ func (s *authServer) GetAdminToken(ctx context.Context, req *pbauth.GetAdminToke } - if _, err := s.userRpcClient.GetUserInfo(ctx, req.UserID); err != nil { + if err := s.userClient.CheckUser(ctx, []string{req.UserID}); err != nil { return nil, err } @@ -115,7 +118,7 @@ func (s *authServer) GetUserToken(ctx context.Context, req *pbauth.GetUserTokenR if authverify.IsManagerUserID(req.UserID, s.config.Share.IMAdminUserID) { return nil, errs.ErrNoPermission.WrapMsg("don't get Admin token") } - if _, err := s.userRpcClient.GetUserInfo(ctx, req.UserID); err != nil { + if err := s.userClient.CheckUser(ctx, []string{req.UserID}); err != nil { return nil, err } token, err := 
s.authDatabase.CreateToken(ctx, req.UserID, int(req.PlatformID)) @@ -130,7 +133,7 @@ func (s *authServer) GetUserToken(ctx context.Context, req *pbauth.GetUserTokenR func (s *authServer) parseToken(ctx context.Context, tokensString string) (claims *tokenverify.Claims, err error) { claims, err = tokenverify.GetClaimFromToken(tokensString, authverify.Secret(s.config.Share.Secret)) if err != nil { - return nil, errs.Wrap(err) + return nil, err } isAdmin := authverify.IsManagerUserID(claims.UserID, s.config.Share.IMAdminUserID) if isAdmin { @@ -156,10 +159,7 @@ func (s *authServer) parseToken(ctx context.Context, tokensString string) (claim return nil, servererrs.ErrTokenNotExist.Wrap() } -func (s *authServer) ParseToken( - ctx context.Context, - req *pbauth.ParseTokenReq, -) (resp *pbauth.ParseTokenResp, err error) { +func (s *authServer) ParseToken(ctx context.Context, req *pbauth.ParseTokenReq) (resp *pbauth.ParseTokenResp, err error) { resp = &pbauth.ParseTokenResp{} claims, err := s.parseToken(ctx, req.Token) if err != nil { diff --git a/internal/rpc/conversation/conversation.go b/internal/rpc/conversation/conversation.go index 6a8a6d800..bfc925008 100644 --- a/internal/rpc/conversation/conversation.go +++ b/internal/rpc/conversation/conversation.go @@ -16,8 +16,7 @@ package conversation import ( "context" - "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" - pbmsg "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "sort" "time" @@ -27,12 +26,12 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" dbModel "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/localcache" + "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" 
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/constant" pbconversation "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/protocol/sdkws" @@ -46,13 +45,14 @@ import ( type conversationServer struct { pbconversation.UnimplementedConversationServer - msgRpcClient *rpcclient.MessageRpcClient - user *rpcclient.UserRpcClient - groupRpcClient *rpcclient.GroupRpcClient conversationDatabase controller.ConversationDatabase conversationNotificationSender *ConversationNotificationSender config *Config + + userClient *rpcli.UserClient + msgClient *rpcli.MsgClient + groupClient *rpcli.GroupClient } type Config struct { @@ -78,17 +78,27 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } - groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) - msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) - userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } + groupConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Group) + if err != nil { + return err + } + msgConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Msg) + if err != nil { + return err + } + msgClient := rpcli.NewMsgClient(msgConn) localcache.InitLocalCache(&config.LocalCacheConfig) pbconversation.RegisterConversationServer(server, &conversationServer{ - msgRpcClient: &msgRpcClient, - user: &userRpcClient, - conversationNotificationSender: NewConversationNotificationSender(&config.NotificationConfig, &msgRpcClient), - groupRpcClient: &groupRpcClient, + conversationNotificationSender: NewConversationNotificationSender(&config.NotificationConfig, msgClient), conversationDatabase: 
controller.NewConversationDatabase(conversationDB, redis.NewConversationRedis(rdb, &config.LocalCacheConfig, redis.GetRocksCacheOptions(), conversationDB), mgocli.GetTx()), + userClient: rpcli.NewUserClient(userConn), + groupClient: rpcli.NewGroupClient(groupConn), + msgClient: msgClient, }) return nil } @@ -125,13 +135,12 @@ func (c *conversationServer) GetSortedConversationList(ctx context.Context, req if len(conversations) == 0 { return nil, errs.ErrRecordNotFound.Wrap() } - - maxSeqs, err := c.msgRpcClient.GetMaxSeqs(ctx, conversationIDs) + maxSeqs, err := c.msgClient.GetMaxSeqs(ctx, conversationIDs) if err != nil { return nil, err } - chatLogs, err := c.msgRpcClient.GetMsgByConversationIDs(ctx, conversationIDs, maxSeqs) + chatLogs, err := c.msgClient.GetMsgByConversationIDs(ctx, conversationIDs, maxSeqs) if err != nil { return nil, err } @@ -141,7 +150,7 @@ func (c *conversationServer) GetSortedConversationList(ctx context.Context, req return nil, err } - hasReadSeqs, err := c.msgRpcClient.GetHasReadSeqs(ctx, req.UserID, conversationIDs) + hasReadSeqs, err := c.msgClient.GetHasReadSeqs(ctx, conversationIDs, req.UserID) if err != nil { return nil, err } @@ -230,7 +239,7 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver } if req.Conversation.ConversationType == constant.WriteGroupChatType { - groupInfo, err := c.groupRpcClient.GetGroupInfo(ctx, req.Conversation.GroupID) + groupInfo, err := c.groupClient.GetGroupInfo(ctx, req.Conversation.GroupID) if err != nil { return nil, err } @@ -354,7 +363,15 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver needUpdateUsersList = append(needUpdateUsersList, userID) } } + if len(m) != 0 && len(needUpdateUsersList) != 0 { + if err := c.conversationDatabase.SetUsersConversationFieldTx(ctx, needUpdateUsersList, &conversation, m); err != nil { + return nil, err + } + for _, v := range needUpdateUsersList { + 
c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID}) + } + } if req.Conversation.IsPrivateChat != nil && req.Conversation.ConversationType != constant.ReadGroupChatType { var conversations []*dbModel.Conversation for _, ownerUserID := range req.UserIDs { @@ -372,16 +389,6 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver c.conversationNotificationSender.ConversationSetPrivateNotification(ctx, userID, req.Conversation.UserID, req.Conversation.IsPrivateChat.Value, req.Conversation.ConversationID) } - } else { - if len(m) != 0 && len(needUpdateUsersList) != 0 { - if err := c.conversationDatabase.SetUsersConversationFieldTx(ctx, needUpdateUsersList, &conversation, m); err != nil { - return nil, err - } - - for _, v := range needUpdateUsersList { - c.conversationNotificationSender.ConversationChangeNotification(ctx, v, []string{req.Conversation.ConversationID}) - } - } } return &pbconversation.SetConversationsResp{}, nil @@ -436,14 +443,14 @@ func (c *conversationServer) CreateGroupChatConversations(ctx context.Context, r return nil, err } conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, req.GroupID) - if _, err := c.msgRpcClient.Client.SetUserConversationMaxSeq(ctx, &pbmsg.SetUserConversationMaxSeqReq{ConversationID: conversationID, OwnerUserID: req.UserIDs, MaxSeq: 0}); err != nil { + if err := c.msgClient.SetUserConversationMaxSeq(ctx, conversationID, req.UserIDs, 0); err != nil { return nil, err } return &pbconversation.CreateGroupChatConversationsResp{}, nil } func (c *conversationServer) SetConversationMaxSeq(ctx context.Context, req *pbconversation.SetConversationMaxSeqReq) (*pbconversation.SetConversationMaxSeqResp, error) { - if _, err := c.msgRpcClient.Client.SetUserConversationMaxSeq(ctx, &pbmsg.SetUserConversationMaxSeqReq{ConversationID: req.ConversationID, OwnerUserID: req.OwnerUserID, MaxSeq: req.MaxSeq}); err != nil { + 
if err := c.msgClient.SetUserConversationMaxSeq(ctx, req.ConversationID, req.OwnerUserID, req.MaxSeq); err != nil { return nil, err } if err := c.conversationDatabase.UpdateUsersConversationField(ctx, req.OwnerUserID, req.ConversationID, @@ -457,7 +464,7 @@ func (c *conversationServer) SetConversationMaxSeq(ctx context.Context, req *pbc } func (c *conversationServer) SetConversationMinSeq(ctx context.Context, req *pbconversation.SetConversationMinSeqReq) (*pbconversation.SetConversationMinSeqResp, error) { - if _, err := c.msgRpcClient.Client.SetUserConversationMinSeq(ctx, &pbmsg.SetUserConversationMinSeqReq{ConversationID: req.ConversationID, OwnerUserID: req.OwnerUserID, MinSeq: req.MinSeq}); err != nil { + if err := c.msgClient.SetUserConversationMin(ctx, req.ConversationID, req.OwnerUserID, req.MinSeq); err != nil { return nil, err } if err := c.conversationDatabase.UpdateUsersConversationField(ctx, req.OwnerUserID, req.ConversationID, @@ -567,7 +574,7 @@ func (c *conversationServer) getConversationInfo( } } if len(sendIDs) != 0 { - sendInfos, err := c.user.GetUsersInfo(ctx, sendIDs) + sendInfos, err := c.userClient.GetUsersInfo(ctx, sendIDs) if err != nil { return nil, err } @@ -576,7 +583,7 @@ func (c *conversationServer) getConversationInfo( } } if len(groupIDs) != 0 { - groupInfos, err := c.groupRpcClient.GetGroupInfos(ctx, groupIDs, false) + groupInfos, err := c.groupClient.GetGroupsInfo(ctx, groupIDs) if err != nil { return nil, err } @@ -753,3 +760,51 @@ func (c *conversationServer) GetPinnedConversationIDs(ctx context.Context, req * } return &pbconversation.GetPinnedConversationIDsResp{ConversationIDs: conversationIDs}, nil } + +func (c *conversationServer) ClearUserConversationMsg(ctx context.Context, req *pbconversation.ClearUserConversationMsgReq) (*pbconversation.ClearUserConversationMsgResp, error) { + conversations, err := c.conversationDatabase.FindRandConversation(ctx, req.Timestamp, int(req.Limit)) + if err != nil { + return nil, err + } + 
latestMsgDestructTime := time.UnixMilli(req.Timestamp) + for i, conversation := range conversations { + if conversation.IsMsgDestruct == false || conversation.MsgDestructTime == 0 { + continue + } + seq, err := c.msgClient.GetLastMessageSeqByTime(ctx, conversation.ConversationID, req.Timestamp-conversation.MsgDestructTime) + if err != nil { + return nil, err + } + if seq <= 0 { + log.ZDebug(ctx, "ClearUserConversationMsg GetLastMessageSeqByTime seq <= 0", "index", i, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID, "msgDestructTime", conversation.MsgDestructTime, "seq", seq) + if err := c.setConversationMinSeqAndLatestMsgDestructTime(ctx, conversation.ConversationID, conversation.OwnerUserID, -1, latestMsgDestructTime); err != nil { + return nil, err + } + continue + } + seq++ + if err := c.setConversationMinSeqAndLatestMsgDestructTime(ctx, conversation.ConversationID, conversation.OwnerUserID, seq, latestMsgDestructTime); err != nil { + return nil, err + } + log.ZDebug(ctx, "ClearUserConversationMsg set min seq", "index", i, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID, "seq", seq, "msgDestructTime", conversation.MsgDestructTime) + } + return &pbconversation.ClearUserConversationMsgResp{Count: int32(len(conversations))}, nil +} + +func (c *conversationServer) setConversationMinSeqAndLatestMsgDestructTime(ctx context.Context, conversationID string, ownerUserID string, minSeq int64, latestMsgDestructTime time.Time) error { + update := map[string]any{ + "latest_msg_destruct_time": latestMsgDestructTime, + } + if minSeq >= 0 { + if err := c.msgClient.SetUserConversationMin(ctx, conversationID, []string{ownerUserID}, minSeq); err != nil { + return err + } + update["min_seq"] = minSeq + } + + if err := c.conversationDatabase.UpdateUsersConversationField(ctx, []string{ownerUserID}, conversationID, update); err != nil { + return err + } + 
c.conversationNotificationSender.ConversationChangeNotification(ctx, ownerUserID, []string{conversationID}) + return nil +} diff --git a/internal/rpc/conversation/notification.go b/internal/rpc/conversation/notification.go index 994e1d57a..c6368b916 100644 --- a/internal/rpc/conversation/notification.go +++ b/internal/rpc/conversation/notification.go @@ -16,9 +16,11 @@ package conversation import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "github.com/openimsdk/protocol/msg" "github.com/openimsdk/open-im-server/v3/pkg/common/config" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/notification" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/sdkws" ) @@ -27,8 +29,10 @@ type ConversationNotificationSender struct { *rpcclient.NotificationSender } -func NewConversationNotificationSender(conf *config.Notification, msgRpcClient *rpcclient.MessageRpcClient) *ConversationNotificationSender { - return &ConversationNotificationSender{rpcclient.NewNotificationSender(conf, rpcclient.WithRpcClient(msgRpcClient))} +func NewConversationNotificationSender(conf *config.Notification, msgClient *rpcli.MsgClient) *ConversationNotificationSender { + return &ConversationNotificationSender{rpcclient.NewNotificationSender(conf, rpcclient.WithRpcClient(func(ctx context.Context, req *msg.SendMsgReq) (*msg.SendMsgResp, error) { + return msgClient.SendMsg(ctx, req) + }))} } // SetPrivate invote. 
diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index 9e610df0f..649027fb8 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -17,6 +17,7 @@ package group import ( "context" "fmt" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "math/big" "math/rand" "strconv" @@ -36,9 +37,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/grouphash" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" + "github.com/openimsdk/open-im-server/v3/pkg/notification/grouphash" "github.com/openimsdk/protocol/constant" pbconversation "github.com/openimsdk/protocol/conversation" pbgroup "github.com/openimsdk/protocol/group" @@ -58,13 +57,13 @@ import ( type groupServer struct { pbgroup.UnimplementedGroupServer - db controller.GroupDatabase - user rpcclient.UserRpcClient - notification *GroupNotificationSender - conversationRpcClient rpcclient.ConversationRpcClient - msgRpcClient rpcclient.MessageRpcClient - config *Config - webhookClient *webhook.Client + db controller.GroupDatabase + notification *NotificationSender + config *Config + webhookClient *webhook.Client + userClient *rpcli.UserClient + msgClient *rpcli.MsgClient + conversationClient *rpcli.ConversationClient } type Config struct { @@ -99,32 +98,33 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } - userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) - msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) - conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) - var gs groupServer - 
database := controller.NewGroupDatabase(rdb, &config.LocalCacheConfig, groupDB, groupMemberDB, groupRequestDB, mgocli.GetTx(), grouphash.NewGroupHashFromGroupServer(&gs)) - gs.db = database - gs.user = userRpcClient - gs.notification = NewGroupNotificationSender( - database, - &msgRpcClient, - &userRpcClient, - &conversationRpcClient, - config, - func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) { - users, err := userRpcClient.GetUsersInfo(ctx, userIDs) - if err != nil { - return nil, err - } - return datautil.Slice(users, func(e *sdkws.UserInfo) notification.CommonUser { return e }), nil - }, - ) + + //userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) + //msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) + //conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) + + userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } + msgConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Msg) + if err != nil { + return err + } + conversationConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Conversation) + if err != nil { + return err + } + gs := groupServer{ + config: config, + webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), + userClient: rpcli.NewUserClient(userConn), + msgClient: rpcli.NewMsgClient(msgConn), + conversationClient: rpcli.NewConversationClient(conversationConn), + } + gs.db = controller.NewGroupDatabase(rdb, &config.LocalCacheConfig, groupDB, groupMemberDB, groupRequestDB, mgocli.GetTx(), grouphash.NewGroupHashFromGroupServer(&gs)) + gs.notification = NewNotificationSender(gs.db, config, gs.userClient, gs.msgClient, gs.conversationClient) localcache.InitLocalCache(&config.LocalCacheConfig) - gs.conversationRpcClient = conversationRpcClient - gs.msgRpcClient = msgRpcClient - gs.config 
= config - gs.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) pbgroup.RegisterGroupServer(server, &gs) return nil } @@ -168,19 +168,6 @@ func (g *groupServer) CheckGroupAdmin(ctx context.Context, groupID string) error return nil } -func (g *groupServer) GetPublicUserInfoMap(ctx context.Context, userIDs []string) (map[string]*sdkws.PublicUserInfo, error) { - if len(userIDs) == 0 { - return map[string]*sdkws.PublicUserInfo{}, nil - } - users, err := g.user.GetPublicUserInfos(ctx, userIDs) - if err != nil { - return nil, err - } - return datautil.SliceToMapAny(users, func(e *sdkws.PublicUserInfo) (string, *sdkws.PublicUserInfo) { - return e.UserID, e - }), nil -} - func (g *groupServer) IsNotFound(err error) bool { return errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) } @@ -222,7 +209,6 @@ func (g *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR return nil, errs.ErrArgs.WrapMsg("no group owner") } if err := authverify.CheckAccessV3(ctx, req.OwnerUserID, g.config.Share.IMAdminUserID); err != nil { - return nil, err } userIDs := append(append(req.MemberUserIDs, req.AdminUserIDs...), req.OwnerUserID) @@ -235,7 +221,7 @@ func (g *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR return nil, errs.ErrArgs.WrapMsg("group member repeated") } - userMap, err := g.user.GetUsersInfoMap(ctx, userIDs) + userMap, err := g.userClient.GetUsersInfoMap(ctx, userIDs) if err != nil { return nil, err } @@ -386,7 +372,7 @@ func (g *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite return nil, servererrs.ErrDismissedAlready.WrapMsg("group dismissed checking group status found it dismissed") } - userMap, err := g.user.GetUsersInfoMap(ctx, req.InvitedUserIDs) + userMap, err := g.userClient.GetUsersInfoMap(ctx, req.InvitedUserIDs) if err != nil { return nil, err } @@ -697,7 +683,7 @@ func (g *groupServer) GetGroupApplicationList(ctx context.Context, req *pbgroup. 
userIDs = append(userIDs, gr.UserID) } userIDs = datautil.Distinct(userIDs) - userMap, err := g.user.GetPublicUserInfoMap(ctx, userIDs) + userMap, err := g.userClient.GetUsersInfoMap(ctx, userIDs) if err != nil { return nil, err } @@ -809,7 +795,7 @@ func (g *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup } else if !g.IsNotFound(err) { return nil, err } - if _, err := g.user.GetPublicUserInfo(ctx, req.FromUserID); err != nil { + if err := g.userClient.CheckUser(ctx, []string{req.FromUserID}); err != nil { return nil, err } var member *model.GroupMember @@ -853,7 +839,7 @@ func (g *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup } func (g *groupServer) JoinGroup(ctx context.Context, req *pbgroup.JoinGroupReq) (*pbgroup.JoinGroupResp, error) { - user, err := g.user.GetUserInfo(ctx, req.InviterUserID) + user, err := g.userClient.GetUserInfo(ctx, req.InviterUserID) if err != nil { return nil, err } @@ -959,12 +945,12 @@ func (g *groupServer) QuitGroup(ctx context.Context, req *pbgroup.QuitGroupReq) } func (g *groupServer) deleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { - conevrsationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) - maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, conevrsationID) + conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) + maxSeq, err := g.msgClient.GetConversationMaxSeq(ctx, conversationID) if err != nil { return err } - return g.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conevrsationID, maxSeq) + return g.conversationClient.SetConversationMaxSeq(ctx, conversationID, userIDs, maxSeq) } func (g *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInfoReq) (*pbgroup.SetGroupInfoResp, error) { @@ -1040,7 +1026,7 @@ func (g *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf return } 
conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} - if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { + if err := g.conversationClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation) } }() @@ -1153,8 +1139,7 @@ func (g *groupServer) SetGroupInfoEx(ctx context.Context, req *pbgroup.SetGroupI } conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} - - if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { + if err := g.conversationClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation) } }() @@ -1306,7 +1291,7 @@ func (g *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr } func (g *groupServer) GetUserReqApplicationList(ctx context.Context, req *pbgroup.GetUserReqApplicationListReq) (*pbgroup.GetUserReqApplicationListResp, error) { - user, err := g.user.GetPublicUserInfo(ctx, req.UserID) + user, err := g.userClient.GetUserInfo(ctx, req.UserID) if err != nil { return nil, err } @@ -1762,7 +1747,7 @@ func (g *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req * return nil, servererrs.ErrGroupIDNotFound.WrapMsg(strings.Join(ids, ",")) } - userMap, err := g.user.GetPublicUserInfoMap(ctx, req.UserIDs) + userMap, err := g.userClient.GetUsersInfoMap(ctx, req.UserIDs) if err != nil { return nil, err } @@ -1793,7 +1778,7 @@ func (g *groupServer) GetGroupUsersReqApplicationList(ctx context.Context, req * ownerUserID = owner.UserID } - var userInfo *sdkws.PublicUserInfo + var userInfo *sdkws.UserInfo if user, ok := userMap[e.UserID]; !ok { userInfo = user } @@ -1839,7 +1824,7 @@ func (g *groupServer) GetSpecifiedUserGroupRequestInfo(ctx context.Context, req return 
nil, err } - userInfos, err := g.user.GetPublicUserInfos(ctx, []string{req.UserID}) + userInfos, err := g.userClient.GetUsersInfo(ctx, []string{req.UserID}) if err != nil { return nil, err } diff --git a/internal/rpc/group/notification.go b/internal/rpc/group/notification.go index 54a6146f5..1aa5333b4 100644 --- a/internal/rpc/group/notification.go +++ b/internal/rpc/group/notification.go @@ -18,6 +18,10 @@ import ( "context" "errors" "fmt" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" @@ -26,8 +30,8 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" + "github.com/openimsdk/open-im-server/v3/pkg/notification" + "github.com/openimsdk/open-im-server/v3/pkg/notification/common_user" "github.com/openimsdk/protocol/constant" pbgroup "github.com/openimsdk/protocol/group" "github.com/openimsdk/protocol/msg" @@ -38,7 +42,6 @@ import ( "github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/stringutil" "go.mongodb.org/mongo-driver/mongo" - "time" ) // GroupApplicationReceiver @@ -47,36 +50,38 @@ const ( adminReceiver ) -func NewGroupNotificationSender( - db controller.GroupDatabase, - msgRpcClient *rpcclient.MessageRpcClient, - userRpcClient *rpcclient.UserRpcClient, - conversationRpcClient *rpcclient.ConversationRpcClient, - config *Config, - fn func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error), -) *GroupNotificationSender { - return &GroupNotificationSender{ - NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, 
rpcclient.WithRpcClient(msgRpcClient), rpcclient.WithUserRpcClient(userRpcClient)), - getUsersInfo: fn, +func NewNotificationSender(db controller.GroupDatabase, config *Config, userClient *rpcli.UserClient, msgClient *rpcli.MsgClient, conversationClient *rpcli.ConversationClient) *NotificationSender { + return &NotificationSender{ + NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, + rpcclient.WithRpcClient(func(ctx context.Context, req *msg.SendMsgReq) (*msg.SendMsgResp, error) { + return msgClient.SendMsg(ctx, req) + }), + rpcclient.WithUserRpcClient(userClient.GetUserInfo), + ), + getUsersInfo: func(ctx context.Context, userIDs []string) ([]common_user.CommonUser, error) { + users, err := userClient.GetUsersInfo(ctx, userIDs) + if err != nil { + return nil, err + } + return datautil.Slice(users, func(e *sdkws.UserInfo) common_user.CommonUser { return e }), nil + }, db: db, config: config, - - conversationRpcClient: conversationRpcClient, - msgRpcClient: msgRpcClient, + msgClient: msgClient, + conversationClient: conversationClient, } } -type GroupNotificationSender struct { +type NotificationSender struct { *rpcclient.NotificationSender - getUsersInfo func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) - db controller.GroupDatabase - config *Config - - conversationRpcClient *rpcclient.ConversationRpcClient - msgRpcClient *rpcclient.MessageRpcClient + getUsersInfo func(ctx context.Context, userIDs []string) ([]common_user.CommonUser, error) + db controller.GroupDatabase + config *Config + msgClient *rpcli.MsgClient + conversationClient *rpcli.ConversationClient } -func (g *GroupNotificationSender) PopulateGroupMember(ctx context.Context, members ...*model.GroupMember) error { +func (g *NotificationSender) PopulateGroupMember(ctx context.Context, members ...*model.GroupMember) error { if len(members) == 0 { return nil } @@ -91,7 +96,7 @@ func (g *GroupNotificationSender) PopulateGroupMember(ctx 
context.Context, membe if err != nil { return err } - userMap := make(map[string]notification.CommonUser) + userMap := make(map[string]common_user.CommonUser) for i, user := range users { userMap[user.GetUserID()] = users[i] } @@ -111,7 +116,7 @@ func (g *GroupNotificationSender) PopulateGroupMember(ctx context.Context, membe return nil } -func (g *GroupNotificationSender) getUser(ctx context.Context, userID string) (*sdkws.PublicUserInfo, error) { +func (g *NotificationSender) getUser(ctx context.Context, userID string) (*sdkws.PublicUserInfo, error) { users, err := g.getUsersInfo(ctx, []string{userID}) if err != nil { return nil, err @@ -127,7 +132,7 @@ func (g *GroupNotificationSender) getUser(ctx context.Context, userID string) (* }, nil } -func (g *GroupNotificationSender) getGroupInfo(ctx context.Context, groupID string) (*sdkws.GroupInfo, error) { +func (g *NotificationSender) getGroupInfo(ctx context.Context, groupID string) (*sdkws.GroupInfo, error) { gm, err := g.db.TakeGroup(ctx, groupID) if err != nil { return nil, err @@ -148,7 +153,7 @@ func (g *GroupNotificationSender) getGroupInfo(ctx context.Context, groupID stri return convert.Db2PbGroupInfo(gm, ownerUserID, num), nil } -func (g *GroupNotificationSender) getGroupMembers(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) { +func (g *NotificationSender) getGroupMembers(ctx context.Context, groupID string, userIDs []string) ([]*sdkws.GroupMemberFullInfo, error) { members, err := g.db.FindGroupMembers(ctx, groupID, userIDs) if err != nil { return nil, err @@ -164,7 +169,7 @@ func (g *GroupNotificationSender) getGroupMembers(ctx context.Context, groupID s return res, nil } -func (g *GroupNotificationSender) getGroupMemberMap(ctx context.Context, groupID string, userIDs []string) (map[string]*sdkws.GroupMemberFullInfo, error) { +func (g *NotificationSender) getGroupMemberMap(ctx context.Context, groupID string, userIDs []string) 
(map[string]*sdkws.GroupMemberFullInfo, error) { members, err := g.getGroupMembers(ctx, groupID, userIDs) if err != nil { return nil, err @@ -176,7 +181,7 @@ func (g *GroupNotificationSender) getGroupMemberMap(ctx context.Context, groupID return m, nil } -func (g *GroupNotificationSender) getGroupMember(ctx context.Context, groupID string, userID string) (*sdkws.GroupMemberFullInfo, error) { +func (g *NotificationSender) getGroupMember(ctx context.Context, groupID string, userID string) (*sdkws.GroupMemberFullInfo, error) { members, err := g.getGroupMembers(ctx, groupID, []string{userID}) if err != nil { return nil, err @@ -187,7 +192,7 @@ func (g *GroupNotificationSender) getGroupMember(ctx context.Context, groupID st return members[0], nil } -func (g *GroupNotificationSender) getGroupOwnerAndAdminUserID(ctx context.Context, groupID string) ([]string, error) { +func (g *NotificationSender) getGroupOwnerAndAdminUserID(ctx context.Context, groupID string) ([]string, error) { members, err := g.db.FindGroupMemberRoleLevels(ctx, groupID, []int32{constant.GroupOwner, constant.GroupAdmin}) if err != nil { return nil, err @@ -199,7 +204,7 @@ func (g *GroupNotificationSender) getGroupOwnerAndAdminUserID(ctx context.Contex return datautil.Slice(members, fn), nil } -func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, appMangerLevel int32) *sdkws.GroupMemberFullInfo { +func (g *NotificationSender) groupMemberDB2PB(member *model.GroupMember, appMangerLevel int32) *sdkws.GroupMemberFullInfo { return &sdkws.GroupMemberFullInfo{ GroupID: member.GroupID, UserID: member.UserID, @@ -216,7 +221,7 @@ func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, ap } } -/* func (g *GroupNotificationSender) getUsersInfoMap(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error) { +/* func (g *NotificationSender) getUsersInfoMap(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error) { users, err := 
g.getUsersInfo(ctx, userIDs) if err != nil { return nil, err @@ -228,11 +233,11 @@ func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, ap return result, nil } */ -func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws.GroupMemberFullInfo, groupID string) (err error) { +func (g *NotificationSender) fillOpUser(ctx context.Context, opUser **sdkws.GroupMemberFullInfo, groupID string) (err error) { return g.fillOpUserByUserID(ctx, mcontext.GetOpUserID(ctx), opUser, groupID) } -func (g *GroupNotificationSender) fillOpUserByUserID(ctx context.Context, userID string, opUser **sdkws.GroupMemberFullInfo, groupID string) error { +func (g *NotificationSender) fillOpUserByUserID(ctx context.Context, userID string, opUser **sdkws.GroupMemberFullInfo, groupID string) error { if opUser == nil { return errs.ErrInternalServer.WrapMsg("**sdkws.GroupMemberFullInfo is nil") } @@ -276,7 +281,7 @@ func (g *GroupNotificationSender) fillOpUserByUserID(ctx context.Context, userID return nil } -func (g *GroupNotificationSender) setVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string) { +func (g *NotificationSender) setVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string) { versions := versionctx.GetVersionLog(ctx).Get() for _, coll := range versions { if coll.Name == collName && coll.Doc.DID == id { @@ -287,7 +292,7 @@ func (g *GroupNotificationSender) setVersion(ctx context.Context, version *uint6 } } -func (g *GroupNotificationSender) setSortVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string, sortVersion *uint64) { +func (g *NotificationSender) setSortVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string, sortVersion *uint64) { versions := versionctx.GetVersionLog(ctx).Get() for _, coll := range versions { if coll.Name == collName && coll.Doc.DID == id { @@ -302,7 +307,7 @@ func (g 
*GroupNotificationSender) setSortVersion(ctx context.Context, version *u } } -func (g *GroupNotificationSender) GroupCreatedNotification(ctx context.Context, tips *sdkws.GroupCreatedTips) { +func (g *NotificationSender) GroupCreatedNotification(ctx context.Context, tips *sdkws.GroupCreatedTips) { var err error defer func() { if err != nil { @@ -316,7 +321,7 @@ func (g *GroupNotificationSender) GroupCreatedNotification(ctx context.Context, g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupCreatedNotification, tips) } -func (g *GroupNotificationSender) GroupInfoSetNotification(ctx context.Context, tips *sdkws.GroupInfoSetTips) { +func (g *NotificationSender) GroupInfoSetNotification(ctx context.Context, tips *sdkws.GroupInfoSetTips) { var err error defer func() { if err != nil { @@ -330,7 +335,7 @@ func (g *GroupNotificationSender) GroupInfoSetNotification(ctx context.Context, g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupInfoSetNotification, tips, rpcclient.WithRpcGetUserName()) } -func (g *GroupNotificationSender) GroupInfoSetNameNotification(ctx context.Context, tips *sdkws.GroupInfoSetNameTips) { +func (g *NotificationSender) GroupInfoSetNameNotification(ctx context.Context, tips *sdkws.GroupInfoSetNameTips) { var err error defer func() { if err != nil { @@ -344,7 +349,7 @@ func (g *GroupNotificationSender) GroupInfoSetNameNotification(ctx context.Conte g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupInfoSetNameNotification, tips) } -func (g *GroupNotificationSender) GroupInfoSetAnnouncementNotification(ctx context.Context, tips *sdkws.GroupInfoSetAnnouncementTips) { +func (g *NotificationSender) GroupInfoSetAnnouncementNotification(ctx context.Context, tips *sdkws.GroupInfoSetAnnouncementTips) { var err error defer func() { if err != nil { @@ -358,7 +363,7 @@ func (g *GroupNotificationSender) GroupInfoSetAnnouncementNotification(ctx conte g.Notification(ctx, 
mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupInfoSetAnnouncementNotification, tips, rpcclient.WithRpcGetUserName()) } -func (g *GroupNotificationSender) JoinGroupApplicationNotification(ctx context.Context, req *pbgroup.JoinGroupReq) { +func (g *NotificationSender) JoinGroupApplicationNotification(ctx context.Context, req *pbgroup.JoinGroupReq) { var err error defer func() { if err != nil { @@ -386,7 +391,7 @@ func (g *GroupNotificationSender) JoinGroupApplicationNotification(ctx context.C } } -func (g *GroupNotificationSender) MemberQuitNotification(ctx context.Context, member *sdkws.GroupMemberFullInfo) { +func (g *NotificationSender) MemberQuitNotification(ctx context.Context, member *sdkws.GroupMemberFullInfo) { var err error defer func() { if err != nil { @@ -403,7 +408,7 @@ func (g *GroupNotificationSender) MemberQuitNotification(ctx context.Context, me g.Notification(ctx, mcontext.GetOpUserID(ctx), member.GroupID, constant.MemberQuitNotification, tips) } -func (g *GroupNotificationSender) GroupApplicationAcceptedNotification(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) { +func (g *NotificationSender) GroupApplicationAcceptedNotification(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) { var err error defer func() { if err != nil { @@ -436,7 +441,7 @@ func (g *GroupNotificationSender) GroupApplicationAcceptedNotification(ctx conte } } -func (g *GroupNotificationSender) GroupApplicationRejectedNotification(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) { +func (g *NotificationSender) GroupApplicationRejectedNotification(ctx context.Context, req *pbgroup.GroupApplicationResponseReq) { var err error defer func() { if err != nil { @@ -469,7 +474,7 @@ func (g *GroupNotificationSender) GroupApplicationRejectedNotification(ctx conte } } -func (g *GroupNotificationSender) GroupOwnerTransferredNotification(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) { +func (g *NotificationSender) 
GroupOwnerTransferredNotification(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) { var err error defer func() { if err != nil { @@ -500,7 +505,7 @@ func (g *GroupNotificationSender) GroupOwnerTransferredNotification(ctx context. g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupOwnerTransferredNotification, tips) } -func (g *GroupNotificationSender) MemberKickedNotification(ctx context.Context, tips *sdkws.MemberKickedTips) { +func (g *NotificationSender) MemberKickedNotification(ctx context.Context, tips *sdkws.MemberKickedTips) { var err error defer func() { if err != nil { @@ -514,7 +519,7 @@ func (g *GroupNotificationSender) MemberKickedNotification(ctx context.Context, g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips) } -func (g *GroupNotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error { +func (g *NotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error { var err error defer func() { if err != nil { @@ -524,20 +529,15 @@ func (g *GroupNotificationSender) GroupApplicationAgreeMemberEnterNotification(c if !g.config.RpcConfig.EnableHistoryForNewMembers { conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) - maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) + maxSeq, err := g.msgClient.GetConversationMaxSeq(ctx, conversationID) if err != nil { return err } - if _, err = g.msgRpcClient.SetUserConversationsMinSeq(ctx, &msg.SetUserConversationsMinSeqReq{ - UserIDs: entrantUserID, - ConversationID: conversationID, - Seq: maxSeq, - }); err != nil { + if err := g.msgClient.SetUserConversationsMinSeq(ctx, conversationID, entrantUserID, maxSeq+1); err != nil { return err } } - - if err := 
g.conversationRpcClient.GroupChatFirstCreateConversation(ctx, groupID, entrantUserID); err != nil { + if err := g.conversationClient.CreateGroupChatConversations(ctx, groupID, entrantUserID); err != nil { return err } @@ -573,7 +573,7 @@ func (g *GroupNotificationSender) GroupApplicationAgreeMemberEnterNotification(c return nil } -func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID string) error { +func (g *NotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID string) error { var err error defer func() { if err != nil { @@ -583,23 +583,17 @@ func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, g if !g.config.RpcConfig.EnableHistoryForNewMembers { conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) - maxSeq, err := g.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) + maxSeq, err := g.msgClient.GetConversationMaxSeq(ctx, conversationID) if err != nil { return err } - if _, err = g.msgRpcClient.SetUserConversationsMinSeq(ctx, &msg.SetUserConversationsMinSeqReq{ - UserIDs: []string{entrantUserID}, - ConversationID: conversationID, - Seq: maxSeq, - }); err != nil { + if err := g.msgClient.SetUserConversationsMinSeq(ctx, conversationID, []string{entrantUserID}, maxSeq+1); err != nil { return err } } - - if err := g.conversationRpcClient.GroupChatFirstCreateConversation(ctx, groupID, []string{entrantUserID}); err != nil { + if err := g.conversationClient.CreateGroupChatConversations(ctx, groupID, []string{entrantUserID}); err != nil { return err } - var group *sdkws.GroupInfo group, err = g.getGroupInfo(ctx, groupID) if err != nil { @@ -620,7 +614,7 @@ func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, g return nil } -func (g *GroupNotificationSender) GroupDismissedNotification(ctx context.Context, tips *sdkws.GroupDismissedTips) { +func (g *NotificationSender) 
GroupDismissedNotification(ctx context.Context, tips *sdkws.GroupDismissedTips) { var err error defer func() { if err != nil { @@ -633,7 +627,7 @@ func (g *GroupNotificationSender) GroupDismissedNotification(ctx context.Context g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.GroupDismissedNotification, tips) } -func (g *GroupNotificationSender) GroupMemberMutedNotification(ctx context.Context, groupID, groupMemberUserID string, mutedSeconds uint32) { +func (g *NotificationSender) GroupMemberMutedNotification(ctx context.Context, groupID, groupMemberUserID string, mutedSeconds uint32) { var err error defer func() { if err != nil { @@ -661,7 +655,7 @@ func (g *GroupNotificationSender) GroupMemberMutedNotification(ctx context.Conte g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberMutedNotification, tips) } -func (g *GroupNotificationSender) GroupMemberCancelMutedNotification(ctx context.Context, groupID, groupMemberUserID string) { +func (g *NotificationSender) GroupMemberCancelMutedNotification(ctx context.Context, groupID, groupMemberUserID string) { var err error defer func() { if err != nil { @@ -686,7 +680,7 @@ func (g *GroupNotificationSender) GroupMemberCancelMutedNotification(ctx context g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberCancelMutedNotification, tips) } -func (g *GroupNotificationSender) GroupMutedNotification(ctx context.Context, groupID string) { +func (g *NotificationSender) GroupMutedNotification(ctx context.Context, groupID string) { var err error defer func() { if err != nil { @@ -714,7 +708,7 @@ func (g *GroupNotificationSender) GroupMutedNotification(ctx context.Context, gr g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMutedNotification, tips) } -func (g *GroupNotificationSender) GroupCancelMutedNotification(ctx context.Context, groupID string) { +func (g *NotificationSender) GroupCancelMutedNotification(ctx 
context.Context, groupID string) { var err error defer func() { if err != nil { @@ -742,7 +736,7 @@ func (g *GroupNotificationSender) GroupCancelMutedNotification(ctx context.Conte g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupCancelMutedNotification, tips) } -func (g *GroupNotificationSender) GroupMemberInfoSetNotification(ctx context.Context, groupID, groupMemberUserID string) { +func (g *NotificationSender) GroupMemberInfoSetNotification(ctx context.Context, groupID, groupMemberUserID string) { var err error defer func() { if err != nil { @@ -767,7 +761,7 @@ func (g *GroupNotificationSender) GroupMemberInfoSetNotification(ctx context.Con g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberInfoSetNotification, tips) } -func (g *GroupNotificationSender) GroupMemberSetToAdminNotification(ctx context.Context, groupID, groupMemberUserID string) { +func (g *NotificationSender) GroupMemberSetToAdminNotification(ctx context.Context, groupID, groupMemberUserID string) { var err error defer func() { if err != nil { @@ -791,7 +785,7 @@ func (g *GroupNotificationSender) GroupMemberSetToAdminNotification(ctx context. 
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.GroupMemberSetToAdminNotification, tips) } -func (g *GroupNotificationSender) GroupMemberSetToOrdinaryUserNotification(ctx context.Context, groupID, groupMemberUserID string) { +func (g *NotificationSender) GroupMemberSetToOrdinaryUserNotification(ctx context.Context, groupID, groupMemberUserID string) { var err error defer func() { if err != nil { diff --git a/internal/rpc/msg/clear.go b/internal/rpc/msg/clear.go index ff732136a..8e14b281e 100644 --- a/internal/rpc/msg/clear.go +++ b/internal/rpc/msg/clear.go @@ -2,135 +2,59 @@ package msg import ( "context" - "time" - "github.com/openimsdk/open-im-server/v3/pkg/authverify" - "github.com/openimsdk/open-im-server/v3/pkg/common/convert" - pbconversation "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/protocol/msg" - "github.com/openimsdk/protocol/wrapperspb" - "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" - "github.com/openimsdk/tools/mcontext" - "github.com/openimsdk/tools/utils/datautil" - "github.com/openimsdk/tools/utils/idutil" - "github.com/openimsdk/tools/utils/stringutil" - "golang.org/x/sync/errgroup" + "strings" ) -// hard delete in Database. -func (m *msgServer) DestructMsgs(ctx context.Context, req *msg.DestructMsgsReq) (_ *msg.DestructMsgsResp, err error) { +// DestructMsgs hard delete in Database. 
+func (m *msgServer) DestructMsgs(ctx context.Context, req *msg.DestructMsgsReq) (*msg.DestructMsgsResp, error) { if err := authverify.CheckAdmin(ctx, m.config.Share.IMAdminUserID); err != nil { return nil, err } - if req.Timestamp > time.Now().UnixMilli() { - return nil, errs.ErrArgs.WrapMsg("request millisecond timestamp error") - } - var ( - docNum int - msgNum int - start = time.Now() - getLimit = 5000 - ) - - destructMsg := func(ctx context.Context) (bool, error) { - docIDs, err := m.MsgDatabase.GetDocIDs(ctx) - if err != nil { - return false, err - } - - msgs, err := m.MsgDatabase.GetBeforeMsg(ctx, req.Timestamp, docIDs, getLimit) - if err != nil { - return false, err - } - if len(msgs) == 0 { - return false, nil - } - - for _, msg := range msgs { - index, err := m.MsgDatabase.DeleteDocMsgBefore(ctx, req.Timestamp, msg) - if err != nil { - return false, err - } - if len(index) == 0 { - return false, errs.ErrInternalServer.WrapMsg("delete doc msg failed") - } - - docNum++ - msgNum += len(index) - } - - return true, nil - } - - _, err = destructMsg(ctx) + docs, err := m.MsgDatabase.GetRandBeforeMsg(ctx, req.Timestamp, int(req.Limit)) if err != nil { - log.ZError(ctx, "clear msg failed", err, "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) return nil, err } - - log.ZDebug(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) - - return &msg.DestructMsgsResp{}, nil -} - -// soft delete for user self -func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg.ClearMsgResp, err error) { - temp := convert.ConversationsPb2DB(req.Conversations) - - batchNum := 100 - - errg, _ := errgroup.WithContext(ctx) - errg.SetLimit(100) - - for i := 0; i < len(temp); i += batchNum { - batch := temp[i:min(i+batchNum, len(temp))] - - errg.Go(func() error { - for _, conversation := range batch { - handleCtx := mcontext.NewCtx(stringutil.GetSelfFuncName() + "-" + idutil.OperationIDGenerator() + "-" + 
conversation.ConversationID + "-" + conversation.OwnerUserID) - log.ZDebug(handleCtx, "User MsgsDestruct", - "conversationID", conversation.ConversationID, - "ownerUserID", conversation.OwnerUserID, - "msgDestructTime", conversation.MsgDestructTime, - "lastMsgDestructTime", conversation.LatestMsgDestructTime) - - seqs, err := m.MsgDatabase.ClearUserMsgs(handleCtx, conversation.OwnerUserID, conversation.ConversationID, conversation.MsgDestructTime, conversation.LatestMsgDestructTime) - if err != nil { - log.ZError(handleCtx, "user msg destruct failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID) - continue - } - - if len(seqs) > 0 { - minseq := datautil.Max(seqs...) - - // update - if err := m.Conversation.UpdateConversation(handleCtx, - &pbconversation.UpdateConversationReq{ - UserIDs: []string{conversation.OwnerUserID}, - ConversationID: conversation.ConversationID, - LatestMsgDestructTime: wrapperspb.Int64(time.Now().UnixMilli()), - MinSeq: wrapperspb.Int64(minseq), - }); err != nil { - log.ZError(handleCtx, "updateUsersConversationField failed", err, "conversationID", conversation.ConversationID, "ownerUserID", conversation.OwnerUserID) - continue - } - - if err := m.Conversation.SetConversationMinSeq(handleCtx, []string{conversation.OwnerUserID}, conversation.ConversationID, minseq); err != nil { - return err - } - - // if you need Notify SDK client userseq is update. 
- // m.msgNotificationSender.UserDeleteMsgsNotification(handleCtx, conversation.OwnerUserID, conversation.ConversationID, seqs) - } + for i, doc := range docs { + if err := m.MsgDatabase.DeleteDoc(ctx, doc.DocID); err != nil { + return nil, err + } + log.ZDebug(ctx, "DestructMsgs delete doc", "index", i, "docID", doc.DocID) + index := strings.LastIndex(doc.DocID, ":") + if index < 0 { + continue + } + var minSeq int64 + for _, model := range doc.Msg { + if model.Msg == nil { + continue } - return nil - }) + if model.Msg.Seq > minSeq { + minSeq = model.Msg.Seq + } + } + if minSeq <= 0 { + continue + } + conversationID := doc.DocID[:index] + if conversationID == "" { + continue + } + minSeq++ + if err := m.MsgDatabase.SetMinSeq(ctx, conversationID, minSeq); err != nil { + return nil, err + } + log.ZDebug(ctx, "DestructMsgs delete doc set min seq", "index", i, "docID", doc.DocID, "conversationID", conversationID, "setMinSeq", minSeq) } + return &msg.DestructMsgsResp{Count: int32(len(docs))}, nil +} - if err := errg.Wait(); err != nil { +func (m *msgServer) GetLastMessageSeqByTime(ctx context.Context, req *msg.GetLastMessageSeqByTimeReq) (*msg.GetLastMessageSeqByTimeResp, error) { + seq, err := m.MsgDatabase.GetLastMessageSeqByTime(ctx, req.ConversationID, req.Time) + if err != nil { return nil, err } - - return nil, nil + return &msg.GetLastMessageSeqByTimeResp{Seq: seq}, nil } diff --git a/internal/rpc/msg/delete.go b/internal/rpc/msg/delete.go index 371c50e2e..d3485faaa 100644 --- a/internal/rpc/msg/delete.go +++ b/internal/rpc/msg/delete.go @@ -74,19 +74,13 @@ func (m *msgServer) DeleteMsgs(ctx context.Context, req *msg.DeleteMsgsReq) (*ms if err := m.MsgDatabase.DeleteMsgsPhysicalBySeqs(ctx, req.ConversationID, req.Seqs); err != nil { return nil, err } - conversations, err := m.Conversation.GetConversationsByConversationID(ctx, []string{req.ConversationID}) + conv, err := m.conversationClient.GetConversationsByConversationID(ctx, req.ConversationID) if err != nil 
{ return nil, err } tips := &sdkws.DeleteMsgsTips{UserID: req.UserID, ConversationID: req.ConversationID, Seqs: req.Seqs} - m.notificationSender.NotificationWithSessionType( - ctx, - req.UserID, - m.conversationAndGetRecvID(conversations[0], req.UserID), - constant.DeleteMsgsNotification, - conversations[0].ConversationType, - tips, - ) + m.notificationSender.NotificationWithSessionType(ctx, req.UserID, m.conversationAndGetRecvID(conv, req.UserID), + constant.DeleteMsgsNotification, conv.ConversationType, tips) } else { if err := m.MsgDatabase.DeleteUserMsgsBySeqs(ctx, req.UserID, req.ConversationID, req.Seqs); err != nil { return nil, err @@ -112,16 +106,14 @@ func (m *msgServer) DeleteMsgPhysical(ctx context.Context, req *msg.DeleteMsgPhy return nil, err } remainTime := timeutil.GetCurrentTimestampBySecond() - req.Timestamp - for _, conversationID := range req.ConversationIDs { - if err := m.MsgDatabase.DeleteConversationMsgsAndSetMinSeq(ctx, conversationID, remainTime); err != nil { - log.ZWarn(ctx, "DeleteConversationMsgsAndSetMinSeq error", err, "conversationID", conversationID, "err", err) - } + if _, err := m.DestructMsgs(ctx, &msg.DestructMsgsReq{Timestamp: remainTime, Limit: 9999}); err != nil { + return nil, err } return &msg.DeleteMsgPhysicalResp{}, nil } func (m *msgServer) clearConversation(ctx context.Context, conversationIDs []string, userID string, deleteSyncOpt *msg.DeleteSyncOpt) error { - conversations, err := m.Conversation.GetConversationsByConversationID(ctx, conversationIDs) + conversations, err := m.conversationClient.GetConversationsByConversationIDs(ctx, conversationIDs) if err != nil { return err } @@ -144,7 +136,7 @@ func (m *msgServer) clearConversation(ctx context.Context, conversationIDs []str } ownerUserIDs := []string{userID} for conversationID, seq := range setSeqs { - if err := m.Conversation.SetConversationMinSeq(ctx, ownerUserIDs, conversationID, seq); err != nil { + if err := m.conversationClient.SetConversationMinSeq(ctx, 
conversationID, ownerUserIDs, seq); err != nil { return err } } diff --git a/internal/rpc/msg/notification.go b/internal/rpc/msg/notification.go index 3b13676bf..d5604286a 100644 --- a/internal/rpc/msg/notification.go +++ b/internal/rpc/msg/notification.go @@ -17,7 +17,7 @@ package msg import ( "context" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/notification" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/sdkws" ) diff --git a/internal/rpc/msg/revoke.go b/internal/rpc/msg/revoke.go index b7cc7df62..97de0f48a 100644 --- a/internal/rpc/msg/revoke.go +++ b/internal/rpc/msg/revoke.go @@ -63,7 +63,8 @@ func (m *msgServer) RevokeMsg(ctx context.Context, req *msg.RevokeMsgReq) (*msg. log.ZDebug(ctx, "GetMsgBySeqs", "conversationID", req.ConversationID, "seq", req.Seq, "msg", string(data)) var role int32 if !authverify.IsAppManagerUid(ctx, m.config.Share.IMAdminUserID) { - switch msgs[0].SessionType { + sessionType := msgs[0].SessionType + switch sessionType { case constant.SingleChatType: if err := authverify.CheckAccessV3(ctx, msgs[0].SendID, m.config.Share.IMAdminUserID); err != nil { return nil, err @@ -89,7 +90,7 @@ func (m *msgServer) RevokeMsg(ctx context.Context, req *msg.RevokeMsgReq) (*msg. 
role = member.RoleLevel } default: - return nil, errs.ErrInternalServer.WrapMsg("msg sessionType not supported") + return nil, errs.ErrInternalServer.WrapMsg("msg sessionType not supported", "sessionType", sessionType) } } now := time.Now().UnixMilli() diff --git a/internal/rpc/msg/send.go b/internal/rpc/msg/send.go index 72b799c43..c93f18148 100644 --- a/internal/rpc/msg/send.go +++ b/internal/rpc/msg/send.go @@ -83,7 +83,7 @@ func (m *msgServer) setConversationAtInfo(nctx context.Context, msg *sdkws.MsgDa defer func() { if r := recover(); r != nil { - log.ZPanic(nctx, "setConversationAtInfo Panic", r) + log.ZPanic(nctx, "setConversationAtInfo Panic", errs.ErrPanic(r)) } }() @@ -96,14 +96,14 @@ func (m *msgServer) setConversationAtInfo(nctx context.Context, msg *sdkws.MsgDa ConversationType: msg.SessionType, GroupID: msg.GroupID, } + memberUserIDList, err := m.GroupLocalCache.GetGroupMemberIDs(ctx, msg.GroupID) + if err != nil { + log.ZWarn(ctx, "GetGroupMemberIDs", err) + return + } tagAll := datautil.Contain(constant.AtAllString, msg.AtUserIDList...) 
if tagAll { - memberUserIDList, err := m.GroupLocalCache.GetGroupMemberIDs(ctx, msg.GroupID) - if err != nil { - log.ZWarn(ctx, "GetGroupMemberIDs", err) - return - } memberUserIDList = datautil.DeleteElems(memberUserIDList, msg.SendID) @@ -113,29 +113,29 @@ func (m *msgServer) setConversationAtInfo(nctx context.Context, msg *sdkws.MsgDa conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.AtAll} } else { // @Everyone and @other people conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.AtAllAtMe} - - err = m.Conversation.SetConversations(ctx, atUserID, conversation) - if err != nil { + atUserID = datautil.SliceIntersectFuncs(atUserID, memberUserIDList, func(a string) string { return a }, func(b string) string { + return b + }) + if err := m.conversationClient.SetConversations(ctx, atUserID, conversation); err != nil { log.ZWarn(ctx, "SetConversations", err, "userID", atUserID, "conversation", conversation) } - memberUserIDList = datautil.Single(atUserID, memberUserIDList) } conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.AtAll} - - err = m.Conversation.SetConversations(ctx, memberUserIDList, conversation) - if err != nil { + if err := m.conversationClient.SetConversations(ctx, memberUserIDList, conversation); err != nil { log.ZWarn(ctx, "SetConversations", err, "userID", memberUserIDList, "conversation", conversation) } return } + atUserID = datautil.SliceIntersectFuncs(msg.AtUserIDList, memberUserIDList, func(a string) string { return a }, func(b string) string { + return b + }) conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.AtMe} - err := m.Conversation.SetConversations(ctx, msg.AtUserIDList, conversation) - if err != nil { - log.ZWarn(ctx, "SetConversations", err, msg.AtUserIDList, conversation) + if err := m.conversationClient.SetConversations(ctx, atUserID, conversation); err != nil { + log.ZWarn(ctx, "SetConversations", err, atUserID, conversation) } } diff --git a/internal/rpc/msg/server.go 
b/internal/rpc/msg/server.go index d02e89efb..74a6355d7 100644 --- a/internal/rpc/msg/server.go +++ b/internal/rpc/msg/server.go @@ -16,6 +16,8 @@ package msg import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" @@ -25,8 +27,8 @@ import ( "github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" + "github.com/openimsdk/open-im-server/v3/pkg/notification" "github.com/openimsdk/open-im-server/v3/pkg/rpccache" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/protocol/msg" @@ -35,39 +37,38 @@ import ( ) type MessageInterceptorFunc func(ctx context.Context, globalConfig *Config, req *msg.SendMsgReq) (*sdkws.MsgData, error) -type ( - // MessageInterceptorChain defines a chain of message interceptor functions. - MessageInterceptorChain []MessageInterceptorFunc - // MsgServer encapsulates dependencies required for message handling. - msgServer struct { - RegisterCenter discovery.SvcDiscoveryRegistry // Service discovery registry for service registration. - MsgDatabase controller.CommonMsgDatabase // Interface for message database operations. - Conversation *rpcclient.ConversationRpcClient // RPC client for conversation service. - UserLocalCache *rpccache.UserLocalCache // Local cache for user data. - FriendLocalCache *rpccache.FriendLocalCache // Local cache for friend data. - GroupLocalCache *rpccache.GroupLocalCache // Local cache for group data. - ConversationLocalCache *rpccache.ConversationLocalCache // Local cache for conversation data. - Handlers MessageInterceptorChain // Chain of handlers for processing messages. 
- notificationSender *rpcclient.NotificationSender // RPC client for sending notifications. - msgNotificationSender *MsgNotificationSender // RPC client for sending msg notifications. - config *Config // Global configuration settings. - webhookClient *webhook.Client - msg.UnimplementedMsgServer - } +// MessageInterceptorChain defines a chain of message interceptor functions. +type MessageInterceptorChain []MessageInterceptorFunc - Config struct { - RpcConfig config.Msg - RedisConfig config.Redis - MongodbConfig config.Mongo - KafkaConfig config.Kafka - NotificationConfig config.Notification - Share config.Share - WebhooksConfig config.Webhooks - LocalCacheConfig config.LocalCache - Discovery config.Discovery - } -) +type Config struct { + RpcConfig config.Msg + RedisConfig config.Redis + MongodbConfig config.Mongo + KafkaConfig config.Kafka + NotificationConfig config.Notification + Share config.Share + WebhooksConfig config.Webhooks + LocalCacheConfig config.LocalCache + Discovery config.Discovery +} + +// MsgServer encapsulates dependencies required for message handling. +type msgServer struct { + msg.UnimplementedMsgServer + RegisterCenter discovery.SvcDiscoveryRegistry // Service discovery registry for service registration. + MsgDatabase controller.CommonMsgDatabase // Interface for message database operations. + UserLocalCache *rpccache.UserLocalCache // Local cache for user data. + FriendLocalCache *rpccache.FriendLocalCache // Local cache for friend data. + GroupLocalCache *rpccache.GroupLocalCache // Local cache for group data. + ConversationLocalCache *rpccache.ConversationLocalCache // Local cache for conversation data. + Handlers MessageInterceptorChain // Chain of handlers for processing messages. + notificationSender *rpcclient.NotificationSender // RPC client for sending notifications. + msgNotificationSender *MsgNotificationSender // RPC client for sending msg notifications. + config *Config // Global configuration settings. 
+ webhookClient *webhook.Client + conversationClient *rpcli.ConversationClient +} func (m *msgServer) addInterceptorHandler(interceptorFunc ...MessageInterceptorFunc) { m.Handlers = append(m.Handlers, interceptorFunc...) @@ -87,11 +88,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } - msgModel := redis.NewMsgCache(rdb) - conversationClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation) - userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) - groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) - friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend) + msgModel := redis.NewMsgCache(rdb, msgDocModel) seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB()) if err != nil { return err @@ -106,16 +103,33 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } + userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } + groupConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Group) + if err != nil { + return err + } + friendConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Friend) + if err != nil { + return err + } + conversationConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Conversation) + if err != nil { + return err + } + conversationClient := rpcli.NewConversationClient(conversationConn) s := &msgServer{ - Conversation: &conversationClient, MsgDatabase: msgDatabase, RegisterCenter: client, - UserLocalCache: rpccache.NewUserLocalCache(userRpcClient, &config.LocalCacheConfig, rdb), - GroupLocalCache: rpccache.NewGroupLocalCache(groupRpcClient, &config.LocalCacheConfig, rdb), + UserLocalCache: rpccache.NewUserLocalCache(rpcli.NewUserClient(userConn), &config.LocalCacheConfig, 
rdb), + GroupLocalCache: rpccache.NewGroupLocalCache(rpcli.NewGroupClient(groupConn), &config.LocalCacheConfig, rdb), ConversationLocalCache: rpccache.NewConversationLocalCache(conversationClient, &config.LocalCacheConfig, rdb), - FriendLocalCache: rpccache.NewFriendLocalCache(friendRpcClient, &config.LocalCacheConfig, rdb), + FriendLocalCache: rpccache.NewFriendLocalCache(rpcli.NewRelationClient(friendConn), &config.LocalCacheConfig, rdb), config: config, webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), + conversationClient: conversationClient, } s.notificationSender = rpcclient.NewNotificationSender(&config.NotificationConfig, rpcclient.WithLocalSendMsg(s.SendMsg)) @@ -139,11 +153,3 @@ func (m *msgServer) conversationAndGetRecvID(conversation *conversation.Conversa } return "" } - -func (m *msgServer) AppendStreamMsg(ctx context.Context, req *msg.AppendStreamMsgReq) (*msg.AppendStreamMsgResp, error) { - return nil, nil -} - -func (m *msgServer) GetStreamMsg(ctx context.Context, req *msg.GetStreamMsgReq) (*msg.GetStreamMsgResp, error) { - return nil, nil -} diff --git a/internal/rpc/relation/black.go b/internal/rpc/relation/black.go index d8d457dac..b795d6248 100644 --- a/internal/rpc/relation/black.go +++ b/internal/rpc/relation/black.go @@ -39,7 +39,7 @@ func (s *friendServer) GetPaginationBlacks(ctx context.Context, req *relation.Ge return nil, err } resp = &relation.GetPaginationBlacksResp{} - resp.Blacks, err = convert.BlackDB2Pb(ctx, blacks, s.userRpcClient.GetUsersInfoMap) + resp.Blacks, err = convert.BlackDB2Pb(ctx, blacks, s.userClient.GetUsersInfoMap) if err != nil { return nil, err } @@ -81,9 +81,7 @@ func (s *friendServer) AddBlack(ctx context.Context, req *relation.AddBlackReq) if err := s.webhookBeforeAddBlack(ctx, &s.config.WebhooksConfig.BeforeAddBlack, req); err != nil { return nil, err } - - _, err := s.userRpcClient.GetUsersInfo(ctx, []string{req.OwnerUserID, req.BlackUserID}) - if err != nil { + if err := 
s.userClient.CheckUser(ctx, []string{req.OwnerUserID, req.BlackUserID}); err != nil { return nil, err } black := model.Black{ @@ -114,7 +112,7 @@ func (s *friendServer) GetSpecifiedBlacks(ctx context.Context, req *relation.Get return nil, errs.ErrArgs.WrapMsg("userIDList repeated") } - userMap, err := s.userRpcClient.GetPublicUserInfoMap(ctx, req.UserIDList) + userMap, err := s.userClient.GetUsersInfoMap(ctx, req.UserIDList) if err != nil { return nil, err } @@ -132,13 +130,26 @@ func (s *friendServer) GetSpecifiedBlacks(ctx context.Context, req *relation.Get Blacks: make([]*sdkws.BlackInfo, 0, len(req.UserIDList)), } + toPublcUser := func(userID string) *sdkws.PublicUserInfo { + v, ok := userMap[userID] + if !ok { + return nil + } + return &sdkws.PublicUserInfo{ + UserID: v.UserID, + Nickname: v.Nickname, + FaceURL: v.FaceURL, + Ex: v.Ex, + } + } + for _, userID := range req.UserIDList { if black := blackMap[userID]; black != nil { resp.Blacks = append(resp.Blacks, &sdkws.BlackInfo{ OwnerUserID: black.OwnerUserID, CreateTime: black.CreateTime.UnixMilli(), - BlackUserInfo: userMap[userID], + BlackUserInfo: toPublcUser(userID), AddSource: black.AddSource, OperatorUserID: black.OperatorUserID, Ex: black.Ex, diff --git a/internal/rpc/relation/friend.go b/internal/rpc/relation/friend.go index 036c7aff5..e9a5818ed 100644 --- a/internal/rpc/relation/friend.go +++ b/internal/rpc/relation/friend.go @@ -16,6 +16,7 @@ package relation import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/tools/mq/memamq" @@ -31,7 +32,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/relation" "github.com/openimsdk/protocol/sdkws" @@ -44,15 +44,14 @@ import ( type 
friendServer struct { relation.UnimplementedFriendServer - db controller.FriendDatabase - blackDatabase controller.BlackDatabase - userRpcClient *rpcclient.UserRpcClient - notificationSender *FriendNotificationSender - conversationRpcClient rpcclient.ConversationRpcClient - RegisterCenter discovery.SvcDiscoveryRegistry - config *Config - webhookClient *webhook.Client - queue *memamq.MemoryQueue + db controller.FriendDatabase + blackDatabase controller.BlackDatabase + notificationSender *FriendNotificationSender + RegisterCenter discovery.SvcDiscoveryRegistry + config *Config + webhookClient *webhook.Client + queue *memamq.MemoryQueue + userClient *rpcli.UserClient } type Config struct { @@ -92,15 +91,21 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg return err } - // Initialize RPC clients - userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) - msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) + userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } + msgConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Msg) + if err != nil { + return err + } + userClient := rpcli.NewUserClient(userConn) // Initialize notification sender notificationSender := NewFriendNotificationSender( &config.NotificationConfig, - &msgRpcClient, - WithRpcFunc(userRpcClient.GetUsersInfo), + rpcli.NewMsgClient(msgConn), + WithRpcFunc(userClient.GetUsersInfo), ) localcache.InitLocalCache(&config.LocalCacheConfig) @@ -116,13 +121,12 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg blackMongoDB, redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB, redis.GetRocksCacheOptions()), ), - userRpcClient: &userRpcClient, - notificationSender: notificationSender, - RegisterCenter: client, - conversationRpcClient: rpcclient.NewConversationRpcClient(client, 
config.Share.RpcRegisterName.Conversation), - config: config, - webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), - queue: memamq.NewMemoryQueue(16, 1024*1024), + notificationSender: notificationSender, + RegisterCenter: client, + config: config, + webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), + queue: memamq.NewMemoryQueue(16, 1024*1024), + userClient: userClient, }) return nil } @@ -139,7 +143,7 @@ func (s *friendServer) ApplyToAddFriend(ctx context.Context, req *relation.Apply if err = s.webhookBeforeAddFriend(ctx, &s.config.WebhooksConfig.BeforeAddFriend, req); err != nil && err != servererrs.ErrCallbackContinue { return nil, err } - if _, err := s.userRpcClient.GetUsersInfoMap(ctx, []string{req.ToUserID, req.FromUserID}); err != nil { + if err := s.userClient.CheckUser(ctx, []string{req.ToUserID, req.FromUserID}); err != nil { return nil, err } @@ -163,7 +167,8 @@ func (s *friendServer) ImportFriends(ctx context.Context, req *relation.ImportFr if err := authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil { return nil, err } - if _, err := s.userRpcClient.GetUsersInfo(ctx, append([]string{req.OwnerUserID}, req.FriendUserIDs...)); err != nil { + + if err := s.userClient.CheckUser(ctx, append([]string{req.OwnerUserID}, req.FriendUserIDs...)); err != nil { return nil, err } if datautil.Contain(req.OwnerUserID, req.FriendUserIDs...) { @@ -304,7 +309,7 @@ func (s *friendServer) getFriend(ctx context.Context, ownerUserID string, friend if err != nil { return nil, err } - return convert.FriendsDB2Pb(ctx, friends, s.userRpcClient.GetUsersInfoMap) + return convert.FriendsDB2Pb(ctx, friends, s.userClient.GetUsersInfoMap) } // Get the list of friend requests sent out proactively. 
@@ -316,7 +321,7 @@ func (s *friendServer) GetDesignatedFriendsApply(ctx context.Context, return nil, err } resp = &relation.GetDesignatedFriendsApplyResp{} - resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap) + resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userClient.GetUsersInfoMap) if err != nil { return nil, err } @@ -335,7 +340,7 @@ func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *rel } resp = &relation.GetPaginationFriendsApplyToResp{} - resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap) + resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userClient.GetUsersInfoMap) if err != nil { return nil, err } @@ -357,7 +362,7 @@ func (s *friendServer) GetPaginationFriendsApplyFrom(ctx context.Context, req *r return nil, err } - resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap) + resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userClient.GetUsersInfoMap) if err != nil { return nil, err } @@ -388,7 +393,7 @@ func (s *friendServer) GetPaginationFriends(ctx context.Context, req *relation.G } resp = &relation.GetPaginationFriendsResp{} - resp.FriendsInfo, err = convert.FriendsDB2Pb(ctx, friends, s.userRpcClient.GetUsersInfoMap) + resp.FriendsInfo, err = convert.FriendsDB2Pb(ctx, friends, s.userClient.GetUsersInfoMap) if err != nil { return nil, err } @@ -421,7 +426,7 @@ func (s *friendServer) GetSpecifiedFriendsInfo(ctx context.Context, req *relatio return nil, errs.ErrArgs.WrapMsg("userIDList repeated") } - userMap, err := s.userRpcClient.GetUsersInfoMap(ctx, req.UserIDList) + userMap, err := s.userClient.GetUsersInfoMap(ctx, req.UserIDList) if err != nil { return nil, err } @@ -525,18 +530,3 @@ func (s *friendServer) UpdateFriends( s.notificationSender.FriendsInfoUpdateNotification(ctx, 
req.OwnerUserID, req.FriendUserIDs) return resp, nil } - -func (s *friendServer) GetIncrementalFriendsApplyTo(ctx context.Context, req *relation.GetIncrementalFriendsApplyToReq) (*relation.GetIncrementalFriendsApplyToResp, error) { - // TODO implement me - return nil, nil -} - -func (s *friendServer) GetIncrementalFriendsApplyFrom(ctx context.Context, req *relation.GetIncrementalFriendsApplyFromReq) (*relation.GetIncrementalFriendsApplyFromResp, error) { - // TODO implement me - return nil, nil -} - -func (s *friendServer) GetIncrementalBlacks(ctx context.Context, req *relation.GetIncrementalBlacksReq) (*relation.GetIncrementalBlacksResp, error) { - // TODO implement me - return nil, nil -} diff --git a/internal/rpc/relation/notification.go b/internal/rpc/relation/notification.go index 83c5d2ca9..a34a4d322 100644 --- a/internal/rpc/relation/notification.go +++ b/internal/rpc/relation/notification.go @@ -16,6 +16,9 @@ package relation import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx" @@ -24,8 +27,8 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" + "github.com/openimsdk/open-im-server/v3/pkg/notification" + "github.com/openimsdk/open-im-server/v3/pkg/notification/common_user" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/relation" "github.com/openimsdk/protocol/sdkws" @@ -35,7 +38,7 @@ import ( type FriendNotificationSender struct { *rpcclient.NotificationSender // Target not found err - getUsersInfo func(ctx context.Context, userIDs []string) 
([]notification.CommonUser, error) + getUsersInfo func(ctx context.Context, userIDs []string) ([]common_user.CommonUser, error) // db controller db controller.FriendDatabase } @@ -52,7 +55,7 @@ func WithDBFunc( fn func(ctx context.Context, userIDs []string) (users []*relationtb.User, err error), ) friendNotificationSenderOptions { return func(s *FriendNotificationSender) { - f := func(ctx context.Context, userIDs []string) (result []notification.CommonUser, err error) { + f := func(ctx context.Context, userIDs []string) (result []common_user.CommonUser, err error) { users, err := fn(ctx, userIDs) if err != nil { return nil, err @@ -70,7 +73,7 @@ func WithRpcFunc( fn func(ctx context.Context, userIDs []string) ([]*sdkws.UserInfo, error), ) friendNotificationSenderOptions { return func(s *FriendNotificationSender) { - f := func(ctx context.Context, userIDs []string) (result []notification.CommonUser, err error) { + f := func(ctx context.Context, userIDs []string) (result []common_user.CommonUser, err error) { users, err := fn(ctx, userIDs) if err != nil { return nil, err @@ -84,13 +87,11 @@ func WithRpcFunc( } } -func NewFriendNotificationSender( - conf *config.Notification, - msgRpcClient *rpcclient.MessageRpcClient, - opts ...friendNotificationSenderOptions, -) *FriendNotificationSender { +func NewFriendNotificationSender(conf *config.Notification, msgClient *rpcli.MsgClient, opts ...friendNotificationSenderOptions) *FriendNotificationSender { f := &FriendNotificationSender{ - NotificationSender: rpcclient.NewNotificationSender(conf, rpcclient.WithRpcClient(msgRpcClient)), + NotificationSender: rpcclient.NewNotificationSender(conf, rpcclient.WithRpcClient(func(ctx context.Context, req *msg.SendMsgReq) (*msg.SendMsgResp, error) { + return msgClient.SendMsg(ctx, req) + })), } for _, opt := range opts { opt(f) diff --git a/internal/rpc/third/log.go b/internal/rpc/third/log.go index 4eeb5d558..4d8cbc0bb 100644 --- a/internal/rpc/third/log.go +++ 
b/internal/rpc/third/log.go @@ -19,10 +19,9 @@ import ( "crypto/rand" "time" - relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" + relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/third" "github.com/openimsdk/tools/errs" @@ -149,7 +148,7 @@ func (t *thirdServer) SearchLogs(ctx context.Context, req *third.SearchLogsReq) for _, log := range logs { userIDs = append(userIDs, log.UserID) } - userMap, err := t.userRpcClient.GetUsersInfoMap(ctx, userIDs) + userMap, err := t.userClient.GetUsersInfoMap(ctx, userIDs) if err != nil { return nil, err } diff --git a/internal/rpc/third/s3.go b/internal/rpc/third/s3.go index b4b064d7f..8796fe824 100644 --- a/internal/rpc/third/s3.go +++ b/internal/rpc/third/s3.go @@ -19,17 +19,14 @@ import ( "encoding/base64" "encoding/hex" "encoding/json" + "github.com/openimsdk/open-im-server/v3/pkg/authverify" "path" "strconv" "time" - "github.com/openimsdk/open-im-server/v3/pkg/common/config" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "go.mongodb.org/mongo-driver/mongo" - "github.com/google/uuid" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/protocol/third" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" @@ -288,87 +285,35 @@ func (t *thirdServer) apiAddress(prefix, name string) string { } func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) { - var conf config.Third + if err := authverify.CheckAdmin(ctx, t.config.Share.IMAdminUserID); err != nil { + return nil, err + } + engine := t.config.RpcConfig.Object.Enable expireTime := 
time.UnixMilli(req.ExpireTime) - - findPagination := &sdkws.RequestPagination{ - PageNumber: 1, - ShowNumber: 500, - } - // Find all expired data in S3 database - total, models, err := t.s3dataBase.FindNeedDeleteObjectByDB(ctx, expireTime, req.ObjectGroup, findPagination) - if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments { - return nil, errs.Wrap(err) + models, err := t.s3dataBase.FindExpirationObject(ctx, engine, expireTime, req.ObjectGroup, int64(req.Limit)) + if err != nil { + return nil, err } - - if total == 0 { - log.ZDebug(ctx, "Not have OutdatedData", "delete Total", total) - return &third.DeleteOutdatedDataResp{Count: int32(total)}, nil - } - - needDelObjectKeys := make([]string, len(models)) - for _, model := range models { - needDelObjectKeys = append(needDelObjectKeys, model.Key) - } - - // Remove duplicate keys, have the same key use in different models - needDelObjectKeys = datautil.Distinct(needDelObjectKeys) - - for _, key := range needDelObjectKeys { - // Find all models by key - keyModels, err := t.s3dataBase.FindModelsByKey(ctx, key) - if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments { + for i, obj := range models { + if err := t.s3dataBase.DeleteSpecifiedData(ctx, engine, []string{obj.Name}); err != nil { return nil, errs.Wrap(err) } - - // check keyModels, if all keyModels. 
- needDelKey := true // Default can delete - for _, keymodel := range keyModels { - // If group is empty or CreateTime is after expireTime, can't delete this key - if keymodel.Group == "" || keymodel.CreateTime.After(expireTime) { - needDelKey = false - break - } + if err := t.s3dataBase.DelS3Key(ctx, engine, obj.Name); err != nil { + return nil, err } - - // If this object is not referenced by not expire data, delete it - if needDelKey && t.minio != nil { - // If have a thumbnail, delete it - thumbnailKey, _ := t.getMinioImageThumbnailKey(ctx, key) - if thumbnailKey != "" { - err := t.s3dataBase.DeleteObject(ctx, thumbnailKey) - if err != nil { - log.ZWarn(ctx, "Delete thumbnail object is error:", errs.Wrap(err), "thumbnailKey", thumbnailKey) - } - } - - // Delete object - err = t.s3dataBase.DeleteObject(ctx, key) - if err != nil { - log.ZWarn(ctx, "Delete object is error", errs.Wrap(err), "object key", key) - } - - // Delete cache key - err = t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, key) - if err != nil { - log.ZWarn(ctx, "Delete cache key is error:", errs.Wrap(err), "cache S3 key:", key) - } - } - } - - // handle delete data in S3 database - for _, model := range models { - // Delete all expired data row in S3 database - err := t.s3dataBase.DeleteSpecifiedData(ctx, model.Engine, model.Name) + count, err := t.s3dataBase.GetKeyCount(ctx, engine, obj.Key) if err != nil { - return nil, errs.Wrap(err) + return nil, err + } + log.ZDebug(ctx, "delete s3 object record", "index", i, "s3", obj, "count", count) + if count == 0 { + if err := t.s3.DeleteObject(ctx, obj.Key); err != nil { + return nil, err + } } } - - log.ZDebug(ctx, "DeleteOutdatedData", "delete Total", total) - - return &third.DeleteOutdatedDataResp{Count: int32(total)}, nil + return &third.DeleteOutdatedDataResp{Count: int32(len(models))}, nil } type FormDataMate struct { diff --git a/internal/rpc/third/third.go b/internal/rpc/third/third.go index 4206a2d6f..bc251ebd0 100644 --- 
a/internal/rpc/third/third.go +++ b/internal/rpc/third/third.go @@ -17,14 +17,14 @@ package third import ( "context" "fmt" - "github.com/openimsdk/open-im-server/v3/pkg/common/config" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" - "github.com/openimsdk/open-im-server/v3/pkg/localcache" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" + "github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/protocol/third" "github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/tools/db/redisutil" @@ -41,10 +41,10 @@ type thirdServer struct { third.UnimplementedThirdServer thirdDatabase controller.ThirdDatabase s3dataBase controller.S3Database - userRpcClient rpcclient.UserRpcClient defaultExpire time.Duration config *Config - minio *minio.Minio + s3 s3.Interface + userClient *rpcli.UserClient } type Config struct { @@ -79,13 +79,11 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg // Select the oss method according to the profile policy enable := config.RpcConfig.Object.Enable var ( - o s3.Interface - minioCli *minio.Minio + o s3.Interface ) switch enable { case "minio": - minioCli, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build()) - o = minioCli + o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build()) case "cos": o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build()) case "oss": @@ -98,22 +96,22 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg if err != nil { return err } + 
userConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.User) + if err != nil { + return err + } localcache.InitLocalCache(&config.LocalCacheConfig) third.RegisterThirdServer(server, &thirdServer{ thirdDatabase: controller.NewThirdDatabase(redis.NewThirdCache(rdb), logdb), - userRpcClient: rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID), s3dataBase: controller.NewS3Database(rdb, o, s3db), defaultExpire: time.Hour * 24 * 7, config: config, - minio: minioCli, + s3: o, + userClient: rpcli.NewUserClient(userConn), }) return nil } -func (t *thirdServer) getMinioImageThumbnailKey(ctx context.Context, name string) (string, error) { - return t.minio.GetImageThumbnailKey(ctx, name) -} - func (t *thirdServer) FcmUpdateToken(ctx context.Context, req *third.FcmUpdateTokenReq) (resp *third.FcmUpdateTokenResp, err error) { err = t.thirdDatabase.FcmUpdateToken(ctx, req.Account, int(req.PlatformID), req.FcmToken, req.ExpireTime) if err != nil { diff --git a/internal/rpc/user/notification.go b/internal/rpc/user/notification.go index b992c9d12..03fdf95bd 100644 --- a/internal/rpc/user/notification.go +++ b/internal/rpc/user/notification.go @@ -16,18 +16,21 @@ package user import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "github.com/openimsdk/protocol/msg" + relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" + "github.com/openimsdk/open-im-server/v3/pkg/notification/common_user" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/notification" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/sdkws" ) type UserNotificationSender struct { *rpcclient.NotificationSender - getUsersInfo func(ctx context.Context, userIDs []string) ([]notification.CommonUser, error) + 
getUsersInfo func(ctx context.Context, userIDs []string) ([]common_user.CommonUser, error) // db controller db controller.UserDatabase } @@ -44,7 +47,7 @@ func WithUserFunc( fn func(ctx context.Context, userIDs []string) (users []*relationtb.User, err error), ) userNotificationSenderOptions { return func(u *UserNotificationSender) { - f := func(ctx context.Context, userIDs []string) (result []notification.CommonUser, err error) { + f := func(ctx context.Context, userIDs []string) (result []common_user.CommonUser, err error) { users, err := fn(ctx, userIDs) if err != nil { return nil, err @@ -58,9 +61,11 @@ func WithUserFunc( } } -func NewUserNotificationSender(config *Config, msgRpcClient *rpcclient.MessageRpcClient, opts ...userNotificationSenderOptions) *UserNotificationSender { +func NewUserNotificationSender(config *Config, msgClient *rpcli.MsgClient, opts ...userNotificationSenderOptions) *UserNotificationSender { f := &UserNotificationSender{ - NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, rpcclient.WithRpcClient(msgRpcClient)), + NotificationSender: rpcclient.NewNotificationSender(&config.NotificationConfig, rpcclient.WithRpcClient(func(ctx context.Context, req *msg.SendMsgReq) (*msg.SendMsgResp, error) { + return msgClient.SendMsg(ctx, req) + })), } for _, opt := range opts { opt(f) diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index 2dfbb01df..02b3dc81d 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -17,6 +17,7 @@ package user import ( "context" "errors" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "math/rand" "strings" "sync" @@ -39,7 +40,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/constant" 
"github.com/openimsdk/protocol/sdkws" pbuser "github.com/openimsdk/protocol/user" @@ -57,11 +57,11 @@ type userServer struct { db controller.UserDatabase friendNotificationSender *relation.FriendNotificationSender userNotificationSender *UserNotificationSender - friendRpcClient *rpcclient.FriendRpcClient - groupRpcClient *rpcclient.GroupRpcClient RegisterCenter registry.SvcDiscoveryRegistry config *Config webhookClient *webhook.Client + groupClient *rpcli.GroupClient + relationClient *rpcli.RelationClient } type Config struct { @@ -94,22 +94,33 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi if err != nil { return err } + msgConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Msg) + if err != nil { + return err + } + groupConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Group) + if err != nil { + return err + } + friendConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Friend) + if err != nil { + return err + } + msgClient := rpcli.NewMsgClient(msgConn) userCache := redis.NewUserCacheRedis(rdb, &config.LocalCacheConfig, userDB, redis.GetRocksCacheOptions()) database := controller.NewUserDatabase(userDB, userCache, mgocli.GetTx()) - friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend) - groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) - msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg) localcache.InitLocalCache(&config.LocalCacheConfig) u := &userServer{ online: redis.NewUserOnline(rdb), db: database, RegisterCenter: client, - friendRpcClient: &friendRpcClient, - groupRpcClient: &groupRpcClient, - friendNotificationSender: relation.NewFriendNotificationSender(&config.NotificationConfig, &msgRpcClient, relation.WithDBFunc(database.FindWithError)), - userNotificationSender: NewUserNotificationSender(config, &msgRpcClient, WithUserFunc(database.FindWithError)), + 
friendNotificationSender: relation.NewFriendNotificationSender(&config.NotificationConfig, msgClient, relation.WithDBFunc(database.FindWithError)), + userNotificationSender: NewUserNotificationSender(config, msgClient, WithUserFunc(database.FindWithError)), config: config, webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), + + groupClient: rpcli.NewGroupClient(groupConn), + relationClient: rpcli.NewRelationClient(friendConn), } pbuser.RegisterUserServer(server, u) return u.db.InitOnce(context.Background(), users) @@ -641,7 +652,7 @@ func (s *userServer) NotificationUserInfoUpdate(ctx context.Context, userID stri wg.Add(len(es)) go func() { defer wg.Done() - _, es[0] = s.groupRpcClient.Client.NotificationUserInfoUpdate(ctx, &group.NotificationUserInfoUpdateReq{ + _, es[0] = s.groupClient.NotificationUserInfoUpdate(ctx, &group.NotificationUserInfoUpdateReq{ UserID: userID, OldUserInfo: oldUserInfo, NewUserInfo: newUserInfo, @@ -650,7 +661,7 @@ func (s *userServer) NotificationUserInfoUpdate(ctx context.Context, userID stri go func() { defer wg.Done() - _, es[1] = s.friendRpcClient.Client.NotificationUserInfoUpdate(ctx, &friendpb.NotificationUserInfoUpdateReq{ + _, es[1] = s.relationClient.NotificationUserInfoUpdate(ctx, &friendpb.NotificationUserInfoUpdateReq{ UserID: userID, OldUserInfo: oldUserInfo, NewUserInfo: newUserInfo, diff --git a/internal/tools/cron_task.go b/internal/tools/cron_task.go index 4f87036c7..79df7500a 100644 --- a/internal/tools/cron_task.go +++ b/internal/tools/cron_task.go @@ -16,10 +16,6 @@ package tools import ( "context" - "fmt" - "os" - "time" - "github.com/openimsdk/open-im-server/v3/pkg/common/config" kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" pbconversation "github.com/openimsdk/protocol/conversation" @@ -69,87 +65,58 @@ func Start(ctx context.Context, config *CronTaskConfig) error { return err } - msgClient := msg.NewMsgClient(msgConn) - conversationClient := 
pbconversation.NewConversationClient(conversationConn) - thirdClient := third.NewThirdClient(thirdConn) - - crontab := cron.New() - - // scheduled hard delete outdated Msgs in specific time. - destructMsgsFunc := func() { - now := time.Now() - deltime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.RetainChatRecords)) - ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deltime.UnixMilli())) - log.ZDebug(ctx, "Destruct chat records", "deltime", deltime, "timestamp", deltime.UnixMilli()) - - if _, err := msgClient.DestructMsgs(ctx, &msg.DestructMsgsReq{Timestamp: deltime.UnixMilli()}); err != nil { - log.ZError(ctx, "cron destruct chat records failed", err, "deltime", deltime, "cont", time.Since(now)) - return - } - log.ZDebug(ctx, "cron destruct chat records success", "deltime", deltime, "cont", time.Since(now)) - } - if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, destructMsgsFunc); err != nil { - return errs.Wrap(err) + srv := &cronServer{ + ctx: ctx, + config: config, + cron: cron.New(), + msgClient: msg.NewMsgClient(msgConn), + conversationClient: pbconversation.NewConversationClient(conversationConn), + thirdClient: third.NewThirdClient(thirdConn), } - // scheduled soft delete outdated Msgs in specific time when user set `is_msg_destruct` feature. 
- clearMsgFunc := func() { - now := time.Now() - ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), now.UnixMilli())) - log.ZDebug(ctx, "clear msg cron start", "now", now) - - conversations, err := conversationClient.GetConversationsNeedClearMsg(ctx, &pbconversation.GetConversationsNeedClearMsgReq{}) - if err != nil { - log.ZError(ctx, "Get conversation need Destruct msgs failed.", err) - return - } - - _, err = msgClient.ClearMsg(ctx, &msg.ClearMsgReq{Conversations: conversations.Conversations}) - if err != nil { - log.ZError(ctx, "Clear Msg failed.", err) - return - } - - log.ZDebug(ctx, "clear msg cron task completed", "cont", time.Since(now)) + if err := srv.registerClearS3(); err != nil { + return err } - if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearMsgFunc); err != nil { - return errs.Wrap(err) + if err := srv.registerDeleteMsg(); err != nil { + return err } - - // scheduled delete outdated file Objects and their datas in specific time. - deleteObjectFunc := func() { - now := time.Now() - executeNum := 5 - // number of pagination. 
if need modify, need update value in third.DeleteOutdatedData - pageShowNumber := 500 - deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime)) - ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli())) - log.ZDebug(ctx, "deleteoutDatedData", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli()) - - if len(config.CronTask.DeleteObjectType) == 0 { - log.ZDebug(ctx, "cron deleteoutDatedData not type need delete", "deletetime", deleteTime, "DeleteObjectType", config.CronTask.DeleteObjectType, "cont", time.Since(now)) - return - } - - for i := 0; i < executeNum; i++ { - resp, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli(), ObjectGroup: config.CronTask.DeleteObjectType}) - if err != nil { - log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now)) - return - } - if resp.Count == 0 || resp.Count < int32(pageShowNumber) { - break - } - } - - log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now)) + if err := srv.registerClearUserMsg(); err != nil { + return err } - if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil { - return errs.Wrap(err) - } - log.ZDebug(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime) - crontab.Start() + srv.cron.Start() <-ctx.Done() return nil } + +type cronServer struct { + ctx context.Context + config *CronTaskConfig + cron *cron.Cron + msgClient msg.MsgClient + conversationClient pbconversation.ConversationClient + thirdClient third.ThirdClient +} + +func (c *cronServer) registerClearS3() error { + if c.config.CronTask.FileExpireTime <= 0 || len(c.config.CronTask.DeleteObjectType) == 0 { + log.ZInfo(c.ctx, "disable scheduled cleanup of s3", "fileExpireTime", c.config.CronTask.FileExpireTime, "deleteObjectType", c.config.CronTask.DeleteObjectType) + 
return nil + } + _, err := c.cron.AddFunc(c.config.CronTask.CronExecuteTime, c.clearS3) + return errs.WrapMsg(err, "failed to register clear s3 cron task") +} + +func (c *cronServer) registerDeleteMsg() error { + if c.config.CronTask.RetainChatRecords <= 0 { + log.ZInfo(c.ctx, "disable scheduled cleanup of chat records", "retainChatRecords", c.config.CronTask.RetainChatRecords) + return nil + } + _, err := c.cron.AddFunc(c.config.CronTask.CronExecuteTime, c.deleteMsg) + return errs.WrapMsg(err, "failed to register delete msg cron task") +} + +func (c *cronServer) registerClearUserMsg() error { + _, err := c.cron.AddFunc(c.config.CronTask.CronExecuteTime, c.clearUserMsg) + return errs.WrapMsg(err, "failed to register clear user msg cron task") +} diff --git a/internal/tools/cron_test.go b/internal/tools/cron_test.go new file mode 100644 index 000000000..890349069 --- /dev/null +++ b/internal/tools/cron_test.go @@ -0,0 +1,63 @@ +package tools + +import ( + "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" + pbconversation "github.com/openimsdk/protocol/conversation" + "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/protocol/third" + "github.com/openimsdk/tools/mcontext" + "github.com/openimsdk/tools/mw" + "github.com/robfig/cron/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "testing" +) + +func TestName(t *testing.T) { + conf := &config.Discovery{ + Enable: config.ETCD, + Etcd: config.Etcd{ + RootDirectory: "openim", + Address: []string{"localhost:12379"}, + }, + } + client, err := kdisc.NewDiscoveryRegister(conf, "source") + if err != nil { + panic(err) + } + client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials())) + ctx := mcontext.SetOpUserID(context.Background(), "imAdmin") + msgConn, err := client.GetConn(ctx, "msg-rpc-service") + if err != nil { + panic(err) + } + thirdConn, err := 
client.GetConn(ctx, "third-rpc-service") + if err != nil { + panic(err) + } + + conversationConn, err := client.GetConn(ctx, "conversation-rpc-service") + if err != nil { + panic(err) + } + + srv := &cronServer{ + ctx: ctx, + config: &CronTaskConfig{ + CronTask: config.CronTask{ + RetainChatRecords: 1, + FileExpireTime: 1, + DeleteObjectType: []string{"msg-picture", "msg-file", "msg-voice", "msg-video", "msg-video-snapshot", "sdklog", ""}, + }, + }, + cron: cron.New(), + msgClient: msg.NewMsgClient(msgConn), + conversationClient: pbconversation.NewConversationClient(conversationConn), + thirdClient: third.NewThirdClient(thirdConn), + } + srv.deleteMsg() + //srv.clearS3() + //srv.clearUserMsg() +} diff --git a/internal/tools/msg.go b/internal/tools/msg.go new file mode 100644 index 000000000..cc00cc5b8 --- /dev/null +++ b/internal/tools/msg.go @@ -0,0 +1,36 @@ +package tools + +import ( + "fmt" + "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mcontext" + "os" + "time" +) + +func (c *cronServer) deleteMsg() { + now := time.Now() + deltime := now.Add(-time.Hour * 24 * time.Duration(c.config.CronTask.RetainChatRecords)) + operationID := fmt.Sprintf("cron_msg_%d_%d", os.Getpid(), deltime.UnixMilli()) + ctx := mcontext.SetOperationID(c.ctx, operationID) + log.ZDebug(ctx, "Destruct chat records", "deltime", deltime, "timestamp", deltime.UnixMilli()) + const ( + deleteCount = 10000 + deleteLimit = 50 + ) + var count int + for i := 1; i <= deleteCount; i++ { + ctx := mcontext.SetOperationID(c.ctx, fmt.Sprintf("%s_%d", operationID, i)) + resp, err := c.msgClient.DestructMsgs(ctx, &msg.DestructMsgsReq{Timestamp: deltime.UnixMilli(), Limit: deleteLimit}) + if err != nil { + log.ZError(ctx, "cron destruct chat records failed", err) + break + } + count += int(resp.Count) + if resp.Count < deleteLimit { + break + } + } + log.ZDebug(ctx, "cron destruct chat records end", "deltime", deltime, "cont", time.Since(now), "count", 
count) +} diff --git a/internal/tools/s3.go b/internal/tools/s3.go new file mode 100644 index 000000000..9b6b9c408 --- /dev/null +++ b/internal/tools/s3.go @@ -0,0 +1,79 @@ +package tools + +import ( + "fmt" + "github.com/openimsdk/protocol/third" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mcontext" + "os" + "time" +) + +func (c *cronServer) clearS3() { + start := time.Now() + deleteTime := start.Add(-time.Hour * 24 * time.Duration(c.config.CronTask.FileExpireTime)) + operationID := fmt.Sprintf("cron_s3_%d_%d", os.Getpid(), deleteTime.UnixMilli()) + ctx := mcontext.SetOperationID(c.ctx, operationID) + log.ZDebug(ctx, "deleteoutDatedData", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli()) + const ( + deleteCount = 10000 + deleteLimit = 100 + ) + + var count int + for i := 1; i <= deleteCount; i++ { + resp, err := c.thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli(), ObjectGroup: c.config.CronTask.DeleteObjectType, Limit: deleteLimit}) + if err != nil { + log.ZError(ctx, "cron deleteoutDatedData failed", err) + return + } + count += int(resp.Count) + if resp.Count < deleteLimit { + break + } + } + log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(start), "count", count) +} + +// var req *third.DeleteOutdatedDataReq +// count1, err := ExtractField(ctx, c.thirdClient.DeleteOutdatedData, req, (*third.DeleteOutdatedDataResp).GetCount) +// +// c.thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{}) +// msggateway.GetUsersOnlineStatusCaller.Invoke(ctx, &msggateway.GetUsersOnlineStatusReq{}) +// +// var cli ThirdClient +// +// c111, err := cli.DeleteOutdatedData(ctx, 100) +// +// cli.ThirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{}) +// +// cli.AuthSign(ctx, &third.AuthSignReq{}) +// +// cli.SetAppBadge() +// +//} +// +//func extractField[A, B, C any](ctx context.Context, fn func(ctx context.Context, req *A, opts 
...grpc.CallOption) (*B, error), req *A, get func(*B) C) (C, error) { +// resp, err := fn(ctx, req) +// if err != nil { +// var c C +// return c, err +// } +// return get(resp), nil +//} +// +//func ignore(_ any, err error) error { +// return err +//} +// +//type ThirdClient struct { +// third.ThirdClient +//} +// +//func (c *ThirdClient) DeleteOutdatedData(ctx context.Context, expireTime int64) (int32, error) { +// return extractField(ctx, c.ThirdClient.DeleteOutdatedData, &third.DeleteOutdatedDataReq{ExpireTime: expireTime}, (*third.DeleteOutdatedDataResp).GetCount) +//} +// +//func (c *ThirdClient) DeleteOutdatedData1(ctx context.Context, expireTime int64) error { +// return ignore(c.ThirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: expireTime})) +//} diff --git a/internal/tools/user_msg.go b/internal/tools/user_msg.go new file mode 100644 index 000000000..a4afa769e --- /dev/null +++ b/internal/tools/user_msg.go @@ -0,0 +1,34 @@ +package tools + +import ( + "fmt" + pbconversation "github.com/openimsdk/protocol/conversation" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/mcontext" + "os" + "time" +) + +func (c *cronServer) clearUserMsg() { + now := time.Now() + operationID := fmt.Sprintf("cron_user_msg_%d_%d", os.Getpid(), now.UnixMilli()) + ctx := mcontext.SetOperationID(c.ctx, operationID) + log.ZDebug(ctx, "clear user msg cron start") + const ( + deleteCount = 10000 + deleteLimit = 100 + ) + var count int + for i := 1; i <= deleteCount; i++ { + resp, err := c.conversationClient.ClearUserConversationMsg(ctx, &pbconversation.ClearUserConversationMsgReq{Timestamp: now.UnixMilli(), Limit: deleteLimit}) + if err != nil { + log.ZError(ctx, "ClearUserConversationMsg failed.", err) + return + } + count += int(resp.Count) + if resp.Count < deleteLimit { + break + } + } + log.ZDebug(ctx, "clear user msg cron task completed", "cont", time.Since(now), "count", count) +} diff --git a/pkg/common/convert/group.go 
b/pkg/common/convert/group.go index bc2b2f998..5e41599c6 100644 --- a/pkg/common/convert/group.go +++ b/pkg/common/convert/group.go @@ -80,9 +80,18 @@ func Db2PbGroupMember(m *model.GroupMember) *sdkws.GroupMemberFullInfo { } } -func Db2PbGroupRequest(m *model.GroupRequest, user *sdkws.PublicUserInfo, group *sdkws.GroupInfo) *sdkws.GroupRequest { +func Db2PbGroupRequest(m *model.GroupRequest, user *sdkws.UserInfo, group *sdkws.GroupInfo) *sdkws.GroupRequest { + var pu *sdkws.PublicUserInfo + if user != nil { + pu = &sdkws.PublicUserInfo{ + UserID: user.UserID, + Nickname: user.Nickname, + FaceURL: user.FaceURL, + Ex: user.Ex, + } + } return &sdkws.GroupRequest{ - UserInfo: user, + UserInfo: pu, GroupInfo: group, HandleResult: m.HandleResult, ReqMsg: m.ReqMsg, diff --git a/pkg/common/startrpc/start.go b/pkg/common/startrpc/start.go index fb8782d30..31f1a060a 100644 --- a/pkg/common/startrpc/start.go +++ b/pkg/common/startrpc/start.go @@ -68,13 +68,9 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo defer client.Close() client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) - registerIP, err = network.GetRpcRegisterIP(registerIP) - if err != nil { - return err - } - //var reg *prometheus.Registry - //var metric *grpcprometheus.ServerMetrics + // var reg *prometheus.Registry + // var metric *grpcprometheus.ServerMetrics if prometheusConfig.Enable { //cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share) //reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics) diff --git a/pkg/common/storage/cache/cachekey/msg.go b/pkg/common/storage/cache/cachekey/msg.go index 8e05b64f1..ac449df38 100644 --- a/pkg/common/storage/cache/cachekey/msg.go +++ b/pkg/common/storage/cache/cachekey/msg.go @@ -15,50 +15,16 @@ package cachekey import ( - "github.com/openimsdk/protocol/constant" "strconv" ) const ( - 
messageCache = "MESSAGE_CACHE:" - messageDelUserList = "MESSAGE_DEL_USER_LIST:" - userDelMessagesList = "USER_DEL_MESSAGES_LIST:" - sendMsgFailedFlag = "SEND_MSG_FAILED_FLAG:" - exTypeKeyLocker = "EX_LOCK:" - reactionExSingle = "EX_SINGLE_" - reactionWriteGroup = "EX_GROUP_" - reactionReadGroup = "EX_SUPER_GROUP_" - reactionNotification = "EX_NOTIFICATION_" + sendMsgFailedFlag = "SEND_MSG_FAILED_FLAG:" + messageCache = "MSG_CACHE:" ) -func GetMessageCacheKey(conversationID string, seq int64) string { - return messageCache + conversationID + "_" + strconv.Itoa(int(seq)) -} - -func GetMessageDelUserListKey(conversationID string, seq int64) string { - return messageDelUserList + conversationID + ":" + strconv.Itoa(int(seq)) -} - -func GetUserDelListKey(conversationID, userID string) string { - return userDelMessagesList + conversationID + ":" + userID -} - -func GetMessageReactionExKey(clientMsgID string, sessionType int32) string { - switch sessionType { - case constant.SingleChatType: - return reactionExSingle + clientMsgID - case constant.WriteGroupChatType: - return reactionWriteGroup + clientMsgID - case constant.ReadGroupChatType: - return reactionReadGroup + clientMsgID - case constant.NotificationChatType: - return reactionNotification + clientMsgID - } - - return "" -} -func GetLockMessageTypeKey(clientMsgID string, TypeKey string) string { - return exTypeKeyLocker + clientMsgID + "_" + TypeKey +func GetMsgCacheKey(conversationID string, seq int64) string { + return messageCache + conversationID + ":" + strconv.Itoa(int(seq)) } func GetSendMsgKey(id string) string { diff --git a/pkg/common/storage/cache/msg.go b/pkg/common/storage/cache/msg.go index 00eb28c02..271ed19fe 100644 --- a/pkg/common/storage/cache/msg.go +++ b/pkg/common/storage/cache/msg.go @@ -16,23 +16,14 @@ package cache import ( "context" - "time" - - "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" ) type MsgCache interface { - 
GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error) - SetMessagesToCache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (int, error) - DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error SetSendMsgStatus(ctx context.Context, id string, status int32) error GetSendMsgStatus(ctx context.Context, id string) (int32, error) - JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) - GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) - DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error - SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) - GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) - SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error - LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error - UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error + + GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) + DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error + SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error } diff --git a/pkg/common/storage/cache/redis/msg.go b/pkg/common/storage/cache/redis/msg.go index b04bc5c35..0651f0283 100644 --- a/pkg/common/storage/cache/redis/msg.go +++ b/pkg/common/storage/cache/redis/msg.go @@ -2,10 +2,12 @@ package redis import ( "context" + "encoding/json" + "github.com/dtm-labs/rockscache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" 
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" - "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" - "github.com/openimsdk/protocol/sdkws" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/utils/datautil" "github.com/redis/go-redis/v9" @@ -13,76 +15,26 @@ import ( ) // // msgCacheTimeout is expiration time of message cache, 86400 seconds -const msgCacheTimeout = 86400 +const msgCacheTimeout = time.Hour * 24 -func NewMsgCache(client redis.UniversalClient) cache.MsgCache { - return &msgCache{rdb: client} +func NewMsgCache(client redis.UniversalClient, db database.Msg) cache.MsgCache { + return &msgCache{ + rdb: client, + rcClient: rockscache.NewClient(client, *GetRocksCacheOptions()), + msgDocDatabase: db, + } } type msgCache struct { - rdb redis.UniversalClient -} - -func (c *msgCache) getMessageCacheKey(conversationID string, seq int64) string { - return cachekey.GetMessageCacheKey(conversationID, seq) -} -func (c *msgCache) getMessageDelUserListKey(conversationID string, seq int64) string { - return cachekey.GetMessageDelUserListKey(conversationID, seq) -} - -func (c *msgCache) getUserDelList(conversationID, userID string) string { - return cachekey.GetUserDelListKey(conversationID, userID) + rdb redis.UniversalClient + rcClient *rockscache.Client + msgDocDatabase database.Msg } func (c *msgCache) getSendMsgKey(id string) string { return cachekey.GetSendMsgKey(id) } -func (c *msgCache) getLockMessageTypeKey(clientMsgID string, TypeKey string) string { - return cachekey.GetLockMessageTypeKey(clientMsgID, TypeKey) -} - -func (c *msgCache) getMessageReactionExPrefix(clientMsgID string, sessionType int32) string { - return cachekey.GetMessageReactionExKey(clientMsgID, sessionType) -} - -func (c *msgCache) SetMessagesToCache(ctx context.Context, conversationID string, msgs 
[]*sdkws.MsgData) (int, error) { - msgMap := datautil.SliceToMap(msgs, func(msg *sdkws.MsgData) string { - return c.getMessageCacheKey(conversationID, msg.Seq) - }) - keys := datautil.Slice(msgs, func(msg *sdkws.MsgData) string { - return c.getMessageCacheKey(conversationID, msg.Seq) - }) - err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error { - var values []string - for _, key := range keys { - if msg, ok := msgMap[key]; ok { - s, err := msgprocessor.Pb2String(msg) - if err != nil { - return err - } - values = append(values, s) - } - } - return LuaSetBatchWithCommonExpire(ctx, c.rdb, keys, values, msgCacheTimeout) - }) - if err != nil { - return 0, err - } - return len(msgs), nil -} - -func (c *msgCache) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error { - var keys []string - for _, seq := range seqs { - keys = append(keys, c.getMessageCacheKey(conversationID, seq)) - } - - return ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error { - return LuaDeleteBatch(ctx, c.rdb, keys) - }) -} - func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error { return errs.Wrap(c.rdb.Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err()) } @@ -92,81 +44,53 @@ func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, erro return int32(result), errs.Wrap(err) } -func (c *msgCache) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error { - key := c.getLockMessageTypeKey(clientMsgID, TypeKey) - return errs.Wrap(c.rdb.SetNX(ctx, key, 1, time.Minute).Err()) +func (c *msgCache) GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) { + if len(seqs) == 0 { + return nil, nil + } + getKey := func(seq int64) string { + return cachekey.GetMsgCacheKey(conversationID, seq) + } + getMsgID := func(msg *model.MsgInfoModel) int64 { + return 
msg.Msg.Seq + } + find := func(ctx context.Context, seqs []int64) ([]*model.MsgInfoModel, error) { + return c.msgDocDatabase.FindSeqs(ctx, conversationID, seqs) + } + return batchGetCache2(ctx, c.rcClient, msgCacheTimeout, seqs, getKey, getMsgID, find) } -func (c *msgCache) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error { - key := c.getLockMessageTypeKey(clientMsgID, TypeKey) - return errs.Wrap(c.rdb.Del(ctx, key).Err()) -} - -func (c *msgCache) JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) { - n, err := c.rdb.Exists(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType)).Result() +func (c *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error { + if len(seqs) == 0 { + return nil + } + keys := datautil.Slice(seqs, func(seq int64) string { + return cachekey.GetMsgCacheKey(conversationID, seq) + }) + slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(c.rcClient), keys) if err != nil { - return false, errs.Wrap(err) + return err } - - return n > 0, nil -} - -func (c *msgCache) SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error { - return errs.Wrap(c.rdb.HSet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey, value).Err()) -} - -func (c *msgCache) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) { - val, err := c.rdb.Expire(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), expiration).Result() - return val, errs.Wrap(err) -} - -func (c *msgCache) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) { - val, err := c.rdb.HGet(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), typeKey).Result() - return val, errs.Wrap(err) -} - -func (c *msgCache) GetOneMessageAllReactionList(ctx 
context.Context, clientMsgID string, sessionType int32) (map[string]string, error) { - val, err := c.rdb.HGetAll(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType)).Result() - return val, errs.Wrap(err) -} - -func (c *msgCache) DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error { - return errs.Wrap(c.rdb.HDel(ctx, c.getMessageReactionExPrefix(clientMsgID, sessionType), subKey).Err()) -} - -func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string, seqs []int64) (seqMsgs []*sdkws.MsgData, failedSeqs []int64, err error) { - var keys []string - keySeqMap := make(map[string]int64, 10) - for _, seq := range seqs { - key := c.getMessageCacheKey(conversationID, seq) - keys = append(keys, key) - keySeqMap[key] = seq + for _, keys := range slotKeys { + if err := c.rcClient.TagAsDeletedBatch2(ctx, keys); err != nil { + return err + } } - err = ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error { - result, err := LuaGetBatch(ctx, c.rdb, keys) + return nil +} + +func (c *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error { + for _, msg := range msgs { + if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 { + continue + } + data, err := json.Marshal(msg) if err != nil { return err } - for i, value := range result { - seq := keySeqMap[keys[i]] - if value == nil { - failedSeqs = append(failedSeqs, seq) - continue - } - - msg := &sdkws.MsgData{} - msgString, ok := value.(string) - if !ok || msgprocessor.String2Pb(msgString, msg) != nil { - failedSeqs = append(failedSeqs, seq) - continue - } - seqMsgs = append(seqMsgs, msg) - + if err := c.rcClient.RawSet(ctx, cachekey.GetMsgCacheKey(conversationID, msg.Msg.Seq), string(data), msgCacheTimeout); err != nil { + return err } - return nil - }) - if err != nil { - return nil, nil, err } - return seqMsgs, failedSeqs, nil + return nil } diff --git 
a/pkg/common/storage/cache/redis/msg_test.go b/pkg/common/storage/cache/redis/msg_test.go deleted file mode 100644 index 10b9ce18b..000000000 --- a/pkg/common/storage/cache/redis/msg_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package redis - -import ( - "context" - "fmt" - "github.com/openimsdk/protocol/sdkws" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/proto" - "testing" -) - -func Test_msgCache_SetMessagesToCache(t *testing.T) { - type fields struct { - rdb redis.UniversalClient - } - type args struct { - ctx context.Context - conversationID string - msgs []*sdkws.MsgData - } - tests := []struct { - name string - fields fields - args args - want int - wantErr assert.ErrorAssertionFunc - }{ - {"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Username: "", Password: "openIM123", DB: 0})}, args{context.Background(), - "cid", []*sdkws.MsgData{{Seq: 1}, {Seq: 2}, {Seq: 3}}}, 3, assert.NoError}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &msgCache{ - rdb: tt.fields.rdb, - } - got, err := c.SetMessagesToCache(tt.args.ctx, tt.args.conversationID, tt.args.msgs) - if !tt.wantErr(t, err, fmt.Sprintf("SetMessagesToCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.msgs)) { - return - } - assert.Equalf(t, tt.want, got, 
"SetMessagesToCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.msgs) - }) - } -} - -func Test_msgCache_GetMessagesBySeq(t *testing.T) { - type fields struct { - rdb redis.UniversalClient - } - type args struct { - ctx context.Context - conversationID string - seqs []int64 - } - var failedSeq []int64 - tests := []struct { - name string - fields fields - args args - wantSeqMsgs []*sdkws.MsgData - wantFailedSeqs []int64 - wantErr assert.ErrorAssertionFunc - }{ - {"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Password: "openIM123", DB: 0})}, - args{context.Background(), "cid", []int64{1, 2, 3}}, - []*sdkws.MsgData{{Seq: 1}, {Seq: 2}, {Seq: 3}}, failedSeq, assert.NoError}, - {"test2", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Password: "openIM123", DB: 0})}, - args{context.Background(), "cid", []int64{4, 5, 6}}, - nil, []int64{4, 5, 6}, assert.NoError}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &msgCache{ - rdb: tt.fields.rdb, - } - gotSeqMsgs, gotFailedSeqs, err := c.GetMessagesBySeq(tt.args.ctx, tt.args.conversationID, tt.args.seqs) - if !tt.wantErr(t, err, fmt.Sprintf("GetMessagesBySeq(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs)) { - return - } - equalMsgDataSlices(t, tt.wantSeqMsgs, gotSeqMsgs) - assert.Equalf(t, tt.wantFailedSeqs, gotFailedSeqs, "GetMessagesBySeq(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs) - }) - } -} - -func equalMsgDataSlices(t *testing.T, expected, actual []*sdkws.MsgData) { - assert.Equal(t, len(expected), len(actual), "Slices have different lengths") - for i := range expected { - assert.True(t, proto.Equal(expected[i], actual[i]), "Element %d not equal: expected %v, got %v", i, expected[i], actual[i]) - } -} - -func Test_msgCache_DeleteMessagesFromCache(t *testing.T) { - type fields struct { - rdb redis.UniversalClient - } - type args struct { - ctx context.Context - conversationID string - seqs 
[]int64 - } - tests := []struct { - name string - fields fields - args args - wantErr assert.ErrorAssertionFunc - }{ - {"test1", fields{rdb: redis.NewClient(&redis.Options{Addr: "localhost:16379", Password: "openIM123"})}, - args{context.Background(), "cid", []int64{1, 2, 3}}, assert.NoError}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &msgCache{ - rdb: tt.fields.rdb, - } - tt.wantErr(t, c.DeleteMessagesFromCache(tt.args.ctx, tt.args.conversationID, tt.args.seqs), - fmt.Sprintf("DeleteMessagesFromCache(%v, %v, %v)", tt.args.ctx, tt.args.conversationID, tt.args.seqs)) - }) - } -} diff --git a/pkg/common/storage/cache/redis/token.go b/pkg/common/storage/cache/redis/token.go index 998b4f1c9..510da43e3 100644 --- a/pkg/common/storage/cache/redis/token.go +++ b/pkg/common/storage/cache/redis/token.go @@ -2,13 +2,14 @@ package redis import ( "context" + "strconv" + "sync" + "time" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/tools/errs" "github.com/redis/go-redis/v9" - "strconv" - "sync" - "time" ) type tokenCache struct { @@ -99,7 +100,7 @@ func (c *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, pla return errs.Wrap(c.rdb.HSet(ctx, cachekey.GetTokenKey(userID, platformID), mm).Err()) } -func (c *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]int) error { +func (c *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error { pipe := c.rdb.Pipeline() for k, v := range tokens { pipe.HSet(ctx, k, v) diff --git a/pkg/common/storage/cache/token.go b/pkg/common/storage/cache/token.go index ee0004d7f..e5e0a9383 100644 --- a/pkg/common/storage/cache/token.go +++ b/pkg/common/storage/cache/token.go @@ -11,6 +11,6 @@ type TokenModel interface { GetTokensWithoutError(ctx context.Context, userID string, platformID int) 
(map[string]int, error) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error - BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]int) error + BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error } diff --git a/pkg/common/storage/controller/auth.go b/pkg/common/storage/controller/auth.go index b134c3c3b..be8160955 100644 --- a/pkg/common/storage/controller/auth.go +++ b/pkg/common/storage/controller/auth.go @@ -56,7 +56,7 @@ func (a *authDatabase) SetTokenMapByUidPid(ctx context.Context, userID string, p } func (a *authDatabase) BatchSetTokenMapByUidPid(ctx context.Context, tokens []string) error { - setMap := make(map[string]map[string]int) + setMap := make(map[string]map[string]any) for _, token := range tokens { claims, err := tokenverify.GetClaimFromToken(token, authverify.Secret(a.accessSecret)) key := cachekey.GetTokenKey(claims.UserID, claims.PlatformID) @@ -66,7 +66,7 @@ func (a *authDatabase) BatchSetTokenMapByUidPid(ctx context.Context, tokens []st if v, ok := setMap[key]; ok { v[token] = constant.KickedToken } else { - setMap[key] = map[string]int{ + setMap[key] = map[string]any{ token: constant.KickedToken, } } diff --git a/pkg/common/storage/controller/conversation.go b/pkg/common/storage/controller/conversation.go index bf41cce95..d4088e0c0 100644 --- a/pkg/common/storage/controller/conversation.go +++ b/pkg/common/storage/controller/conversation.go @@ -74,6 +74,8 @@ type ConversationDatabase interface { GetNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error) // GetPinnedConversationIDs gets pinned conversationIDs by userID GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) + // FindRandConversation finds random 
conversations based on the specified timestamp and limit. + FindRandConversation(ctx context.Context, ts int64, limit int) ([]*relationtb.Conversation, error) } func NewConversationDatabase(conversation database.Conversation, cache cache.ConversationCache, tx tx.Tx) ConversationDatabase { @@ -401,3 +403,7 @@ func (c *conversationDatabase) GetPinnedConversationIDs(ctx context.Context, use } return conversationIDs, nil } + +func (c *conversationDatabase) FindRandConversation(ctx context.Context, ts int64, limit int) ([]*relationtb.Conversation, error) { + return c.conversationDB.FindRandConversation(ctx, ts, limit) +} diff --git a/pkg/common/storage/controller/msg.go b/pkg/common/storage/controller/msg.go index 8d82d8543..d5ad12584 100644 --- a/pkg/common/storage/controller/msg.go +++ b/pkg/common/storage/controller/msg.go @@ -18,6 +18,8 @@ import ( "context" "encoding/json" "errors" + "github.com/openimsdk/tools/utils/jsonutil" + "strconv" "strings" "time" @@ -37,7 +39,6 @@ import ( "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/utils/datautil" - "github.com/openimsdk/tools/utils/timeutil" ) const ( @@ -55,12 +56,8 @@ type CommonMsgDatabase interface { GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (minSeq int64, maxSeq int64, seqMsg []*sdkws.MsgData, err error) // GetMsgBySeqs retrieves messages for large groups from MongoDB by sequence numbers. GetMsgBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) (minSeq int64, maxSeq int64, seqMsg []*sdkws.MsgData, err error) - // DeleteConversationMsgsAndSetMinSeq deletes conversation messages and resets the minimum sequence number. If `remainTime` is 0, all messages are deleted (this method does not delete Redis - // cache). 
+ GetMessagesBySeqWithBounds(ctx context.Context, userID string, conversationID string, seqs []int64, pullOrder sdkws.PullOrder) (bool, int64, []*sdkws.MsgData, error) - DeleteConversationMsgsAndSetMinSeq(ctx context.Context, conversationID string, remainTime int64) error - // ClearUserMsgs marks messages for deletion based on clear time and returns a list of sequence numbers for marked messages. - ClearUserMsgs(ctx context.Context, userID string, conversationID string, clearTime int64, lastMsgClearTime time.Time) (seqs []int64, err error) // DeleteUserMsgsBySeqs allows a user to delete messages based on sequence numbers. DeleteUserMsgsBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) error // DeleteMsgsPhysicalBySeqs physically deletes messages by emptying them based on sequence numbers. @@ -69,6 +66,7 @@ type CommonMsgDatabase interface { GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) SetMinSeqs(ctx context.Context, seqs map[string]int64) error + SetMinSeq(ctx context.Context, conversationID string, seq int64) error SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error @@ -80,8 +78,6 @@ type CommonMsgDatabase interface { GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) - //GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error) - //GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error) SetSendMsgStatus(ctx context.Context, id string, status int32) error GetSendMsgStatus(ctx context.Context, id 
string) (int32, error) SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*pbmsg.SearchedMsgData, err error) @@ -92,16 +88,15 @@ type CommonMsgDatabase interface { RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) - ConvertMsgsDocLen(ctx context.Context, conversationIDs []string) - // get Msg when destruct msg before - GetBeforeMsg(ctx context.Context, ts int64, docIds []string, limit int) ([]*model.MsgDocModel, error) - DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) - - GetDocIDs(ctx context.Context) ([]string, error) + GetRandBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) SetUserConversationsMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error SetUserConversationsMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error + + DeleteDoc(ctx context.Context, docID string) error + + GetLastMessageSeqByTime(ctx context.Context, conversationID string, time int64) (int64, error) } func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) { @@ -115,7 +110,7 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser } return &commonMsgDatabase{ msgDocDatabase: msgDocModel, - msg: msg, + msgCache: msg, seqUser: seqUser, seqConversation: seqConversation, producer: producerToRedis, @@ -125,7 +120,7 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg 
cache.MsgCache, seqUser type commonMsgDatabase struct { msgDocDatabase database.Msg msgTable model.MsgDocModel - msg cache.MsgCache + msgCache cache.MsgCache seqConversation cache.SeqConversationCache seqUser cache.SeqUser producer *kafka.Producer @@ -136,7 +131,7 @@ func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sd return err } -func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { +func (db *commonMsgDatabase) batchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { if len(fields) == 0 { return nil } @@ -234,11 +229,15 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI tryUpdate = false // The current block is inserted successfully, and the next block is inserted preferentially i += insert - 1 // Skip the inserted data } + return nil } func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error { - return db.BatchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq) + if err := db.batchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq); err != nil { + return err + } + return db.msgCache.DelMessageBySeqs(ctx, conversationID, []int64{seq}) } func (db *commonMsgDatabase) MarkSingleChatMsgsAsRead(ctx context.Context, userID string, conversationID string, totalSeqs []int64) error { @@ -253,24 +252,17 @@ func (db *commonMsgDatabase) MarkSingleChatMsgsAsRead(ctx context.Context, userI return err } } - return nil + return db.msgCache.DelMessageBySeqs(ctx, conversationID, totalSeqs) } func (db *commonMsgDatabase) getMsgBySeqs(ctx context.Context, userID, conversationID string, seqs []int64) (totalMsgs []*sdkws.MsgData, err error) { - for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) { - // log.ZDebug(ctx, "getMsgBySeqs", "docID", docID, "seqs", seqs) 
- msgs, err := db.findMsgInfoBySeq(ctx, userID, docID, conversationID, seqs) - if err != nil { - return nil, err - } - for _, msg := range msgs { - totalMsgs = append(totalMsgs, convert.MsgDB2Pb(msg.Msg)) - } - } - return totalMsgs, nil + return db.GetMessageBySeqs(ctx, conversationID, userID, seqs) } func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][]*model.MsgInfoModel, userID, conversationID string, msg *model.MsgInfoModel) { + if msg == nil || msg.Msg == nil { + return + } if msg.IsRead { msg.Msg.IsRead = true } @@ -280,16 +272,53 @@ func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][ if msg.Msg.Content == "" { return } + type MsgData struct { + SendID string `json:"sendID"` + RecvID string `json:"recvID"` + GroupID string `json:"groupID"` + ClientMsgID string `json:"clientMsgID"` + ServerMsgID string `json:"serverMsgID"` + SenderPlatformID int32 `json:"senderPlatformID"` + SenderNickname string `json:"senderNickname"` + SenderFaceURL string `json:"senderFaceURL"` + SessionType int32 `json:"sessionType"` + MsgFrom int32 `json:"msgFrom"` + ContentType int32 `json:"contentType"` + Content string `json:"content"` + Seq int64 `json:"seq"` + SendTime int64 `json:"sendTime"` + CreateTime int64 `json:"createTime"` + Status int32 `json:"status"` + IsRead bool `json:"isRead"` + Options map[string]bool `json:"options,omitempty"` + OfflinePushInfo *sdkws.OfflinePushInfo `json:"offlinePushInfo"` + AtUserIDList []string `json:"atUserIDList"` + AttachedInfo string `json:"attachedInfo"` + Ex string `json:"ex"` + KeyVersion int32 `json:"keyVersion"` + DstUserIDs []string `json:"dstUserIDs"` + } var quoteMsg struct { Text string `json:"text,omitempty"` - QuoteMessage *sdkws.MsgData `json:"quoteMessage,omitempty"` + QuoteMessage *MsgData `json:"quoteMessage,omitempty"` MessageEntityList json.RawMessage `json:"messageEntityList,omitempty"` } if err := json.Unmarshal([]byte(msg.Msg.Content), "eMsg); err != nil { 
log.ZError(ctx, "json.Unmarshal", err) return } - if quoteMsg.QuoteMessage == nil || quoteMsg.QuoteMessage.ContentType == constant.MsgRevokeNotification { + if quoteMsg.QuoteMessage == nil || quoteMsg.QuoteMessage.Content == "" { + return + } + if quoteMsg.QuoteMessage.Content == "e30=" { + quoteMsg.QuoteMessage.Content = "{}" + data, err := json.Marshal("eMsg) + if err != nil { + return + } + msg.Msg.Content = string(data) + } + if quoteMsg.QuoteMessage.Seq <= 0 && quoteMsg.QuoteMessage.ContentType == constant.MsgRevokeNotification { return } var msgs []*model.MsgInfoModel @@ -311,9 +340,9 @@ func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][ } quoteMsg.QuoteMessage.ContentType = constant.MsgRevokeNotification if len(msgs) > 0 { - quoteMsg.QuoteMessage.Content = []byte(msgs[0].Msg.Content) + quoteMsg.QuoteMessage.Content = msgs[0].Msg.Content } else { - quoteMsg.QuoteMessage.Content = []byte("{}") + quoteMsg.QuoteMessage.Content = "{}" } data, err := json.Marshal("eMsg) if err != nil { @@ -321,9 +350,6 @@ func (db *commonMsgDatabase) handlerDBMsg(ctx context.Context, cache map[int64][ return } msg.Msg.Content = string(data) - if _, err := db.msgDocDatabase.UpdateMsg(ctx, db.msgTable.GetDocID(conversationID, msg.Msg.Seq), db.msgTable.GetMsgIndex(msg.Msg.Seq), "msg", msg.Msg); err != nil { - log.ZError(ctx, "UpdateMsgContent", err) - } } func (db *commonMsgDatabase) findMsgInfoBySeq(ctx context.Context, userID, docID string, conversationID string, seqs []int64) (totalMsgs []*model.MsgInfoModel, err error) { @@ -338,24 +364,6 @@ func (db *commonMsgDatabase) findMsgInfoBySeq(ctx context.Context, userID, docID return msgs, err } -func (db *commonMsgDatabase) getMsgBySeqsRange(ctx context.Context, userID string, conversationID string, allSeqs []int64, begin, end int64) (seqMsgs []*sdkws.MsgData, err error) { - log.ZDebug(ctx, "getMsgBySeqsRange", "conversationID", conversationID, "allSeqs", allSeqs, "begin", begin, "end", end) - for docID, 
seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, allSeqs) { - log.ZDebug(ctx, "getMsgBySeqsRange", "docID", docID, "seqs", seqs) - msgs, err := db.findMsgInfoBySeq(ctx, userID, docID, conversationID, seqs) - if err != nil { - return nil, err - } - for _, msg := range msgs { - if msg.IsRead { - msg.Msg.IsRead = true - } - seqMsgs = append(seqMsgs, convert.MsgDB2Pb(msg.Msg)) - } - } - return seqMsgs, nil -} - // GetMsgBySeqsRange In the context of group chat, we have the following parameters: // // "maxSeq" of a conversation: It represents the maximum value of messages in the group conversation. @@ -424,37 +432,10 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin seqs = append(seqs, i) } } - - if len(seqs) == 0 { - return 0, 0, nil, nil + successMsgs, err := db.GetMessageBySeqs(ctx, conversationID, userID, seqs) + if err != nil { + return 0, 0, nil, err } - newBegin := seqs[0] - newEnd := seqs[len(seqs)-1] - var successMsgs []*sdkws.MsgData - log.ZDebug(ctx, "GetMsgBySeqsRange", "first seqs", seqs, "newBegin", newBegin, "newEnd", newEnd) - cachedMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, seqs) - if err != nil && !errors.Is(err, redis.Nil) { - log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs) - } - successMsgs = append(successMsgs, cachedMsgs...) - log.ZDebug(ctx, "get msgs from cache", "cachedMsgs", cachedMsgs) - // get from cache or db - - if len(failedSeqs) > 0 { - log.ZDebug(ctx, "msgs not exist in redis", "seqs", failedSeqs) - mongoMsgs, err := db.getMsgBySeqsRange(ctx, userID, conversationID, failedSeqs, begin, end) - if err != nil { - - return 0, 0, nil, err - } - successMsgs = append(mongoMsgs, successMsgs...) 
- - //_, err = db.msg.SetMessagesToCache(ctx, conversationID, mongoMsgs) - //if err != nil { - // return 0, 0, nil, err - //} - } - return minSeq, maxSeq, successMsgs, nil } @@ -490,31 +471,9 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co newSeqs = append(newSeqs, seq) } } - if len(newSeqs) == 0 { - return minSeq, maxSeq, nil, nil - } - successMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, newSeqs) + successMsgs, err := db.GetMessageBySeqs(ctx, conversationID, userID, newSeqs) if err != nil { - if !errors.Is(err, redis.Nil) { - log.ZWarn(ctx, "get message from redis exception", err, "failedSeqs", failedSeqs, "conversationID", conversationID) - } - } - log.ZDebug(ctx, "db.seq.GetMessagesBySeq", "userID", userID, "conversationID", conversationID, "seqs", - seqs, "len(successMsgs)", len(successMsgs), "failedSeqs", failedSeqs) - - if len(failedSeqs) > 0 { - mongoMsgs, err := db.getMsgBySeqs(ctx, userID, conversationID, failedSeqs) - if err != nil { - - return 0, 0, nil, err - } - - successMsgs = append(successMsgs, mongoMsgs...) 
- - //_, err = db.msg.SetMessagesToCache(ctx, conversationID, mongoMsgs) - //if err != nil { - // return 0, 0, nil, err - //} + return 0, 0, nil, err } return minSeq, maxSeq, successMsgs, nil } @@ -568,174 +527,14 @@ func (db *commonMsgDatabase) GetMessagesBySeqWithBounds(ctx context.Context, use if len(newSeqs) == 0 { return isEnd, endSeq, nil, nil } - successMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, newSeqs) + successMsgs, err := db.GetMessageBySeqs(ctx, conversationID, userID, newSeqs) if err != nil { - if !errors.Is(err, redis.Nil) { - log.ZWarn(ctx, "get message from redis exception", err, "failedSeqs", failedSeqs, "conversationID", conversationID) - } - } - log.ZDebug(ctx, "db.seq.GetMessagesBySeq", "userID", userID, "conversationID", conversationID, "seqs", - seqs, "len(successMsgs)", len(successMsgs), "failedSeqs", failedSeqs) - - if len(failedSeqs) > 0 { - mongoMsgs, err := db.getMsgBySeqs(ctx, userID, conversationID, failedSeqs) - if err != nil { - - return false, 0, nil, err - } - - successMsgs = append(successMsgs, mongoMsgs...) 
- - //_, err = db.msg.SetMessagesToCache(ctx, conversationID, mongoMsgs) - //if err != nil { - // return 0, 0, nil, err - //} + return false, 0, nil, err } return isEnd, endSeq, successMsgs, nil } -func (db *commonMsgDatabase) DeleteConversationMsgsAndSetMinSeq(ctx context.Context, conversationID string, remainTime int64) error { - var delStruct delMsgRecursionStruct - var skip int64 - minSeq, err := db.deleteMsgRecursion(ctx, conversationID, skip, &delStruct, remainTime) - if err != nil { - return err - } - log.ZDebug(ctx, "DeleteConversationMsgsAndSetMinSeq", "conversationID", conversationID, "minSeq", minSeq) - if minSeq == 0 { - return nil - } - return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq) -} - -func (db *commonMsgDatabase) ClearUserMsgs(ctx context.Context, userID string, conversationID string, clearTime int64, lastMsgClearTime time.Time) (seqs []int64, err error) { - var index int64 - for { - // from oldest 2 newest, ASC - msgDocModel, err := db.msgDocDatabase.GetMsgDocModelByIndex(ctx, conversationID, index, 1) - if err != nil || msgDocModel.DocID == "" { - if err != nil { - if err == model.ErrMsgListNotExist { - log.ZDebug(ctx, "not doc find", "conversationID", conversationID, "userID", userID, "index", index) - } else { - log.ZError(ctx, "deleteMsgRecursion GetUserMsgListByIndex failed", err, "conversationID", conversationID, "index", index) - } - } - // If an error is reported, or the error cannot be obtained, it is physically deleted and seq delMongoMsgsPhysical(delStruct.delDocIDList) is returned to end the recursion - break - } - - index++ - - // && msgDocModel.Msg[0].Msg.SendTime > lastMsgClearTime.UnixMilli() - if len(msgDocModel.Msg) > 0 { - i := 0 - var over bool - for _, msg := range msgDocModel.Msg { - i++ - // over clear time, need to clear - if msg != nil && msg.Msg != nil && msg.Msg.SendTime+clearTime*1000 <= time.Now().UnixMilli() { - // if msg is not in del list, add to del list - if msg.Msg.SendTime+clearTime*1000 > 
lastMsgClearTime.UnixMilli() && !datautil.Contain(userID, msg.DelList...) { - seqs = append(seqs, msg.Msg.Seq) - } - } else { - log.ZDebug(ctx, "all msg need destruct is found", "conversationID", conversationID, "userID", userID, "index", index, "stop index", i) - over = true - break - } - } - if over { - break - } - } - } - - log.ZDebug(ctx, "ClearUserMsgs", "conversationID", conversationID, "userID", userID, "seqs", seqs) - - // have msg need to destruct - if len(seqs) > 0 { - // update min seq to clear after - userMinSeq := seqs[len(seqs)-1] + 1 // user min seq when clear after - currentUserMinSeq, err := db.seqUser.GetUserMinSeq(ctx, conversationID, userID) // user min seq when clear before - if err != nil { - return nil, err - } - - // if before < after, update min seq - if currentUserMinSeq < userMinSeq { - if err := db.seqUser.SetUserMinSeq(ctx, conversationID, userID, userMinSeq); err != nil { - return nil, err - } - } - } - return seqs, nil -} - -// this is struct for recursion. -type delMsgRecursionStruct struct { - minSeq int64 - delDocIDs []string -} - -func (d *delMsgRecursionStruct) getSetMinSeq() int64 { - return d.minSeq -} - -// index 0....19(del) 20...69 -// seq 70 -// set minSeq 21 -// recursion deletes the list and returns the set minimum seq. 
-func (db *commonMsgDatabase) deleteMsgRecursion(ctx context.Context, conversationID string, index int64, delStruct *delMsgRecursionStruct, remainTime int64) (int64, error) { - // find from oldest list - msgDocModel, err := db.msgDocDatabase.GetMsgDocModelByIndex(ctx, conversationID, index, 1) - if err != nil || msgDocModel.DocID == "" { - if err != nil { - if err == model.ErrMsgListNotExist { - log.ZDebug(ctx, "deleteMsgRecursion ErrMsgListNotExist", "conversationID", conversationID, "index:", index) - } else { - log.ZError(ctx, "deleteMsgRecursion GetUserMsgListByIndex failed", err, "conversationID", conversationID, "index", index) - } - } - // If an error is reported, or the error cannot be obtained, it is physically deleted and seq delMongoMsgsPhysical(delStruct.delDocIDList) is returned to end the recursion - err = db.msgDocDatabase.DeleteDocs(ctx, delStruct.delDocIDs) - if err != nil { - return 0, err - } - return delStruct.getSetMinSeq() + 1, nil - } - log.ZDebug(ctx, "doc info", "conversationID", conversationID, "index", index, "docID", msgDocModel.DocID, "len", len(msgDocModel.Msg)) - if int64(len(msgDocModel.Msg)) > db.msgTable.GetSingleGocMsgNum() { - log.ZWarn(ctx, "msgs too large", nil, "length", len(msgDocModel.Msg), "docID:", msgDocModel.DocID) - } - if msgDocModel.IsFull() && msgDocModel.Msg[len(msgDocModel.Msg)-1].Msg.SendTime+(remainTime*1000) < timeutil.GetCurrentTimestampByMill() { - log.ZDebug(ctx, "doc is full and all msg is expired", "docID", msgDocModel.DocID) - delStruct.delDocIDs = append(delStruct.delDocIDs, msgDocModel.DocID) - delStruct.minSeq = msgDocModel.Msg[len(msgDocModel.Msg)-1].Msg.Seq - } else { - var delMsgIndexs []int - for i, MsgInfoModel := range msgDocModel.Msg { - if MsgInfoModel != nil && MsgInfoModel.Msg != nil { - if timeutil.GetCurrentTimestampByMill() > MsgInfoModel.Msg.SendTime+(remainTime*1000) { - delMsgIndexs = append(delMsgIndexs, i) - } - } - } - if len(delMsgIndexs) > 0 { - if err = 
db.msgDocDatabase.DeleteMsgsInOneDocByIndex(ctx, msgDocModel.DocID, delMsgIndexs); err != nil { - log.ZError(ctx, "deleteMsgRecursion DeleteMsgsInOneDocByIndex failed", err, "conversationID", conversationID, "index", index) - } - delStruct.minSeq = int64(msgDocModel.Msg[delMsgIndexs[len(delMsgIndexs)-1]].Msg.Seq) - } - } - seq, err := db.deleteMsgRecursion(ctx, conversationID, index+1, delStruct, remainTime) - return seq, err -} - func (db *commonMsgDatabase) DeleteMsgsPhysicalBySeqs(ctx context.Context, conversationID string, allSeqs []int64) error { - if err := db.msg.DeleteMessagesFromCache(ctx, conversationID, allSeqs); err != nil { - return err - } for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, allSeqs) { var indexes []int for _, seq := range seqs { @@ -745,13 +544,10 @@ func (db *commonMsgDatabase) DeleteMsgsPhysicalBySeqs(ctx context.Context, conve return err } } - return nil + return db.msgCache.DelMessageBySeqs(ctx, conversationID, allSeqs) } func (db *commonMsgDatabase) DeleteUserMsgsBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) error { - if err := db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs); err != nil { - return err - } for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) { for _, seq := range seqs { if _, err := db.msgDocDatabase.PushUnique(ctx, docID, db.msgTable.GetMsgIndex(seq), "del_list", []string{userID}); err != nil { @@ -759,7 +555,7 @@ func (db *commonMsgDatabase) DeleteUserMsgsBySeqs(ctx context.Context, userID st } } } - return nil + return db.msgCache.DelMessageBySeqs(ctx, conversationID, seqs) } func (db *commonMsgDatabase) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) { @@ -770,10 +566,6 @@ func (db *commonMsgDatabase) GetMaxSeq(ctx context.Context, conversationID strin return db.seqConversation.GetMaxSeq(ctx, conversationID) } -func (db *commonMsgDatabase) SetMinSeq(ctx context.Context, conversationID string, 
minSeq int64) error { - return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq) -} - func (db *commonMsgDatabase) SetMinSeqs(ctx context.Context, seqs map[string]int64) error { return db.seqConversation.SetMinSeqs(ctx, seqs) } @@ -807,11 +599,11 @@ func (db *commonMsgDatabase) GetHasReadSeq(ctx context.Context, userID string, c } func (db *commonMsgDatabase) SetSendMsgStatus(ctx context.Context, id string, status int32) error { - return db.msg.SetSendMsgStatus(ctx, id, status) + return db.msgCache.SetSendMsgStatus(ctx, id, status) } func (db *commonMsgDatabase) GetSendMsgStatus(ctx context.Context, id string) (int32, error) { - return db.msg.GetSendMsgStatus(ctx, id) + return db.msgCache.GetSendMsgStatus(ctx, id) } func (db *commonMsgDatabase) GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error) { @@ -848,26 +640,11 @@ func (db *commonMsgDatabase) GetMinMaxSeqMongo(ctx context.Context, conversation return } -func (db *commonMsgDatabase) RangeUserSendCount( - ctx context.Context, - start time.Time, - end time.Time, - group bool, - ase bool, - pageNumber int32, - showNumber int32, -) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) { +func (db *commonMsgDatabase) RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) { return db.msgDocDatabase.RangeUserSendCount(ctx, start, end, group, ase, pageNumber, showNumber) } -func (db *commonMsgDatabase) RangeGroupSendCount( - ctx context.Context, - start time.Time, - end time.Time, - ase bool, - pageNumber int32, - showNumber int32, -) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) { +func (db *commonMsgDatabase) RangeGroupSendCount(ctx 
context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) { return db.msgDocDatabase.RangeGroupSendCount(ctx, start, end, ase, pageNumber, showNumber) } @@ -907,60 +684,11 @@ func (db *commonMsgDatabase) FindOneByDocIDs(ctx context.Context, conversationID return totalMsgs, nil } -func (db *commonMsgDatabase) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string) { - db.msgDocDatabase.ConvertMsgsDocLen(ctx, conversationIDs) +func (db *commonMsgDatabase) GetRandBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) { + return db.msgDocDatabase.GetRandBeforeMsg(ctx, ts, limit) } -func (db *commonMsgDatabase) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) { - var msgs []*model.MsgDocModel - for i := 0; i < len(docIDs); i += 1000 { - end := i + 1000 - if end > len(docIDs) { - end = len(docIDs) - } - - res, err := db.msgDocDatabase.GetBeforeMsg(ctx, ts, docIDs[i:end], limit) - if err != nil { - return nil, err - } - msgs = append(msgs, res...) 
- - if len(msgs) >= limit { - return msgs[:limit], nil - } - } - return msgs, nil -} - -func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error) { - var notNull int - index := make([]int, 0, len(doc.Msg)) - for i, message := range doc.Msg { - if message.Msg != nil { - notNull++ - if message.Msg.SendTime < ts { - index = append(index, i) - } - } - } - if len(index) == 0 { - return index, nil - } - maxSeq := doc.Msg[index[len(index)-1]].Msg.Seq - conversationID := doc.DocID[:strings.LastIndex(doc.DocID, ":")] - if err := db.setMinSeq(ctx, conversationID, maxSeq+1); err != nil { - return index, err - } - if len(index) == notNull { - log.ZDebug(ctx, "Delete db in Doc", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq) - return index, db.msgDocDatabase.DeleteDoc(ctx, doc.DocID) - } else { - log.ZDebug(ctx, "delete db in index", "DocID", doc.DocID, "index", index, "maxSeq", maxSeq) - return index, db.msgDocDatabase.DeleteMsgByIndex(ctx, doc.DocID, index) - } -} - -func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID string, seq int64) error { +func (db *commonMsgDatabase) SetMinSeq(ctx context.Context, conversationID string, seq int64) error { dbSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID) if err != nil { if errors.Is(errs.Unwrap(err), redis.Nil) { @@ -974,10 +702,6 @@ func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID strin return db.seqConversation.SetMinSeq(ctx, conversationID, seq) } -func (db *commonMsgDatabase) GetDocIDs(ctx context.Context) ([]string, error) { - return db.msgDocDatabase.GetDocIDs(ctx) -} - func (db *commonMsgDatabase) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) { return db.seqConversation.GetCacheMaxSeqWithTime(ctx, conversationIDs) } @@ -990,3 +714,105 @@ func (db *commonMsgDatabase) GetMaxSeqsWithTime(ctx context.Context, conversatio // todo: only the time 
in the redis cache will be taken, not the message time return db.seqConversation.GetMaxSeqsWithTime(ctx, conversationIDs) } + +func (db *commonMsgDatabase) DeleteDoc(ctx context.Context, docID string) error { + index := strings.LastIndex(docID, ":") + if index <= 0 { + return errs.ErrInternalServer.WrapMsg("docID is invalid", "docID", docID) + } + index, err := strconv.Atoi(docID[index+1:]) + if err != nil { + return errs.WrapMsg(err, "strconv.Atoi", "docID", docID) + } + conversationID := docID[:index] + seqs := make([]int64, db.msgTable.GetSingleGocMsgNum()) + minSeq := db.msgTable.GetMinSeq(index) + for i := range seqs { + seqs[i] = minSeq + int64(i) + } + if err := db.msgDocDatabase.DeleteDoc(ctx, docID); err != nil { + return err + } + return db.msgCache.DelMessageBySeqs(ctx, conversationID, seqs) +} + +func (db *commonMsgDatabase) GetLastMessageSeqByTime(ctx context.Context, conversationID string, time int64) (int64, error) { + return db.msgDocDatabase.GetLastMessageSeqByTime(ctx, conversationID, time) +} + +func (db *commonMsgDatabase) handlerDeleteAndRevoked(ctx context.Context, userID string, msgs []*model.MsgInfoModel) { + for i := range msgs { + msg := msgs[i] + if msg == nil || msg.Msg == nil { + continue + } + msg.Msg.IsRead = msg.IsRead + if datautil.Contain(userID, msg.DelList...) 
{ + msg.Msg.Content = "" + msg.Msg.Status = constant.MsgDeleted + } + if msg.Revoke == nil { + continue + } + msg.Msg.ContentType = constant.MsgRevokeNotification + revokeContent := sdkws.MessageRevokedContent{ + RevokerID: msg.Revoke.UserID, + RevokerRole: msg.Revoke.Role, + ClientMsgID: msg.Msg.ClientMsgID, + RevokerNickname: msg.Revoke.Nickname, + RevokeTime: msg.Revoke.Time, + SourceMessageSendTime: msg.Msg.SendTime, + SourceMessageSendID: msg.Msg.SendID, + SourceMessageSenderNickname: msg.Msg.SenderNickname, + SessionType: msg.Msg.SessionType, + Seq: msg.Msg.Seq, + Ex: msg.Msg.Ex, + } + data, err := jsonutil.JsonMarshal(&revokeContent) + if err != nil { + log.ZWarn(ctx, "handlerDeleteAndRevoked JsonMarshal MessageRevokedContent", err, "msg", msg) + continue + } + elem := sdkws.NotificationElem{ + Detail: string(data), + } + content, err := jsonutil.JsonMarshal(&elem) + if err != nil { + log.ZWarn(ctx, "handlerDeleteAndRevoked JsonMarshal NotificationElem", err, "msg", msg) + continue + } + msg.Msg.Content = string(content) + } +} + +func (db *commonMsgDatabase) handlerQuote(ctx context.Context, userID, conversationID string, msgs []*model.MsgInfoModel) { + temp := make(map[int64][]*model.MsgInfoModel) + for i := range msgs { + db.handlerDBMsg(ctx, temp, userID, conversationID, msgs[i]) + } +} + +func (db *commonMsgDatabase) GetMessageBySeqs(ctx context.Context, conversationID string, userID string, seqs []int64) ([]*sdkws.MsgData, error) { + msgs, err := db.msgCache.GetMessageBySeqs(ctx, conversationID, seqs) + if err != nil { + return nil, err + } + db.handlerDeleteAndRevoked(ctx, userID, msgs) + db.handlerQuote(ctx, userID, conversationID, msgs) + seqMsgs := make(map[int64]*model.MsgInfoModel) + for i, msg := range msgs { + if msg.Msg == nil { + continue + } + seqMsgs[msg.Msg.Seq] = msgs[i] + } + res := make([]*sdkws.MsgData, 0, len(seqs)) + for _, seq := range seqs { + if v, ok := seqMsgs[seq]; ok { + res = append(res, convert.MsgDB2Pb(v.Msg)) + } else { + 
res = append(res, &sdkws.MsgData{Seq: seq}) + } + } + return res, nil +} diff --git a/pkg/common/storage/controller/msg_transfer.go b/pkg/common/storage/controller/msg_transfer.go index 1ecd786aa..f4c0c6270 100644 --- a/pkg/common/storage/controller/msg_transfer.go +++ b/pkg/common/storage/controller/msg_transfer.go @@ -2,10 +2,11 @@ package controller import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/open-im-server/v3/pkg/common/config" - "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" @@ -50,7 +51,7 @@ func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUse } return &msgTransferDatabase{ msgDocDatabase: msgDocModel, - msg: msg, + msgCache: msg, seqUser: seqUser, seqConversation: seqConversation, producerToMongo: producerToMongo, @@ -61,7 +62,7 @@ func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUse type msgTransferDatabase struct { msgDocDatabase database.Msg msgTable model.MsgDocModel - msg cache.MsgCache + msgCache cache.MsgCache seqConversation cache.SeqConversationCache seqUser cache.SeqUser producerToMongo *kafka.Producer @@ -73,10 +74,12 @@ func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversat return errs.ErrArgs.WrapMsg("msgList is empty") } msgs := make([]any, len(msgList)) + seqs := make([]int64, len(msgList)) for i, msg := range msgList { if msg == nil { continue } + seqs[i] = msg.Seq var offlinePushModel *model.OfflinePushModel if msg.OfflinePushInfo != nil { offlinePushModel = &model.OfflinePushModel{ @@ -114,7 +117,11 @@ func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversat Ex: msg.Ex, } } - 
return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq) + if err := db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq); err != nil { + return err + } + //return db.msgCache.DelMessageBySeqs(ctx, conversationID, seqs) + return nil } func (db *msgTransferDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { @@ -219,7 +226,7 @@ func (db *msgTransferDatabase) BatchInsertBlock(ctx context.Context, conversatio } func (db *msgTransferDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error { - return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs) + return db.msgCache.DelMessageBySeqs(ctx, conversationID, seqs) } func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, userHasReadMap map[string]int64, err error) { @@ -238,20 +245,22 @@ func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conver isNew = currentMaxSeq == 0 lastMaxSeq := currentMaxSeq userSeqMap := make(map[string]int64) + seqs := make([]int64, 0, lenList) for _, m := range msgs { currentMaxSeq++ m.Seq = currentMaxSeq userSeqMap[m.SendID] = m.Seq + seqs = append(seqs, m.Seq) } - - failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs) - if err != nil { - prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum)) - log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID) - } else { - prommetrics.MsgInsertRedisSuccessCounter.Inc() + msgToDB := func(msg *sdkws.MsgData) *model.MsgInfoModel { + return &model.MsgInfoModel{ + Msg: convert.MsgPb2DB(msg), + } } - return lastMaxSeq, isNew, userSeqMap, errs.Wrap(err) + if err := db.msgCache.SetMessageBySeqs(ctx, conversationID, datautil.Slice(msgs, msgToDB)); err != nil { + return 0, false, nil, err + } + return lastMaxSeq, isNew, 
userSeqMap, nil } func (db *msgTransferDatabase) SetHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error { diff --git a/pkg/common/storage/controller/s3.go b/pkg/common/storage/controller/s3.go index 4e5ad18b6..6693d2dde 100644 --- a/pkg/common/storage/controller/s3.go +++ b/pkg/common/storage/controller/s3.go @@ -24,7 +24,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" - "github.com/openimsdk/tools/db/pagination" "github.com/openimsdk/tools/s3" "github.com/openimsdk/tools/s3/cont" "github.com/redis/go-redis/v9" @@ -40,11 +39,10 @@ type S3Database interface { SetObject(ctx context.Context, info *model.Object) error StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error) - FindNeedDeleteObjectByDB(ctx context.Context, duration time.Time, needDelType []string, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) - DeleteObject(ctx context.Context, name string) error - DeleteSpecifiedData(ctx context.Context, engine string, name string) error - FindModelsByKey(ctx context.Context, key string) (objects []*model.Object, err error) + FindExpirationObject(ctx context.Context, engine string, expiration time.Time, needDelType []string, count int64) ([]*model.Object, error) + DeleteSpecifiedData(ctx context.Context, engine string, name []string) error DelS3Key(ctx context.Context, engine string, keys ...string) error + GetKeyCount(ctx context.Context, engine string, key string) (int64, error) } func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj database.ObjectInfo) S3Database { @@ -120,21 +118,19 @@ func (s *s3Database) StatObject(ctx context.Context, name string) (*s3.ObjectInf func (s *s3Database) FormData(ctx context.Context, name string, size int64, 
contentType string, duration time.Duration) (*s3.FormData, error) { return s.s3.FormData(ctx, name, size, contentType, duration) } -func (s *s3Database) FindNeedDeleteObjectByDB(ctx context.Context, duration time.Time, needDelType []string, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) { - return s.db.FindNeedDeleteObjectByDB(ctx, duration, needDelType, pagination) + +func (s *s3Database) FindExpirationObject(ctx context.Context, engine string, expiration time.Time, needDelType []string, count int64) ([]*model.Object, error) { + return s.db.FindExpirationObject(ctx, engine, expiration, needDelType, count) } -func (s *s3Database) DeleteObject(ctx context.Context, name string) error { - return s.s3.DeleteObject(ctx, name) +func (s *s3Database) GetKeyCount(ctx context.Context, engine string, key string) (int64, error) { + return s.db.GetKeyCount(ctx, engine, key) } -func (s *s3Database) DeleteSpecifiedData(ctx context.Context, engine string, name string) error { + +func (s *s3Database) DeleteSpecifiedData(ctx context.Context, engine string, name []string) error { return s.db.Delete(ctx, engine, name) } -func (s *s3Database) FindModelsByKey(ctx context.Context, key string) (objects []*model.Object, err error) { - return s.db.FindModelsByKey(ctx, key) -} - func (s *s3Database) DelS3Key(ctx context.Context, engine string, keys ...string) error { return s.s3cache.DelS3Key(ctx, engine, keys...) 
} diff --git a/pkg/common/storage/database/conversation.go b/pkg/common/storage/database/conversation.go index 30ca01ee7..1fb53cfed 100644 --- a/pkg/common/storage/database/conversation.go +++ b/pkg/common/storage/database/conversation.go @@ -42,4 +42,5 @@ type Conversation interface { GetConversationIDsNeedDestruct(ctx context.Context) ([]*model.Conversation, error) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error) + FindRandConversation(ctx context.Context, ts int64, limit int) ([]*model.Conversation, error) } diff --git a/pkg/common/storage/database/mgo/conversation.go b/pkg/common/storage/database/mgo/conversation.go index 10e223c89..851ec99c4 100644 --- a/pkg/common/storage/database/mgo/conversation.go +++ b/pkg/common/storage/database/mgo/conversation.go @@ -228,3 +228,35 @@ func (c *ConversationMgo) GetConversationNotReceiveMessageUserIDs(ctx context.Co func (c *ConversationMgo) FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error) { return c.version.FindChangeLog(ctx, userID, version, limit) } + +func (c *ConversationMgo) FindRandConversation(ctx context.Context, ts int64, limit int) ([]*model.Conversation, error) { + pipeline := []bson.M{ + { + "$match": bson.M{ + "is_msg_destruct": true, + "msg_destruct_time": bson.M{"$ne": 0}, + }, + }, + { + "$addFields": bson.M{ + "next_msg_destruct_timestamp": bson.M{ + "$add": []any{ + bson.M{ + "$toLong": "$latest_msg_destruct_time", + }, "$msg_destruct_time"}, + }, + }, + }, + { + "$match": bson.M{ + "next_msg_destruct_timestamp": bson.M{"$lt": ts}, + }, + }, + { + "$sample": bson.M{ + "size": limit, + }, + }, + } + return mongoutil.Aggregate[*model.Conversation](ctx, c.coll, pipeline) +} diff --git a/pkg/common/storage/database/mgo/msg.go b/pkg/common/storage/database/mgo/msg.go index 
fc1fe47ea..03ebff611 100644 --- a/pkg/common/storage/database/mgo/msg.go +++ b/pkg/common/storage/database/mgo/msg.go @@ -7,15 +7,12 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "github.com/openimsdk/tools/utils/datautil" - "golang.org/x/exp/rand" - "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/tools/errs" - "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/jsonutil" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" @@ -42,12 +39,6 @@ type MsgMgo struct { model model.MsgDocModel } -func (m *MsgMgo) PushMsgsToDoc(ctx context.Context, docID string, msgsToMongo []model.MsgInfoModel) error { - filter := bson.M{"doc_id": docID} - update := bson.M{"$push": bson.M{"msgs": bson.M{"$each": msgsToMongo}}} - return mongoutil.UpdateOne(ctx, m.coll, filter, update, false) -} - func (m *MsgMgo) Create(ctx context.Context, msg *model.MsgDocModel) error { return mongoutil.InsertMany(ctx, m.coll, []*model.MsgDocModel{msg}) } @@ -80,16 +71,6 @@ func (m *MsgMgo) PushUnique(ctx context.Context, docID string, index int64, key return mongoutil.UpdateOneResult(ctx, m.coll, filter, update) } -func (m *MsgMgo) UpdateMsgContent(ctx context.Context, docID string, index int64, msg []byte) error { - filter := bson.M{"doc_id": docID} - update := bson.M{"$set": bson.M{fmt.Sprintf("msgs.%d.msg", index): msg}} - return mongoutil.UpdateOne(ctx, m.coll, filter, update, false) -} - -func (m *MsgMgo) IsExistDocID(ctx context.Context, docID string) (bool, error) { - return mongoutil.Exist(ctx, m.coll, bson.M{"doc_id": docID}) -} - func (m *MsgMgo) FindOneByDocID(ctx context.Context, docID string) (*model.MsgDocModel, error) { return mongoutil.FindOne[*model.MsgDocModel](ctx, 
m.coll, bson.M{"doc_id": docID}) } @@ -218,13 +199,6 @@ func (m *MsgMgo) GetOldestMsg(ctx context.Context, conversationID string) (*mode } } -func (m *MsgMgo) DeleteDocs(ctx context.Context, docIDs []string) error { - if len(docIDs) == 0 { - return nil - } - return mongoutil.DeleteMany(ctx, m.coll, bson.M{"doc_id": bson.M{"$in": docIDs}}) -} - func (m *MsgMgo) GetMsgDocModelByIndex(ctx context.Context, conversationID string, index, sort int64) (*model.MsgDocModel, error) { if sort != 1 && sort != -1 { return nil, errs.ErrArgs.WrapMsg("mongo sort must be 1 or -1") @@ -279,95 +253,6 @@ func (m *MsgMgo) MarkSingleChatMsgsAsRead(ctx context.Context, userID string, do return nil } -//func (m *MsgMgo) searchCount(ctx context.Context, filter any) (int64, error) { -// -// return nil, nil -//} - -//func (m *MsgMgo) searchMessage(ctx context.Context, filter any, nextID primitive.ObjectID, content bool, limit int) (int64, []*model.MsgInfoModel, primitive.ObjectID, error) { -// var pipeline bson.A -// if !nextID.IsZero() { -// pipeline = append(pipeline, bson.M{"$match": bson.M{"_id": bson.M{"$gt": nextID}}}) -// } -// pipeline = append(pipeline, -// bson.M{"$match": filter}, -// bson.M{"$limit": limit}, -// bson.M{"$unwind": "$msgs"}, -// bson.M{"$match": filter}, -// bson.M{ -// "$group": bson.M{ -// "_id": "$_id", -// "doc_id": bson.M{ -// "$first": "$doc_id", -// }, -// "msgs": bson.M{"$push": "$msgs"}, -// }, -// }, -// ) -// if !content { -// pipeline = append(pipeline, -// bson.M{ -// "$project": bson.M{ -// "_id": 1, -// "count": bson.M{"$size": "$msgs"}, -// }, -// }, -// ) -// type result struct { -// ID primitive.ObjectID `bson:"_id"` -// Count int64 `bson:"count"` -// } -// res, err := mongoutil.Aggregate[result](ctx, m.coll, pipeline) -// if err != nil { -// return 0, nil, primitive.ObjectID{}, err -// } -// if len(res) == 0 { -// return 0, nil, primitive.ObjectID{}, nil -// } -// var count int64 -// for _, r := range res { -// count += r.Count -// } -// return 
count, nil, res[len(res)-1].ID, nil -// } -// type result struct { -// ID primitive.ObjectID `bson:"_id"` -// Msg []*model.MsgInfoModel `bson:"msgs"` -// } -// res, err := mongoutil.Aggregate[result](ctx, m.coll, pipeline) -// if err != nil { -// return 0, nil, primitive.ObjectID{}, err -// } -// if len(res) == 0 { -// return 0, nil, primitive.ObjectID{}, err -// } -// var count int -// for _, r := range res { -// count += len(r.Msg) -// } -// msgs := make([]*model.MsgInfoModel, 0, count) -// for _, r := range res { -// msgs = append(msgs, r.Msg...) -// } -// return int64(count), msgs, res[len(res)-1].ID, nil -//} - -/* - -db.msg3.aggregate( - [ - { - "$match": { - "doc_id": "si_7009965934_8710838466:0" - }, - - } - ] -) - - -*/ - type searchMessageIndex struct { ID primitive.ObjectID `bson:"_id"` Index []int64 `bson:"index"` @@ -512,22 +397,6 @@ func (m *MsgMgo) searchMessage(ctx context.Context, req *msg.SearchMessageReq) ( } } -func (m *MsgMgo) getDocRange(ctx context.Context, id primitive.ObjectID, index []int64) ([]*model.MsgInfoModel, error) { - if len(index) == 0 { - return nil, nil - } - - pipeline := bson.A{ - bson.M{"$match": bson.M{"_id": id}}, - bson.M{"$project": "$msgs"}, - } - msgs, err := mongoutil.Aggregate[*model.MsgInfoModel](ctx, m.coll, pipeline) - if err != nil { - return nil, err - } - return msgs, nil -} - func (m *MsgMgo) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (int64, []*model.MsgInfoModel, error) { count, data, err := m.searchMessage(ctx, req) if err != nil { @@ -556,143 +425,6 @@ func (m *MsgMgo) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) ( return count, msgs, nil } -//func (m *MsgMgo) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (int32, []*model.MsgInfoModel, error) { -// where := make(bson.A, 0, 6) -// if req.RecvID != "" { -// if req.SessionType == constant.ReadGroupChatType { -// where = append(where, bson.M{ -// "$or": bson.A{ -// bson.M{"doc_id": "^n_" + req.RecvID + ":"}, 
-// bson.M{"doc_id": "^sg_" + req.RecvID + ":"}, -// }, -// }) -// } else { -// where = append(where, bson.M{"msgs.msg.recv_id": req.RecvID}) -// } -// } -// if req.SendID != "" { -// where = append(where, bson.M{"msgs.msg.send_id": req.SendID}) -// } -// if req.ContentType != 0 { -// where = append(where, bson.M{"msgs.msg.content_type": req.ContentType}) -// } -// if req.SessionType != 0 { -// where = append(where, bson.M{"msgs.msg.session_type": req.SessionType}) -// } -// if req.SendTime != "" { -// sendTime, err := time.Parse(time.DateOnly, req.SendTime) -// if err != nil { -// return 0, nil, errs.ErrArgs.WrapMsg("invalid sendTime", "req", req.SendTime, "format", time.DateOnly, "cause", err.Error()) -// } -// where = append(where, -// bson.M{ -// "msgs.msg.send_time": bson.M{ -// "$gte": sendTime.UnixMilli(), -// }, -// }, -// bson.M{ -// "msgs.msg.send_time": bson.M{ -// "$lt": sendTime.Add(time.Hour * 24).UnixMilli(), -// }, -// }, -// ) -// } -// opt := options.Find().SetLimit(100) -// res, err := mongoutil.Find[model.MsgDocModel](ctx, m.coll, bson.M{"$and": where}, opt) -// if err != nil { -// return 0, nil, err -// } -// _ = res -// fmt.Println() -// -// return 0, nil, nil -// pipeline := bson.A{ -// bson.M{ -// "$unwind": "$msgs", -// }, -// } -// if len(where) > 0 { -// pipeline = append(pipeline, bson.M{ -// "$match": bson.M{"$and": where}, -// }) -// } -// pipeline = append(pipeline, -// bson.M{ -// "$project": bson.M{ -// "_id": 0, -// "msg": "$msgs.msg", -// }, -// }, -// bson.M{ -// "$count": "count", -// }, -// ) -// //count, err := mongoutil.Aggregate[int32](ctx, m.coll, pipeline) -// //if err != nil { -// // return 0, nil, err -// //} -// //if len(count) == 0 || count[0] == 0 { -// // return 0, nil, nil -// //} -// count := []int32{0} -// pipeline = pipeline[:len(pipeline)-1] -// pipeline = append(pipeline, -// bson.M{ -// "$skip": (req.Pagination.GetPageNumber() - 1) * req.Pagination.GetShowNumber(), -// }, -// bson.M{ -// "$limit": 
req.Pagination.GetShowNumber(), -// }, -// ) -// msgs, err := mongoutil.Aggregate[*model.MsgInfoModel](ctx, m.coll, pipeline) -// if err != nil { -// return 0, nil, err -// } -// for i := range msgs { -// msgInfo := msgs[i] -// if msgInfo == nil || msgInfo.Msg == nil { -// continue -// } -// if msgInfo.Revoke != nil { -// revokeContent := sdkws.MessageRevokedContent{ -// RevokerID: msgInfo.Revoke.UserID, -// RevokerRole: msgInfo.Revoke.Role, -// ClientMsgID: msgInfo.Msg.ClientMsgID, -// RevokerNickname: msgInfo.Revoke.Nickname, -// RevokeTime: msgInfo.Revoke.Time, -// SourceMessageSendTime: msgInfo.Msg.SendTime, -// SourceMessageSendID: msgInfo.Msg.SendID, -// SourceMessageSenderNickname: msgInfo.Msg.SenderNickname, -// SessionType: msgInfo.Msg.SessionType, -// Seq: msgInfo.Msg.Seq, -// Ex: msgInfo.Msg.Ex, -// } -// data, err := jsonutil.JsonMarshal(&revokeContent) -// if err != nil { -// return 0, nil, errs.WrapMsg(err, "json.Marshal revokeContent") -// } -// elem := sdkws.NotificationElem{Detail: string(data)} -// content, err := jsonutil.JsonMarshal(&elem) -// if err != nil { -// return 0, nil, errs.WrapMsg(err, "json.Marshal elem") -// } -// msgInfo.Msg.ContentType = constant.MsgRevokeNotification -// msgInfo.Msg.Content = string(content) -// } -// } -// //start := (req.Pagination.PageNumber - 1) * req.Pagination.ShowNumber -// //n := int32(len(msgs)) -// //if start >= n { -// // return n, []*relation.MsgInfoModel{}, nil -// //} -// //if start+req.Pagination.ShowNumber < n { -// // msgs = msgs[start : start+req.Pagination.ShowNumber] -// //} else { -// // msgs = msgs[start:] -// //} -// return count[0], msgs, nil -//} - func (m *MsgMgo) RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) { var sort int if ase { @@ -1178,104 +910,18 @@ func (m *MsgMgo) RangeGroupSendCount(ctx 
context.Context, start time.Time, end t return result[0].MsgCount, result[0].UserCount, groups, dateCount, nil } -func (m *MsgMgo) ConvertMsgsDocLen(ctx context.Context, conversationIDs []string) { - for _, conversationID := range conversationIDs { - regex := primitive.Regex{Pattern: fmt.Sprintf("^%s:", conversationID)} - msgDocs, err := mongoutil.Find[*model.MsgDocModel](ctx, m.coll, bson.M{"doc_id": regex}) - if err != nil { - log.ZError(ctx, "convertAll find msg doc failed", err, "conversationID", conversationID) - continue - } - if len(msgDocs) < 1 { - continue - } - log.ZDebug(ctx, "msg doc convert", "conversationID", conversationID, "len(msgDocs)", len(msgDocs)) - if len(msgDocs[0].Msg) == int(m.model.GetSingleGocMsgNum5000()) { - if err := mongoutil.DeleteMany(ctx, m.coll, bson.M{"doc_id": regex}); err != nil { - log.ZError(ctx, "convertAll delete many failed", err, "conversationID", conversationID) - continue - } - var newMsgDocs []any - for _, msgDoc := range msgDocs { - if int64(len(msgDoc.Msg)) == m.model.GetSingleGocMsgNum() { - continue - } - var index int64 - for index < int64(len(msgDoc.Msg)) { - msg := msgDoc.Msg[index] - if msg != nil && msg.Msg != nil { - msgDocModel := model.MsgDocModel{DocID: m.model.GetDocID(conversationID, msg.Msg.Seq)} - end := index + m.model.GetSingleGocMsgNum() - if int(end) >= len(msgDoc.Msg) { - msgDocModel.Msg = msgDoc.Msg[index:] - } else { - msgDocModel.Msg = msgDoc.Msg[index:end] - } - newMsgDocs = append(newMsgDocs, msgDocModel) - index = end - } else { - break - } - } - } - if err = mongoutil.InsertMany(ctx, m.coll, newMsgDocs); err != nil { - log.ZError(ctx, "convertAll insert many failed", err, "conversationID", conversationID, "len(newMsgDocs)", len(newMsgDocs)) - } else { - log.ZDebug(ctx, "msg doc convert", "conversationID", conversationID, "len(newMsgDocs)", len(newMsgDocs)) - } - } - } -} - -func (m *MsgMgo) GetDocIDs(ctx context.Context) ([]string, error) { - limit := 5000 - var skip int - var docIDs 
[]string - var offset int - - count, err := m.coll.CountDocuments(ctx, bson.M{}) - if err != nil { - return nil, err - } - - if count < int64(limit) { - skip = 0 - } else { - rand.Seed(uint64(time.Now().UnixMilli())) - skip = rand.Intn(int(count / int64(limit))) - offset = skip * limit - } - log.ZDebug(ctx, "offset", "skip", skip, "offset", offset) - res, err := mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{ - { - "$project": bson.M{ - "doc_id": 1, - }, - }, - { - "$skip": offset, - }, - { - "$limit": limit, - }, - }) - - for _, doc := range res { - docIDs = append(docIDs, doc.DocID) - } - - return docIDs, errs.Wrap(err) -} - -func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) { +func (m *MsgMgo) GetRandBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) { return mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, []bson.M{ { "$match": bson.M{ - "doc_id": bson.M{ - "$in": docIDs, - }, - "msgs.msg.send_time": bson.M{ - "$lt": ts, + "msgs": bson.M{ + "$not": bson.M{ + "$elemMatch": bson.M{ + "msg.send_time": bson.M{ + "$gt": ts, + }, + }, + }, }, }, }, @@ -1288,70 +934,117 @@ func (m *MsgMgo) GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, li }, }, { - "$limit": limit, + "$sample": bson.M{ + "size": limit, + }, }, }) } -func (m *MsgMgo) DeleteMsgByIndex(ctx context.Context, docID string, index []int) error { - if len(index) == 0 { - return nil - } - model := &model.MsgInfoModel{DelList: []string{}} - set := make(map[string]any) - for i := range index { - set[fmt.Sprintf("msgs.%d", i)] = model - } - return mongoutil.UpdateOne(ctx, m.coll, bson.M{"doc_id": docID}, bson.M{"$set": set}, true) -} - -//func (m *MsgMgo) ClearMsg(ctx context.Context, t time.Time) (int64, error) { -// ts := t.UnixMilli() -// var count int64 -// for { -// msgs, err := m.GetBeforeMsg(ctx, ts, 100) -// if err != nil { -// return count, err -// } -// if len(msgs) == 0 { 
-// return count, nil -// } -// for _, msg := range msgs { -// num, err := m.deleteOneMsg(ctx, ts, msg) -// count += num -// if err != nil { -// return count, err -// } -// } -// } -//} - func (m *MsgMgo) DeleteDoc(ctx context.Context, docID string) error { return mongoutil.DeleteOne(ctx, m.coll, bson.M{"doc_id": docID}) } -//func (m *MsgMgo) DeleteDocMsg(ctx context.Context, ts int64, doc *relation.MsgDocModel) (int64, error) { -// var notNull int -// index := make([]int, 0, len(doc.Msg)) -// for i, message := range doc.Msg { -// if message.Msg != nil { -// notNull++ -// if message.Msg.SendTime < ts { -// index = append(index, i) -// } -// } -// } -// if len(index) == 0 { -// return 0, errs.New("no msg to delete").WrapMsg("deleteOneMsg", "docID", doc.DocID) -// } -// if len(index) == notNull { -// if err := m.DeleteDoc(ctx, doc.DocID); err != nil { -// return 0, err -// } -// } else { -// if err := m.setNullMsg(ctx, doc.DocID, index); err != nil { -// return 0, err -// } -// } -// return int64(len(index)), nil -//} +func (m *MsgMgo) GetLastMessageSeqByTime(ctx context.Context, conversationID string, time int64) (int64, error) { + pipeline := []bson.M{ + { + "$match": bson.M{ + "doc_id": bson.M{ + "$regex": fmt.Sprintf("^%s", conversationID), + }, + }, + }, + { + "$match": bson.M{ + "msgs.msg.send_time": bson.M{ + "$lte": time, + }, + }, + }, + { + "$sort": bson.M{ + "_id": -1, + }, + }, + { + "$limit": 1, + }, + { + "$project": bson.M{ + "_id": 0, + "doc_id": 1, + "msgs.msg.send_time": 1, + "msgs.msg.seq": 1, + }, + }, + } + res, err := mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, pipeline) + if err != nil { + return 0, err + } + if len(res) == 0 { + return 0, nil + } + var seq int64 + for _, v := range res[0].Msg { + if v.Msg == nil { + continue + } + if v.Msg.SendTime <= time { + seq = v.Msg.Seq + } + } + return seq, nil +} + +func (m *MsgMgo) onlyFindDocIndex(ctx context.Context, docID string, indexes []int64) ([]*model.MsgInfoModel, error) { + if 
len(indexes) == 0 { + return nil, nil + } + pipeline := mongo.Pipeline{ + bson.D{{Key: "$match", Value: bson.D{ + {Key: "doc_id", Value: docID}, + }}}, + bson.D{{Key: "$project", Value: bson.D{ + {Key: "_id", Value: 0}, + {Key: "doc_id", Value: 1}, + {Key: "msgs", Value: bson.D{ + {Key: "$map", Value: bson.D{ + {Key: "input", Value: indexes}, + {Key: "as", Value: "index"}, + {Key: "in", Value: bson.D{ + {Key: "$arrayElemAt", Value: bson.A{"$msgs", "$$index"}}, + }}, + }}, + }}, + }}}, + } + msgDocModel, err := mongoutil.Aggregate[*model.MsgDocModel](ctx, m.coll, pipeline) + if err != nil { + return nil, err + } + if len(msgDocModel) == 0 { + return nil, nil + } + return msgDocModel[0].Msg, nil +} + +func (m *MsgMgo) FindSeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) { + if len(seqs) == 0 { + return nil, nil + } + result := make([]*model.MsgInfoModel, 0, len(seqs)) + for docID, seqs := range m.model.GetDocIDSeqsMap(conversationID, seqs) { + res, err := m.onlyFindDocIndex(ctx, docID, datautil.Slice(seqs, m.model.GetMsgIndex)) + if err != nil { + return nil, err + } + for i, re := range res { + if re == nil || re.Msg == nil { + continue + } + result = append(result, res[i]) + } + } + return result, nil +} diff --git a/pkg/common/storage/database/mgo/msg_test.go b/pkg/common/storage/database/mgo/msg_test.go index 5aed4dc51..992090552 100644 --- a/pkg/common/storage/database/mgo/msg_test.go +++ b/pkg/common/storage/database/mgo/msg_test.go @@ -3,12 +3,11 @@ package mgo import ( "context" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "github.com/openimsdk/protocol/msg" - "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/db/mongoutil" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" + "math" "math/rand" "strconv" "testing" @@ -16,35 +15,45 @@ import ( ) func TestName1(t *testing.T) { - ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*300) - defer cancel() - cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))) - - v := &MsgMgo{ - coll: cli.Database("openim_v3").Collection("msg3"), - } - - req := &msg.SearchMessageReq{ - //RecvID: "3187706596", - //SendID: "7009965934", - ContentType: 101, - //SendTime: "2024-05-06", - //SessionType: 3, - Pagination: &sdkws.RequestPagination{ - PageNumber: 1, - ShowNumber: 10, - }, - } - total, res, err := v.SearchMessage(ctx, req) - if err != nil { - panic(err) - } - - for i, re := range res { - t.Logf("%d => %d | %+v", i+1, re.Msg.Seq, re.Msg.Content) - } - - t.Log(total) + //ctx, cancel := context.WithTimeout(context.Background(), time.Second*300) + //defer cancel() + //cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.66:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))) + // + //v := &MsgMgo{ + // coll: cli.Database("openim_v3").Collection("msg3"), + //} + // + //req := &msg.SearchMessageReq{ + // //RecvID: "3187706596", + // //SendID: "7009965934", + // ContentType: 101, + // //SendTime: "2024-05-06", + // //SessionType: 3, + // Pagination: &sdkws.RequestPagination{ + // PageNumber: 1, + // ShowNumber: 10, + // }, + //} + //total, res, err := v.SearchMessage(ctx, req) + //if err != nil { + // panic(err) + //} + // + //for i, re := range res { + // t.Logf("%d => %d | %+v", i+1, re.Msg.Seq, re.Msg.Content) + //} + // + //t.Log(total) + // + //msg, err := NewMsgMongo(cli.Database("openim_v3")) + //if err != nil { + // panic(err) + //} + //res, err := msg.GetBeforeMsg(ctx, time.Now().UnixMilli(), []string{"1:0"}, 1000) + //if err != nil { + // panic(err) + //} + //t.Log(len(res)) } func TestName10(t *testing.T) { @@ -73,3 +82,33 @@ func TestName10(t *testing.T) { } } + +func TestName3(t *testing.T) { + t.Log(uint64(math.MaxUint64)) 
+ t.Log(int64(math.MaxInt64)) + + t.Log(int64(math.MinInt64)) +} + +func TestName4(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*300) + defer cancel() + cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.66:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))) + + msg, err := NewMsgMongo(cli.Database("openim_v3")) + if err != nil { + panic(err) + } + ts := time.Now().Add(-time.Hour * 24 * 5).UnixMilli() + t.Log(ts) + res, err := msg.GetLastMessageSeqByTime(ctx, "sg_1523453548", ts) + if err != nil { + panic(err) + } + t.Log(res) +} + +func TestName5(t *testing.T) { + var v time.Time + t.Log(v.UnixMilli()) +} diff --git a/pkg/common/storage/database/mgo/object.go b/pkg/common/storage/database/mgo/object.go index 5bc329d33..624eb84a0 100644 --- a/pkg/common/storage/database/mgo/object.go +++ b/pkg/common/storage/database/mgo/object.go @@ -22,7 +22,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/tools/db/mongoutil" - "github.com/openimsdk/tools/db/pagination" "github.com/openimsdk/tools/errs" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" @@ -91,21 +90,25 @@ func (o *S3Mongo) Take(ctx context.Context, engine string, name string) (*model. 
return mongoutil.FindOne[*model.Object](ctx, o.coll, bson.M{"name": name, "engine": engine}) } -func (o *S3Mongo) Delete(ctx context.Context, engine string, name string) error { - return mongoutil.DeleteOne(ctx, o.coll, bson.M{"name": name, "engine": engine}) +func (o *S3Mongo) Delete(ctx context.Context, engine string, name []string) error { + if len(name) == 0 { + return nil + } + return mongoutil.DeleteOne(ctx, o.coll, bson.M{"engine": engine, "name": bson.M{"$in": name}}) } -// Find Expires object -func (o *S3Mongo) FindNeedDeleteObjectByDB(ctx context.Context, duration time.Time, needDelType []string, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) { - return mongoutil.FindPage[*model.Object](ctx, o.coll, bson.M{ - "create_time": bson.M{"$lt": duration}, - "group": bson.M{"$in": needDelType}, - }, pagination) -} - -// Find object by key -func (o *S3Mongo) FindModelsByKey(ctx context.Context, key string) (objects []*model.Object, err error) { +func (o *S3Mongo) FindExpirationObject(ctx context.Context, engine string, expiration time.Time, needDelType []string, count int64) ([]*model.Object, error) { + opt := options.Find() + if count > 0 { + opt.SetLimit(count) + } return mongoutil.Find[*model.Object](ctx, o.coll, bson.M{ - "key": key, - }) + "engine": engine, + "create_time": bson.M{"$lt": expiration}, + "group": bson.M{"$in": needDelType}, + }, opt) +} + +func (o *S3Mongo) GetKeyCount(ctx context.Context, engine string, key string) (int64, error) { + return mongoutil.Count(ctx, o.coll, bson.M{"engine": engine, "key": key}) } diff --git a/pkg/common/storage/database/msg.go b/pkg/common/storage/database/msg.go index 23a99f5b9..b44e70296 100644 --- a/pkg/common/storage/database/msg.go +++ b/pkg/common/storage/database/msg.go @@ -24,28 +24,20 @@ import ( ) type Msg interface { - PushMsgsToDoc(ctx context.Context, docID string, msgsToMongo []model.MsgInfoModel) error Create(ctx context.Context, model *model.MsgDocModel) error 
UpdateMsg(ctx context.Context, docID string, index int64, key string, value any) (*mongo.UpdateResult, error) PushUnique(ctx context.Context, docID string, index int64, key string, value any) (*mongo.UpdateResult, error) - UpdateMsgContent(ctx context.Context, docID string, index int64, msg []byte) error - IsExistDocID(ctx context.Context, docID string) (bool, error) FindOneByDocID(ctx context.Context, docID string) (*model.MsgDocModel, error) GetMsgBySeqIndexIn1Doc(ctx context.Context, userID, docID string, seqs []int64) ([]*model.MsgInfoModel, error) GetNewestMsg(ctx context.Context, conversationID string) (*model.MsgInfoModel, error) GetOldestMsg(ctx context.Context, conversationID string) (*model.MsgInfoModel, error) - DeleteDocs(ctx context.Context, docIDs []string) error - GetMsgDocModelByIndex(ctx context.Context, conversationID string, index, sort int64) (*model.MsgDocModel, error) DeleteMsgsInOneDocByIndex(ctx context.Context, docID string, indexes []int) error MarkSingleChatMsgsAsRead(ctx context.Context, userID string, docID string, indexes []int64) error SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (int64, []*model.MsgInfoModel, error) RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error) RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error) - ConvertMsgsDocLen(ctx context.Context, conversationIDs []string) - DeleteDoc(ctx context.Context, docID string) error - DeleteMsgByIndex(ctx context.Context, docID string, index []int) error - GetBeforeMsg(ctx context.Context, ts int64, docIDs []string, limit int) ([]*model.MsgDocModel, error) - - GetDocIDs(ctx context.Context) ([]string, error) + 
GetRandBeforeMsg(ctx context.Context, ts int64, limit int) ([]*model.MsgDocModel, error) + GetLastMessageSeqByTime(ctx context.Context, conversationID string, time int64) (int64, error) + FindSeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) } diff --git a/pkg/common/storage/database/object.go b/pkg/common/storage/database/object.go index c741e39a6..5541a159b 100644 --- a/pkg/common/storage/database/object.go +++ b/pkg/common/storage/database/object.go @@ -19,13 +19,12 @@ import ( "time" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" - "github.com/openimsdk/tools/db/pagination" ) type ObjectInfo interface { SetObject(ctx context.Context, obj *model.Object) error Take(ctx context.Context, engine string, name string) (*model.Object, error) - Delete(ctx context.Context, engine string, name string) error - FindNeedDeleteObjectByDB(ctx context.Context, duration time.Time, needDelType []string, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) - FindModelsByKey(ctx context.Context, key string) (objects []*model.Object, err error) + Delete(ctx context.Context, engine string, name []string) error + FindExpirationObject(ctx context.Context, engine string, expiration time.Time, needDelType []string, count int64) ([]*model.Object, error) + GetKeyCount(ctx context.Context, engine string, key string) (int64, error) } diff --git a/pkg/common/storage/model/msg.go b/pkg/common/storage/model/msg.go index e16233973..69113032d 100644 --- a/pkg/common/storage/model/msg.go +++ b/pkg/common/storage/model/msg.go @@ -143,3 +143,7 @@ func (*MsgDocModel) GenExceptionMessageBySeqs(seqs []int64) (exceptionMsg []*sdk } return exceptionMsg } + +func (*MsgDocModel) GetMinSeq(index int) int64 { + return int64(index*singleGocMsgNum) + 1 +} diff --git a/pkg/rpcclient/notification/common.go b/pkg/notification/common_user/common.go similarity index 97% rename from pkg/rpcclient/notification/common.go 
rename to pkg/notification/common_user/common.go index 09d8b8798..470d9c7a2 100644 --- a/pkg/rpcclient/notification/common.go +++ b/pkg/notification/common_user/common.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package notification +package common_user type CommonUser interface { GetNickname() string diff --git a/pkg/rpcclient/grouphash/grouphash.go b/pkg/notification/grouphash/grouphash.go similarity index 100% rename from pkg/rpcclient/grouphash/grouphash.go rename to pkg/notification/grouphash/grouphash.go diff --git a/pkg/rpcclient/msg.go b/pkg/notification/msg.go similarity index 72% rename from pkg/rpcclient/msg.go rename to pkg/notification/msg.go index 8313937cd..0795982c8 100644 --- a/pkg/rpcclient/msg.go +++ b/pkg/notification/msg.go @@ -19,17 +19,14 @@ import ( "encoding/json" "time" - "google.golang.org/grpc" "google.golang.org/protobuf/proto" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/sdkws" - "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/log" "github.com/openimsdk/tools/mq/memamq" - "github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/utils/idutil" "github.com/openimsdk/tools/utils/jsonutil" "github.com/openimsdk/tools/utils/timeutil" @@ -129,126 +126,6 @@ func newSessionTypeConf() map[int32]int32 { } } -type Message struct { - conn grpc.ClientConnInterface - Client msg.MsgClient - discov discovery.SvcDiscoveryRegistry -} - -func NewMessage(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) *Message { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - client := msg.NewMsgClient(conn) - return &Message{discov: discov, conn: conn, Client: client} -} - -type MessageRpcClient Message - -func NewMessageRpcClient(discov 
discovery.SvcDiscoveryRegistry, rpcRegisterName string) MessageRpcClient { - return MessageRpcClient(*NewMessage(discov, rpcRegisterName)) -} - -// SendMsg sends a message through the gRPC client and returns the response. -// It wraps any encountered error for better error handling and context understanding. -func (m *MessageRpcClient) SendMsg(ctx context.Context, req *msg.SendMsgReq) (*msg.SendMsgResp, error) { - resp, err := m.Client.SendMsg(ctx, req) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetUserConversationsMinSeq set min seq -func (m *MessageRpcClient) SetUserConversationsMinSeq(ctx context.Context, req *msg.SetUserConversationsMinSeqReq) (*msg.SetUserConversationsMinSeqResp, error) { - resp, err := m.Client.SetUserConversationsMinSeq(ctx, req) - if err != nil { - return nil, err - } - return resp, nil -} - -// GetMaxSeq retrieves the maximum sequence number from the gRPC client. -// Errors during the gRPC call are wrapped to provide additional context. 
-func (m *MessageRpcClient) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) { - resp, err := m.Client.GetMaxSeq(ctx, req) - if err != nil { - return nil, err - } - return resp, nil -} - -func (m *MessageRpcClient) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) { - log.ZDebug(ctx, "GetMaxSeqs", "conversationIDs", conversationIDs) - resp, err := m.Client.GetMaxSeqs(ctx, &msg.GetMaxSeqsReq{ - ConversationIDs: conversationIDs, - }) - if err != nil { - return nil, err - } - return resp.MaxSeqs, err -} - -func (m *MessageRpcClient) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) { - resp, err := m.Client.GetHasReadSeqs(ctx, &msg.GetHasReadSeqsReq{ - UserID: userID, - ConversationIDs: conversationIDs, - }) - if err != nil { - return nil, err - } - return resp.MaxSeqs, err -} - -func (m *MessageRpcClient) GetMsgByConversationIDs(ctx context.Context, docIDs []string, seqs map[string]int64) (map[string]*sdkws.MsgData, error) { - resp, err := m.Client.GetMsgByConversationIDs(ctx, &msg.GetMsgByConversationIDsReq{ - ConversationIDs: docIDs, - MaxSeqs: seqs, - }) - if err != nil { - return nil, err - } - return resp.MsgDatas, err -} - -// PullMessageBySeqList retrieves messages by their sequence numbers using the gRPC client. -// It directly forwards the request to the gRPC client and returns the response along with any error encountered. -func (m *MessageRpcClient) PullMessageBySeqList(ctx context.Context, req *sdkws.PullMessageBySeqsReq) (*sdkws.PullMessageBySeqsResp, error) { - resp, err := m.Client.PullMessageBySeqs(ctx, req) - if err != nil { - // Wrap the error to provide more context if the gRPC call fails. 
- return nil, err - } - return resp, nil -} - -func (m *MessageRpcClient) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *msg.GetConversationsHasReadAndMaxSeqReq) (*msg.GetConversationsHasReadAndMaxSeqResp, error) { - resp, err := m.Client.GetConversationsHasReadAndMaxSeq(ctx, req) - if err != nil { - // Wrap the error to provide more context if the gRPC call fails. - return nil, err - } - return resp, nil -} - -func (m *MessageRpcClient) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) { - return m.Client.GetSeqMessage(ctx, req) -} - -func (m *MessageRpcClient) GetConversationMaxSeq(ctx context.Context, conversationID string) (int64, error) { - resp, err := m.Client.GetConversationMaxSeq(ctx, &msg.GetConversationMaxSeqReq{ConversationID: conversationID}) - if err != nil { - return 0, err - } - return resp.MaxSeq, nil -} - -func (m *MessageRpcClient) DestructMsgs(ctx context.Context, ts int64) error { - _, err := m.Client.DestructMsgs(ctx, &msg.DestructMsgsReq{Timestamp: ts}) - return err -} - type NotificationSender struct { contentTypeConf map[int32]config.NotificationConfig sessionTypeConf map[int32]int32 @@ -271,15 +148,17 @@ func WithLocalSendMsg(sendMsg func(ctx context.Context, req *msg.SendMsgReq) (*m } } -func WithRpcClient(msgRpcClient *MessageRpcClient) NotificationSenderOptions { +func WithRpcClient(sendMsg func(ctx context.Context, req *msg.SendMsgReq) (*msg.SendMsgResp, error)) NotificationSenderOptions { return func(s *NotificationSender) { - s.sendMsg = msgRpcClient.SendMsg + s.sendMsg = func(ctx context.Context, req *msg.SendMsgReq) (*msg.SendMsgResp, error) { + return sendMsg(ctx, req) + } } } -func WithUserRpcClient(userRpcClient *UserRpcClient) NotificationSenderOptions { +func WithUserRpcClient(getUserInfo func(ctx context.Context, userID string) (*sdkws.UserInfo, error)) NotificationSenderOptions { return func(s *NotificationSender) { - s.getUserInfo = userRpcClient.GetUserInfo + 
s.getUserInfo = getUserInfo } } diff --git a/pkg/rpccache/conversation.go b/pkg/rpccache/conversation.go index 925d2a37c..70f5acfd1 100644 --- a/pkg/rpccache/conversation.go +++ b/pkg/rpccache/conversation.go @@ -19,7 +19,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/localcache" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" pbconversation "github.com/openimsdk/protocol/conversation" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" @@ -32,7 +32,7 @@ const ( conversationWorkerCount = 20 ) -func NewConversationLocalCache(client rpcclient.ConversationRpcClient, localCache *config.LocalCache, cli redis.UniversalClient) *ConversationLocalCache { +func NewConversationLocalCache(client *rpcli.ConversationClient, localCache *config.LocalCache, cli redis.UniversalClient) *ConversationLocalCache { lc := localCache.Conversation log.ZDebug(context.Background(), "ConversationLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) x := &ConversationLocalCache{ @@ -52,7 +52,7 @@ func NewConversationLocalCache(client rpcclient.ConversationRpcClient, localCach } type ConversationLocalCache struct { - client rpcclient.ConversationRpcClient + client *rpcli.ConversationClient local localcache.Cache[[]byte] } @@ -76,7 +76,7 @@ func (c *ConversationLocalCache) getConversationIDs(ctx context.Context, ownerUs var cache cacheProto[pbconversation.GetConversationIDsResp] return cache.Unmarshal(c.local.Get(ctx, cachekey.GetConversationIDsKey(ownerUserID), func(ctx context.Context) ([]byte, error) { log.ZDebug(ctx, "ConversationLocalCache getConversationIDs rpc", "ownerUserID", ownerUserID) - return cache.Marshal(c.client.Client.GetConversationIDs(ctx, &pbconversation.GetConversationIDsReq{UserID: ownerUserID})) + return 
cache.Marshal(c.client.ConversationClient.GetConversationIDs(ctx, &pbconversation.GetConversationIDsReq{UserID: ownerUserID})) })) } @@ -92,7 +92,7 @@ func (c *ConversationLocalCache) GetConversation(ctx context.Context, userID, co var cache cacheProto[pbconversation.Conversation] return cache.Unmarshal(c.local.Get(ctx, cachekey.GetConversationKey(userID, conversationID), func(ctx context.Context) ([]byte, error) { log.ZDebug(ctx, "ConversationLocalCache GetConversation rpc", "userID", userID, "conversationID", conversationID) - return cache.Marshal(c.client.GetConversation(ctx, userID, conversationID)) + return cache.Marshal(c.client.GetConversation(ctx, conversationID, userID)) })) } @@ -149,7 +149,7 @@ func (c *ConversationLocalCache) getConversationNotReceiveMessageUserIDs(ctx con var cache cacheProto[pbconversation.GetConversationNotReceiveMessageUserIDsResp] return cache.Unmarshal(c.local.Get(ctx, cachekey.GetConversationNotReceiveMessageUserIDsKey(conversationID), func(ctx context.Context) ([]byte, error) { log.ZDebug(ctx, "ConversationLocalCache getConversationNotReceiveMessageUserIDs rpc", "conversationID", conversationID) - return cache.Marshal(c.client.Client.GetConversationNotReceiveMessageUserIDs(ctx, &pbconversation.GetConversationNotReceiveMessageUserIDsReq{ConversationID: conversationID})) + return cache.Marshal(c.client.ConversationClient.GetConversationNotReceiveMessageUserIDs(ctx, &pbconversation.GetConversationNotReceiveMessageUserIDsReq{ConversationID: conversationID})) })) } diff --git a/pkg/rpccache/friend.go b/pkg/rpccache/friend.go index dca3b4c97..8ed6c1ae9 100644 --- a/pkg/rpccache/friend.go +++ b/pkg/rpccache/friend.go @@ -17,16 +17,16 @@ package rpccache import ( "context" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/protocol/relation" "github.com/openimsdk/open-im-server/v3/pkg/common/config" 
"github.com/openimsdk/open-im-server/v3/pkg/localcache" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" ) -func NewFriendLocalCache(client rpcclient.FriendRpcClient, localCache *config.LocalCache, cli redis.UniversalClient) *FriendLocalCache { +func NewFriendLocalCache(client *rpcli.RelationClient, localCache *config.LocalCache, cli redis.UniversalClient) *FriendLocalCache { lc := localCache.Friend log.ZDebug(context.Background(), "FriendLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) x := &FriendLocalCache{ @@ -46,7 +46,7 @@ func NewFriendLocalCache(client rpcclient.FriendRpcClient, localCache *config.Lo } type FriendLocalCache struct { - client rpcclient.FriendRpcClient + client *rpcli.RelationClient local localcache.Cache[[]byte] } @@ -70,7 +70,7 @@ func (f *FriendLocalCache) isFriend(ctx context.Context, possibleFriendUserID, u var cache cacheProto[relation.IsFriendResp] return cache.Unmarshal(f.local.GetLink(ctx, cachekey.GetIsFriendKey(possibleFriendUserID, userID), func(ctx context.Context) ([]byte, error) { log.ZDebug(ctx, "FriendLocalCache isFriend rpc", "possibleFriendUserID", possibleFriendUserID, "userID", userID) - return cache.Marshal(f.client.Client.IsFriend(ctx, &relation.IsFriendReq{UserID1: userID, UserID2: possibleFriendUserID})) + return cache.Marshal(f.client.FriendClient.IsFriend(ctx, &relation.IsFriendReq{UserID1: userID, UserID2: possibleFriendUserID})) }, cachekey.GetFriendIDsKey(possibleFriendUserID))) } @@ -96,6 +96,6 @@ func (f *FriendLocalCache) isBlack(ctx context.Context, possibleBlackUserID, use var cache cacheProto[relation.IsBlackResp] return cache.Unmarshal(f.local.GetLink(ctx, cachekey.GetIsBlackIDsKey(possibleBlackUserID, userID), func(ctx context.Context) ([]byte, error) { log.ZDebug(ctx, "FriendLocalCache IsBlack rpc", "possibleBlackUserID", possibleBlackUserID, "userID", userID) - return 
cache.Marshal(f.client.Client.IsBlack(ctx, &relation.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID})) + return cache.Marshal(f.client.FriendClient.IsBlack(ctx, &relation.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID})) }, cachekey.GetBlackIDsKey(userID))) } diff --git a/pkg/rpccache/group.go b/pkg/rpccache/group.go index b2d852fc5..174ba7dc5 100644 --- a/pkg/rpccache/group.go +++ b/pkg/rpccache/group.go @@ -17,19 +17,19 @@ package rpccache import ( "context" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/protocol/group" "github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/localcache" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" ) -func NewGroupLocalCache(client rpcclient.GroupRpcClient, localCache *config.LocalCache, cli redis.UniversalClient) *GroupLocalCache { +func NewGroupLocalCache(client *rpcli.GroupClient, localCache *config.LocalCache, cli redis.UniversalClient) *GroupLocalCache { lc := localCache.Group log.ZDebug(context.Background(), "GroupLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) x := &GroupLocalCache{ @@ -49,7 +49,7 @@ func NewGroupLocalCache(client rpcclient.GroupRpcClient, localCache *config.Loca } type GroupLocalCache struct { - client rpcclient.GroupRpcClient + client *rpcli.GroupClient local localcache.Cache[[]byte] } @@ -65,7 +65,7 @@ func (g *GroupLocalCache) getGroupMemberIDs(ctx context.Context, groupID string) var cache cacheProto[group.GetGroupMemberUserIDsResp] return cache.Unmarshal(g.local.Get(ctx, cachekey.GetGroupMemberIDsKey(groupID), func(ctx context.Context) ([]byte, error) { log.ZDebug(ctx, "GroupLocalCache 
getGroupMemberIDs rpc", "groupID", groupID) - return cache.Marshal(g.client.Client.GetGroupMemberUserIDs(ctx, &group.GetGroupMemberUserIDsReq{GroupID: groupID})) + return cache.Marshal(g.client.GroupClient.GetGroupMemberUserIDs(ctx, &group.GetGroupMemberUserIDsReq{GroupID: groupID})) })) } diff --git a/pkg/rpccache/online.go b/pkg/rpccache/online.go index a02a0662d..8f5323477 100644 --- a/pkg/rpccache/online.go +++ b/pkg/rpccache/online.go @@ -3,6 +3,7 @@ package rpccache import ( "context" "fmt" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/user" "math/rand" @@ -14,7 +15,6 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache/lru" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/util/useronline" "github.com/openimsdk/tools/db/cacheutil" "github.com/openimsdk/tools/log" @@ -22,10 +22,10 @@ import ( "github.com/redis/go-redis/v9" ) -func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) { +func NewOnlineCache(client *rpcli.UserClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) { l := &sync.Mutex{} x := &OnlineCache{ - user: user, + client: client, group: group, fullUserCache: fullUserCache, Lock: l, @@ -65,8 +65,8 @@ const ( ) type OnlineCache struct { - user rpcclient.UserRpcClient - group *GroupLocalCache + client *rpcli.UserClient + group *GroupLocalCache // fullUserCache if enabled, caches the online status of all users using mapCache; // otherwise, only a portion of users' online statuses (regardless of whether they are 
online) will be cached using lruCache. @@ -112,7 +112,7 @@ func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) { cursor := uint64(0) for resp == nil || resp.NextCursor != 0 { if err = retryOperation(func() error { - resp, err = o.user.GetAllOnlineUsers(ctx, cursor) + resp, err = o.client.GetAllOnlineUsers(ctx, cursor) if err != nil { return err } @@ -186,7 +186,7 @@ func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) { platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) { - return o.user.GetUserOnlinePlatform(ctx, userID) + return o.client.GetUserOnlinePlatform(ctx, userID) }) if err != nil { log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID) @@ -227,8 +227,7 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) { platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) { platformIDsMap := make(map[string][]int32) - - usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers) + usersStatus, err := o.client.GetUsersOnlinePlatform(ctx, missingUsers) if err != nil { return nil, err } diff --git a/pkg/rpccache/subscriber.go b/pkg/rpccache/subscriber.go index 44e1f5885..0cb35bebf 100644 --- a/pkg/rpccache/subscriber.go +++ b/pkg/rpccache/subscriber.go @@ -17,7 +17,7 @@ package rpccache import ( "context" "encoding/json" - + "github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/log" "github.com/redis/go-redis/v9" ) @@ -25,7 +25,7 @@ import ( func subscriberRedisDeleteCache(ctx context.Context, client redis.UniversalClient, channel string, del func(ctx context.Context, key ...string)) { defer func() { if r := recover(); r != nil { - log.ZPanic(ctx, 
"subscriberRedisDeleteCache Panic", r) + log.ZPanic(ctx, "subscriberRedisDeleteCache Panic", errs.ErrPanic(r)) } }() for message := range client.Subscribe(ctx, channel).Channel() { diff --git a/pkg/rpccache/user.go b/pkg/rpccache/user.go index 79a768597..cb0704906 100644 --- a/pkg/rpccache/user.go +++ b/pkg/rpccache/user.go @@ -16,11 +16,11 @@ package rpccache import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/localcache" - "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/user" "github.com/openimsdk/tools/errs" @@ -28,7 +28,7 @@ import ( "github.com/redis/go-redis/v9" ) -func NewUserLocalCache(client rpcclient.UserRpcClient, localCache *config.LocalCache, cli redis.UniversalClient) *UserLocalCache { +func NewUserLocalCache(client *rpcli.UserClient, localCache *config.LocalCache, cli redis.UniversalClient) *UserLocalCache { lc := localCache.User log.ZDebug(context.Background(), "UserLocalCache", "topic", lc.Topic, "slotNum", lc.SlotNum, "slotSize", lc.SlotSize, "enable", lc.Enable()) x := &UserLocalCache{ @@ -48,7 +48,7 @@ func NewUserLocalCache(client rpcclient.UserRpcClient, localCache *config.LocalC } type UserLocalCache struct { - client rpcclient.UserRpcClient + client *rpcli.UserClient local localcache.Cache[[]byte] } @@ -88,7 +88,7 @@ func (u *UserLocalCache) getUserGlobalMsgRecvOpt(ctx context.Context, userID str var cache cacheProto[user.GetGlobalRecvMessageOptResp] return cache.Unmarshal(u.local.Get(ctx, cachekey.GetUserGlobalRecvMsgOptKey(userID), func(ctx context.Context) ([]byte, error) { log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt rpc", "userID", userID) - return cache.Marshal(u.client.Client.GetGlobalRecvMessageOpt(ctx, &user.GetGlobalRecvMessageOptReq{UserID: userID})) 
+ return cache.Marshal(u.client.UserClient.GetGlobalRecvMessageOpt(ctx, &user.GetGlobalRecvMessageOptReq{UserID: userID})) })) } diff --git a/pkg/rpcclient/auth.go b/pkg/rpcclient/auth.go deleted file mode 100644 index 05fec35a0..000000000 --- a/pkg/rpcclient/auth.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcclient - -import ( - "context" - - "github.com/openimsdk/protocol/auth" - "github.com/openimsdk/tools/discovery" - "github.com/openimsdk/tools/system/program" - "google.golang.org/grpc" -) - -func NewAuth(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) *Auth { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - client := auth.NewAuthClient(conn) - return &Auth{discov: discov, conn: conn, Client: client} -} - -type Auth struct { - conn grpc.ClientConnInterface - Client auth.AuthClient - discov discovery.SvcDiscoveryRegistry -} - -func (a *Auth) ParseToken(ctx context.Context, token string) (*auth.ParseTokenResp, error) { - req := auth.ParseTokenReq{ - Token: token, - } - resp, err := a.Client.ParseToken(ctx, &req) - if err != nil { - return nil, err - } - return resp, err -} - -func (a *Auth) InvalidateToken(ctx context.Context, preservedToken, userID string, platformID int) (*auth.InvalidateTokenResp, error) { - req := auth.InvalidateTokenReq{ - PreservedToken: 
preservedToken, - UserID: userID, - PlatformID: int32(platformID), - } - resp, err := a.Client.InvalidateToken(ctx, &req) - if err != nil { - return nil, err - } - return resp, err -} - -func (a *Auth) KickTokens(ctx context.Context, tokens []string) (*auth.KickTokensResp, error) { - req := auth.KickTokensReq{ - Tokens: tokens, - } - resp, err := a.Client.KickTokens(ctx, &req) - if err != nil { - return nil, err - } - return resp, err -} diff --git a/pkg/rpcclient/conversation.go b/pkg/rpcclient/conversation.go deleted file mode 100644 index c69d355d6..000000000 --- a/pkg/rpcclient/conversation.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpcclient - -import ( - "context" - "fmt" - - pbconversation "github.com/openimsdk/protocol/conversation" - "github.com/openimsdk/tools/discovery" - "github.com/openimsdk/tools/errs" - "github.com/openimsdk/tools/system/program" - "google.golang.org/grpc" -) - -type Conversation struct { - Client pbconversation.ConversationClient - conn grpc.ClientConnInterface - discov discovery.SvcDiscoveryRegistry -} - -func NewConversation(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) *Conversation { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - client := pbconversation.NewConversationClient(conn) - return &Conversation{discov: discov, conn: conn, Client: client} -} - -type ConversationRpcClient Conversation - -func NewConversationRpcClient(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) ConversationRpcClient { - return ConversationRpcClient(*NewConversation(discov, rpcRegisterName)) -} - -func (c *ConversationRpcClient) GetSingleConversationRecvMsgOpt(ctx context.Context, userID, conversationID string) (int32, error) { - var req pbconversation.GetConversationReq - req.OwnerUserID = userID - req.ConversationID = conversationID - conversation, err := c.Client.GetConversation(ctx, &req) - if err != nil { - return 0, err - } - return conversation.GetConversation().RecvMsgOpt, err -} - -func (c *ConversationRpcClient) SingleChatFirstCreateConversation(ctx context.Context, recvID, sendID, - conversationID string, conversationType int32) error { - _, err := c.Client.CreateSingleChatConversations(ctx, - &pbconversation.CreateSingleChatConversationsReq{ - RecvID: recvID, SendID: sendID, ConversationID: conversationID, - ConversationType: conversationType, - }) - return err -} - -func (c *ConversationRpcClient) GroupChatFirstCreateConversation(ctx context.Context, groupID string, userIDs []string) error { - _, err := c.Client.CreateGroupChatConversations(ctx, 
&pbconversation.CreateGroupChatConversationsReq{UserIDs: userIDs, GroupID: groupID}) - return err -} - -func (c *ConversationRpcClient) SetConversationMaxSeq(ctx context.Context, ownerUserIDs []string, conversationID string, maxSeq int64) error { - _, err := c.Client.SetConversationMaxSeq(ctx, &pbconversation.SetConversationMaxSeqReq{OwnerUserID: ownerUserIDs, ConversationID: conversationID, MaxSeq: maxSeq}) - return err -} - -func (c *ConversationRpcClient) SetConversationMinSeq(ctx context.Context, ownerUserIDs []string, conversationID string, minSeq int64) error { - _, err := c.Client.SetConversationMinSeq(ctx, &pbconversation.SetConversationMinSeqReq{OwnerUserID: ownerUserIDs, ConversationID: conversationID, MinSeq: minSeq}) - return err -} - -func (c *ConversationRpcClient) SetConversations(ctx context.Context, userIDs []string, conversation *pbconversation.ConversationReq) error { - _, err := c.Client.SetConversations(ctx, &pbconversation.SetConversationsReq{UserIDs: userIDs, Conversation: conversation}) - return err -} - -func (c *ConversationRpcClient) UpdateConversation(ctx context.Context, conversation *pbconversation.UpdateConversationReq) error { - _, err := c.Client.UpdateConversation(ctx, conversation) - return err -} - -func (c *ConversationRpcClient) GetConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) { - resp, err := c.Client.GetConversationIDs(ctx, &pbconversation.GetConversationIDsReq{UserID: ownerUserID}) - if err != nil { - return nil, err - } - return resp.ConversationIDs, nil -} - -func (c *ConversationRpcClient) GetConversation(ctx context.Context, ownerUserID, conversationID string) (*pbconversation.Conversation, error) { - resp, err := c.Client.GetConversation(ctx, &pbconversation.GetConversationReq{OwnerUserID: ownerUserID, ConversationID: conversationID}) - if err != nil { - return nil, err - } - return resp.Conversation, nil -} - -func (c *ConversationRpcClient) GetConversationsByConversationID(ctx 
context.Context, conversationIDs []string) ([]*pbconversation.Conversation, error) { - if len(conversationIDs) == 0 { - return nil, nil - } - resp, err := c.Client.GetConversationsByConversationID(ctx, &pbconversation.GetConversationsByConversationIDReq{ConversationIDs: conversationIDs}) - if err != nil { - return nil, err - } - if len(resp.Conversations) == 0 { - return nil, errs.ErrRecordNotFound.WrapMsg(fmt.Sprintf("conversationIDs: %v not found", conversationIDs)) - } - return resp.Conversations, nil -} - -func (c *ConversationRpcClient) GetConversationOfflinePushUserIDs(ctx context.Context, conversationID string, userIDs []string) ([]string, error) { - resp, err := c.Client.GetConversationOfflinePushUserIDs(ctx, &pbconversation.GetConversationOfflinePushUserIDsReq{ConversationID: conversationID, UserIDs: userIDs}) - if err != nil { - return nil, err - } - return resp.UserIDs, nil -} - -func (c *ConversationRpcClient) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*pbconversation.Conversation, error) { - if len(conversationIDs) == 0 { - return nil, nil - } - resp, err := c.Client.GetConversations( - ctx, - &pbconversation.GetConversationsReq{OwnerUserID: ownerUserID, ConversationIDs: conversationIDs}, - ) - if err != nil { - return nil, err - } - return resp.Conversations, nil -} - -func (c *ConversationRpcClient) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) { - resp, err := c.Client.GetConversationNotReceiveMessageUserIDs(ctx, &pbconversation.GetConversationNotReceiveMessageUserIDsReq{ConversationID: conversationID}) - if err != nil { - return nil, err - } - return resp.UserIDs, nil -} - -func (c *ConversationRpcClient) GetConversationsNeedClearMsg(ctx context.Context) ([]*pbconversation.Conversation, error) { - resp, err := c.Client.GetConversationsNeedClearMsg(ctx, &pbconversation.GetConversationsNeedClearMsgReq{}) - if err != nil { - return nil, err - } - return 
resp.Conversations, nil -} diff --git a/pkg/rpcclient/doc.go b/pkg/rpcclient/doc.go deleted file mode 100644 index 66b0aee91..000000000 --- a/pkg/rpcclient/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright © 2024 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcclient // import "github.com/openimsdk/open-im-server/v3/pkg/rpcclient" diff --git a/pkg/rpcclient/friend.go b/pkg/rpcclient/friend.go deleted file mode 100644 index 359ed3a8b..000000000 --- a/pkg/rpcclient/friend.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpcclient - -import ( - "context" - - "github.com/openimsdk/protocol/relation" - sdkws "github.com/openimsdk/protocol/sdkws" - "github.com/openimsdk/tools/discovery" - "github.com/openimsdk/tools/system/program" - "google.golang.org/grpc" -) - -type Friend struct { - conn grpc.ClientConnInterface - Client relation.FriendClient - discov discovery.SvcDiscoveryRegistry -} - -func NewFriend(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) *Friend { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - client := relation.NewFriendClient(conn) - return &Friend{discov: discov, conn: conn, Client: client} -} - -type FriendRpcClient Friend - -func NewFriendRpcClient(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) FriendRpcClient { - return FriendRpcClient(*NewFriend(discov, rpcRegisterName)) -} - -func (f *FriendRpcClient) GetFriendsInfo( - ctx context.Context, - ownerUserID, relationUserID string, -) (resp *sdkws.FriendInfo, err error) { - r, err := f.Client.GetDesignatedFriends( - ctx, - &relation.GetDesignatedFriendsReq{OwnerUserID: ownerUserID, FriendUserIDs: []string{relationUserID}}, - ) - if err != nil { - return nil, err - } - resp = r.FriendsInfo[0] - return -} - -// possibleFriendUserID Is PossibleFriendUserId's relations. 
-func (f *FriendRpcClient) IsFriend(ctx context.Context, possibleFriendUserID, userID string) (bool, error) { - resp, err := f.Client.IsFriend(ctx, &relation.IsFriendReq{UserID1: userID, UserID2: possibleFriendUserID}) - if err != nil { - return false, err - } - return resp.InUser1Friends, nil -} - -func (f *FriendRpcClient) GetFriendIDs(ctx context.Context, ownerUserID string) (relationIDs []string, err error) { - req := relation.GetFriendIDsReq{UserID: ownerUserID} - resp, err := f.Client.GetFriendIDs(ctx, &req) - if err != nil { - return nil, err - } - return resp.FriendIDs, nil -} - -func (f *FriendRpcClient) IsBlack(ctx context.Context, possibleBlackUserID, userID string) (bool, error) { - r, err := f.Client.IsBlack(ctx, &relation.IsBlackReq{UserID1: possibleBlackUserID, UserID2: userID}) - if err != nil { - return false, err - } - return r.InUser2Blacks, nil -} diff --git a/pkg/rpcclient/group.go b/pkg/rpcclient/group.go deleted file mode 100644 index 30d0b3288..000000000 --- a/pkg/rpcclient/group.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpcclient - -import ( - "context" - "strings" - - "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/protocol/constant" - "github.com/openimsdk/protocol/group" - "github.com/openimsdk/protocol/sdkws" - "github.com/openimsdk/tools/discovery" - "github.com/openimsdk/tools/system/program" - "github.com/openimsdk/tools/utils/datautil" -) - -type Group struct { - Client group.GroupClient - discov discovery.SvcDiscoveryRegistry -} - -func NewGroup(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) *Group { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - client := group.NewGroupClient(conn) - return &Group{discov: discov, Client: client} -} - -type GroupRpcClient Group - -func NewGroupRpcClient(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) GroupRpcClient { - return GroupRpcClient(*NewGroup(discov, rpcRegisterName)) -} - -func (g *GroupRpcClient) GetGroupInfos(ctx context.Context, groupIDs []string, complete bool) ([]*sdkws.GroupInfo, error) { - resp, err := g.Client.GetGroupsInfo(ctx, &group.GetGroupsInfoReq{ - GroupIDs: groupIDs, - }) - if err != nil { - return nil, err - } - if complete { - if ids := datautil.Single(groupIDs, datautil.Slice(resp.GroupInfos, func(e *sdkws.GroupInfo) string { - return e.GroupID - })); len(ids) > 0 { - return nil, servererrs.ErrGroupIDNotFound.WrapMsg(strings.Join(ids, ",")) - } - } - return resp.GroupInfos, nil -} - -func (g *GroupRpcClient) GetGroupInfo(ctx context.Context, groupID string) (*sdkws.GroupInfo, error) { - groups, err := g.GetGroupInfos(ctx, []string{groupID}, true) - if err != nil { - return nil, err - } - return groups[0], nil -} - -func (g *GroupRpcClient) GetGroupInfoMap( - ctx context.Context, - groupIDs []string, - complete bool, -) (map[string]*sdkws.GroupInfo, error) { - groups, err := g.GetGroupInfos(ctx, groupIDs, complete) - if err != nil { - return nil, err - } - 
return datautil.SliceToMap(groups, func(e *sdkws.GroupInfo) string { - return e.GroupID - }), nil -} - -func (g *GroupRpcClient) GetGroupMemberInfos( - ctx context.Context, - groupID string, - userIDs []string, - complete bool, -) ([]*sdkws.GroupMemberFullInfo, error) { - resp, err := g.Client.GetGroupMembersInfo(ctx, &group.GetGroupMembersInfoReq{ - GroupID: groupID, - UserIDs: userIDs, - }) - if err != nil { - return nil, err - } - if complete { - if ids := datautil.Single(userIDs, datautil.Slice(resp.Members, func(e *sdkws.GroupMemberFullInfo) string { - return e.UserID - })); len(ids) > 0 { - return nil, servererrs.ErrNotInGroupYet.WrapMsg(strings.Join(ids, ",")) - } - } - return resp.Members, nil -} - -func (g *GroupRpcClient) GetGroupMemberInfo( - ctx context.Context, - groupID string, - userID string, -) (*sdkws.GroupMemberFullInfo, error) { - members, err := g.GetGroupMemberInfos(ctx, groupID, []string{userID}, true) - if err != nil { - return nil, err - } - return members[0], nil -} - -func (g *GroupRpcClient) GetGroupMemberInfoMap( - ctx context.Context, - groupID string, - userIDs []string, - complete bool, -) (map[string]*sdkws.GroupMemberFullInfo, error) { - members, err := g.GetGroupMemberInfos(ctx, groupID, userIDs, true) - if err != nil { - return nil, err - } - return datautil.SliceToMap(members, func(e *sdkws.GroupMemberFullInfo) string { - return e.UserID - }), nil -} - -func (g *GroupRpcClient) GetOwnerAndAdminInfos( - ctx context.Context, - groupID string, -) ([]*sdkws.GroupMemberFullInfo, error) { - resp, err := g.Client.GetGroupMemberRoleLevel(ctx, &group.GetGroupMemberRoleLevelReq{ - GroupID: groupID, - RoleLevels: []int32{constant.GroupOwner, constant.GroupAdmin}, - }) - if err != nil { - return nil, err - } - return resp.Members, nil -} - -func (g *GroupRpcClient) GetOwnerInfo(ctx context.Context, groupID string) (*sdkws.GroupMemberFullInfo, error) { - resp, err := g.Client.GetGroupMemberRoleLevel(ctx, &group.GetGroupMemberRoleLevelReq{ - 
GroupID: groupID, - RoleLevels: []int32{constant.GroupOwner}, - }) - return resp.Members[0], err -} - -func (g *GroupRpcClient) GetGroupMemberIDs(ctx context.Context, groupID string) ([]string, error) { - resp, err := g.Client.GetGroupMemberUserIDs(ctx, &group.GetGroupMemberUserIDsReq{ - GroupID: groupID, - }) - if err != nil { - return nil, err - } - return resp.UserIDs, nil -} - -func (g *GroupRpcClient) GetGroupInfoCache(ctx context.Context, groupID string) (*sdkws.GroupInfo, error) { - resp, err := g.Client.GetGroupInfoCache(ctx, &group.GetGroupInfoCacheReq{ - GroupID: groupID, - }) - if err != nil { - return nil, err - } - return resp.GroupInfo, nil -} - -func (g *GroupRpcClient) GetGroupMemberCache(ctx context.Context, groupID string, groupMemberID string) (*sdkws.GroupMemberFullInfo, error) { - resp, err := g.Client.GetGroupMemberCache(ctx, &group.GetGroupMemberCacheReq{ - GroupID: groupID, - GroupMemberID: groupMemberID, - }) - if err != nil { - return nil, err - } - return resp.Member, nil -} - -func (g *GroupRpcClient) DismissGroup(ctx context.Context, groupID string) error { - _, err := g.Client.DismissGroup(ctx, &group.DismissGroupReq{ - GroupID: groupID, - DeleteMember: true, - }) - return err -} - -func (g *GroupRpcClient) NotificationUserInfoUpdate(ctx context.Context, userID string) error { - _, err := g.Client.NotificationUserInfoUpdate(ctx, &group.NotificationUserInfoUpdateReq{ - UserID: userID, - }) - return err -} diff --git a/pkg/rpcclient/grouphash/doc.go b/pkg/rpcclient/grouphash/doc.go deleted file mode 100644 index c780701d9..000000000 --- a/pkg/rpcclient/grouphash/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright © 2024 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grouphash // import "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/grouphash" diff --git a/pkg/rpcclient/notification/doc.go b/pkg/rpcclient/notification/doc.go deleted file mode 100644 index 8ce57ca4e..000000000 --- a/pkg/rpcclient/notification/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright © 2024 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package notification // import "github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification" diff --git a/pkg/rpcclient/push.go b/pkg/rpcclient/push.go deleted file mode 100644 index c549e454a..000000000 --- a/pkg/rpcclient/push.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcclient - -import ( - "context" - - "github.com/openimsdk/protocol/push" - "github.com/openimsdk/tools/discovery" - "github.com/openimsdk/tools/system/program" - "google.golang.org/grpc" -) - -type Push struct { - conn grpc.ClientConnInterface - Client push.PushMsgServiceClient - discov discovery.SvcDiscoveryRegistry -} - -func NewPush(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) *Push { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - return &Push{ - discov: discov, - conn: conn, - Client: push.NewPushMsgServiceClient(conn), - } -} - -type PushRpcClient Push - -func NewPushRpcClient(discov discovery.SvcDiscoveryRegistry, rpcRegisterName string) PushRpcClient { - return PushRpcClient(*NewPush(discov, rpcRegisterName)) -} - -func (p *PushRpcClient) DelUserPushToken(ctx context.Context, req *push.DelUserPushTokenReq) (*push.DelUserPushTokenResp, error) { - return p.Client.DelUserPushToken(ctx, req) -} diff --git a/pkg/rpcclient/third.go b/pkg/rpcclient/third.go deleted file mode 100644 index 7cdc60d52..000000000 --- a/pkg/rpcclient/third.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcclient - -import ( - "context" - - "github.com/openimsdk/protocol/third" - "github.com/openimsdk/tools/discovery" - "github.com/openimsdk/tools/system/program" - "google.golang.org/grpc" -) - -type Third struct { - conn grpc.ClientConnInterface - Client third.ThirdClient - discov discovery.SvcDiscoveryRegistry - GrafanaUrl string -} - -func NewThird(discov discovery.SvcDiscoveryRegistry, rpcRegisterName, grafanaUrl string) *Third { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - client := third.NewThirdClient(conn) - if err != nil { - program.ExitWithError(err) - } - return &Third{discov: discov, Client: client, conn: conn, GrafanaUrl: grafanaUrl} -} -func (t *Third) DeleteOutdatedData(ctx context.Context, expires int64) error { - _, err := t.Client.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: expires}) - return err -} diff --git a/pkg/rpcclient/user.go b/pkg/rpcclient/user.go deleted file mode 100644 index bdc1a2e01..000000000 --- a/pkg/rpcclient/user.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright © 2023 OpenIM. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcclient - -import ( - "context" - "strings" - - "github.com/openimsdk/open-im-server/v3/pkg/authverify" - "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/protocol/sdkws" - "github.com/openimsdk/protocol/user" - "github.com/openimsdk/tools/discovery" - "github.com/openimsdk/tools/system/program" - "github.com/openimsdk/tools/utils/datautil" - "google.golang.org/grpc" -) - -// User represents a structure holding connection details for the User RPC client. -type User struct { - conn grpc.ClientConnInterface - Client user.UserClient - Discov discovery.SvcDiscoveryRegistry - MessageGateWayRpcName string - imAdminUserID []string -} - -// NewUser initializes and returns a User instance based on the provided service discovery registry. -func NewUser(discov discovery.SvcDiscoveryRegistry, rpcRegisterName, messageGateWayRpcName string, - imAdminUserID []string) *User { - conn, err := discov.GetConn(context.Background(), rpcRegisterName) - if err != nil { - program.ExitWithError(err) - } - client := user.NewUserClient(conn) - return &User{Discov: discov, Client: client, - conn: conn, - MessageGateWayRpcName: messageGateWayRpcName, - imAdminUserID: imAdminUserID} -} - -// UserRpcClient represents the structure for a User RPC client. -type UserRpcClient User - -// NewUserRpcClientByUser initializes a UserRpcClient based on the provided User instance. 
-func NewUserRpcClientByUser(user *User) *UserRpcClient { - rpc := UserRpcClient(*user) - return &rpc -} - -// NewUserRpcClient initializes a UserRpcClient based on the provided service discovery registry. -func NewUserRpcClient(client discovery.SvcDiscoveryRegistry, rpcRegisterName string, - imAdminUserID []string) UserRpcClient { - return UserRpcClient(*NewUser(client, rpcRegisterName, "", imAdminUserID)) -} - -// GetUsersInfo retrieves information for multiple users based on their user IDs. -func (u *UserRpcClient) GetUsersInfo(ctx context.Context, userIDs []string) ([]*sdkws.UserInfo, error) { - if len(userIDs) == 0 { - return []*sdkws.UserInfo{}, nil - } - resp, err := u.Client.GetDesignateUsers(ctx, &user.GetDesignateUsersReq{ - UserIDs: userIDs, - }) - if err != nil { - return nil, err - } - if ids := datautil.Single(userIDs, datautil.Slice(resp.UsersInfo, func(e *sdkws.UserInfo) string { - return e.UserID - })); len(ids) > 0 { - return nil, servererrs.ErrUserIDNotFound.WrapMsg(strings.Join(ids, ",")) - } - return resp.UsersInfo, nil -} - -// GetUserInfo retrieves information for a single user based on the provided user ID. -func (u *UserRpcClient) GetUserInfo(ctx context.Context, userID string) (*sdkws.UserInfo, error) { - users, err := u.GetUsersInfo(ctx, []string{userID}) - if err != nil { - return nil, err - } - return users[0], nil -} - -// GetUsersInfoMap retrieves a map of user information indexed by their user IDs. -func (u *UserRpcClient) GetUsersInfoMap(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error) { - users, err := u.GetUsersInfo(ctx, userIDs) - if err != nil { - return nil, err - } - return datautil.SliceToMap(users, func(e *sdkws.UserInfo) string { - return e.UserID - }), nil -} - -// GetPublicUserInfos retrieves public information for multiple users based on their user IDs. 
-func (u *UserRpcClient) GetPublicUserInfos( - ctx context.Context, - userIDs []string, -) ([]*sdkws.PublicUserInfo, error) { - users, err := u.GetUsersInfo(ctx, userIDs) - if err != nil { - return nil, err - } - - return datautil.Slice(users, func(e *sdkws.UserInfo) *sdkws.PublicUserInfo { - return &sdkws.PublicUserInfo{ - UserID: e.UserID, - Nickname: e.Nickname, - FaceURL: e.FaceURL, - Ex: e.Ex, - } - }), nil -} - -// GetPublicUserInfo retrieves public information for a single user based on the provided user ID. -func (u *UserRpcClient) GetPublicUserInfo(ctx context.Context, userID string) (*sdkws.PublicUserInfo, error) { - users, err := u.GetPublicUserInfos(ctx, []string{userID}) - if err != nil { - return nil, err - } - - return users[0], nil -} - -// GetPublicUserInfoMap retrieves a map of public user information indexed by their user IDs. -func (u *UserRpcClient) GetPublicUserInfoMap( - ctx context.Context, - userIDs []string, -) (map[string]*sdkws.PublicUserInfo, error) { - users, err := u.GetPublicUserInfos(ctx, userIDs) - if err != nil { - return nil, err - } - - return datautil.SliceToMap(users, func(e *sdkws.PublicUserInfo) string { - return e.UserID - }), nil -} - -// GetUserGlobalMsgRecvOpt retrieves the global message receive option for a user based on the provided user ID. -func (u *UserRpcClient) GetUserGlobalMsgRecvOpt(ctx context.Context, userID string) (int32, error) { - resp, err := u.Client.GetGlobalRecvMessageOpt(ctx, &user.GetGlobalRecvMessageOptReq{ - UserID: userID, - }) - if err != nil { - return 0, err - } - return resp.GlobalRecvMsgOpt, nil -} - -// Access verifies the access rights for the provided user ID. -func (u *UserRpcClient) Access(ctx context.Context, ownerUserID string) error { - _, err := u.GetUserInfo(ctx, ownerUserID) - if err != nil { - return err - } - return authverify.CheckAccessV3(ctx, ownerUserID, u.imAdminUserID) -} - -// GetAllUserID retrieves all user IDs with pagination options. 
-func (u *UserRpcClient) GetAllUserID(ctx context.Context, pageNumber, showNumber int32) (*user.GetAllUserIDResp, error) { - resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}}) - if err != nil { - return nil, err - } - return resp, nil -} - -// GetAllUserIDs retrieves all user IDs with pagination options. -func (u *UserRpcClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) { - resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}}) - if err != nil { - return nil, err - } - return resp.UserIDs, nil -} - -// SetUserStatus sets the status for a user based on the provided user ID, status, and platform ID. -func (u *UserRpcClient) SetUserStatus(ctx context.Context, userID string, status int32, platformID int) error { - _, err := u.Client.SetUserStatus(ctx, &user.SetUserStatusReq{ - UserID: userID, - Status: status, PlatformID: int32(platformID), - }) - return err -} - -func (u *UserRpcClient) GetNotificationByID(ctx context.Context, userID string) error { - _, err := u.Client.GetNotificationAccount(ctx, &user.GetNotificationAccountReq{ - UserID: userID, - }) - return err -} - -func (u *UserRpcClient) GetUsersOnlinePlatform(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { - if len(userIDs) == 0 { - return nil, nil - } - resp, err := u.Client.GetUserStatus(ctx, &user.GetUserStatusReq{UserIDs: userIDs, UserID: u.imAdminUserID[0]}) - if err != nil { - return nil, err - } - return resp.StatusList, nil -} - -func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) { - resp, err := u.GetUsersOnlinePlatform(ctx, []string{userID}) - if err != nil { - return nil, err - } - if len(resp) == 0 { - return nil, nil - } - return resp[0].PlatformIDs, nil -} - -func (u *UserRpcClient) 
GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) { - return u.Client.GetAllOnlineUsers(ctx, &user.GetAllOnlineUsersReq{Cursor: cursor}) -} diff --git a/pkg/rpcli/auth.go b/pkg/rpcli/auth.go new file mode 100644 index 000000000..17fc6ea28 --- /dev/null +++ b/pkg/rpcli/auth.go @@ -0,0 +1,30 @@ +package rpcli + +import ( + "context" + "github.com/openimsdk/protocol/auth" + "google.golang.org/grpc" +) + +func NewAuthClient(cc grpc.ClientConnInterface) *AuthClient { + return &AuthClient{auth.NewAuthClient(cc)} +} + +type AuthClient struct { + auth.AuthClient +} + +func (x *AuthClient) KickTokens(ctx context.Context, tokens []string) error { + if len(tokens) == 0 { + return nil + } + return ignoreResp(x.AuthClient.KickTokens(ctx, &auth.KickTokensReq{Tokens: tokens})) +} + +func (x *AuthClient) InvalidateToken(ctx context.Context, req *auth.InvalidateTokenReq) error { + return ignoreResp(x.AuthClient.InvalidateToken(ctx, req)) +} + +func (x *AuthClient) ParseToken(ctx context.Context, token string) (*auth.ParseTokenResp, error) { + return x.AuthClient.ParseToken(ctx, &auth.ParseTokenReq{Token: token}) +} diff --git a/pkg/rpcli/conversation.go b/pkg/rpcli/conversation.go new file mode 100644 index 000000000..ba5b90cb3 --- /dev/null +++ b/pkg/rpcli/conversation.go @@ -0,0 +1,94 @@ +package rpcli + +import ( + "context" + "github.com/openimsdk/protocol/conversation" + "google.golang.org/grpc" +) + +func NewConversationClient(cc grpc.ClientConnInterface) *ConversationClient { + return &ConversationClient{conversation.NewConversationClient(cc)} +} + +type ConversationClient struct { + conversation.ConversationClient +} + +func (x *ConversationClient) SetConversationMaxSeq(ctx context.Context, conversationID string, ownerUserIDs []string, maxSeq int64) error { + if len(ownerUserIDs) == 0 { + return nil + } + req := &conversation.SetConversationMaxSeqReq{ConversationID: conversationID, OwnerUserID: ownerUserIDs, MaxSeq: maxSeq} + return 
ignoreResp(x.ConversationClient.SetConversationMaxSeq(ctx, req)) +} + +func (x *ConversationClient) SetConversations(ctx context.Context, ownerUserIDs []string, info *conversation.ConversationReq) error { + if len(ownerUserIDs) == 0 { + return nil + } + req := &conversation.SetConversationsReq{UserIDs: ownerUserIDs, Conversation: info} + return ignoreResp(x.ConversationClient.SetConversations(ctx, req)) +} + +func (x *ConversationClient) GetConversationsByConversationIDs(ctx context.Context, conversationIDs []string) ([]*conversation.Conversation, error) { + if len(conversationIDs) == 0 { + return nil, nil + } + req := &conversation.GetConversationsByConversationIDReq{ConversationIDs: conversationIDs} + return extractField(ctx, x.ConversationClient.GetConversationsByConversationID, req, (*conversation.GetConversationsByConversationIDResp).GetConversations) +} + +func (x *ConversationClient) GetConversationsByConversationID(ctx context.Context, conversationID string) (*conversation.Conversation, error) { + return firstValue(x.GetConversationsByConversationIDs(ctx, []string{conversationID})) +} + +func (x *ConversationClient) SetConversationMinSeq(ctx context.Context, conversationID string, ownerUserIDs []string, minSeq int64) error { + if len(ownerUserIDs) == 0 { + return nil + } + req := &conversation.SetConversationMinSeqReq{ConversationID: conversationID, OwnerUserID: ownerUserIDs, MinSeq: minSeq} + return ignoreResp(x.ConversationClient.SetConversationMinSeq(ctx, req)) +} + +func (x *ConversationClient) GetConversation(ctx context.Context, conversationID string, ownerUserID string) (*conversation.Conversation, error) { + req := &conversation.GetConversationReq{ConversationID: conversationID, OwnerUserID: ownerUserID} + return extractField(ctx, x.ConversationClient.GetConversation, req, (*conversation.GetConversationResp).GetConversation) +} + +func (x *ConversationClient) GetConversations(ctx context.Context, conversationIDs []string, ownerUserID string) 
([]*conversation.Conversation, error) { + if len(conversationIDs) == 0 { + return nil, nil + } + req := &conversation.GetConversationsReq{ConversationIDs: conversationIDs, OwnerUserID: ownerUserID} + return extractField(ctx, x.ConversationClient.GetConversations, req, (*conversation.GetConversationsResp).GetConversations) +} + +func (x *ConversationClient) GetConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) { + req := &conversation.GetConversationIDsReq{UserID: ownerUserID} + return extractField(ctx, x.ConversationClient.GetConversationIDs, req, (*conversation.GetConversationIDsResp).GetConversationIDs) +} + +func (x *ConversationClient) GetPinnedConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) { + req := &conversation.GetPinnedConversationIDsReq{UserID: ownerUserID} + return extractField(ctx, x.ConversationClient.GetPinnedConversationIDs, req, (*conversation.GetPinnedConversationIDsResp).GetConversationIDs) +} + +func (x *ConversationClient) CreateGroupChatConversations(ctx context.Context, groupID string, userIDs []string) error { + if len(userIDs) == 0 { + return nil + } + req := &conversation.CreateGroupChatConversationsReq{GroupID: groupID, UserIDs: userIDs} + return ignoreResp(x.ConversationClient.CreateGroupChatConversations(ctx, req)) +} + +func (x *ConversationClient) CreateSingleChatConversations(ctx context.Context, req *conversation.CreateSingleChatConversationsReq) error { + return ignoreResp(x.ConversationClient.CreateSingleChatConversations(ctx, req)) +} + +func (x *ConversationClient) GetConversationOfflinePushUserIDs(ctx context.Context, conversationID string, userIDs []string) ([]string, error) { + if len(userIDs) == 0 { + return nil, nil + } + req := &conversation.GetConversationOfflinePushUserIDsReq{ConversationID: conversationID, UserIDs: userIDs} + return extractField(ctx, x.ConversationClient.GetConversationOfflinePushUserIDs, req, 
(*conversation.GetConversationOfflinePushUserIDsResp).GetUserIDs) +} diff --git a/pkg/rpcli/group.go b/pkg/rpcli/group.go new file mode 100644 index 000000000..b51283c5d --- /dev/null +++ b/pkg/rpcli/group.go @@ -0,0 +1,48 @@ +package rpcli + +import ( + "context" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/sdkws" + "google.golang.org/grpc" +) + +func NewGroupClient(cc grpc.ClientConnInterface) *GroupClient { + return &GroupClient{group.NewGroupClient(cc)} +} + +type GroupClient struct { + group.GroupClient +} + +func (x *GroupClient) GetGroupsInfo(ctx context.Context, groupIDs []string) ([]*sdkws.GroupInfo, error) { + if len(groupIDs) == 0 { + return nil, nil + } + req := &group.GetGroupsInfoReq{GroupIDs: groupIDs} + return extractField(ctx, x.GroupClient.GetGroupsInfo, req, (*group.GetGroupsInfoResp).GetGroupInfos) +} + +func (x *GroupClient) GetGroupInfo(ctx context.Context, groupID string) (*sdkws.GroupInfo, error) { + return firstValue(x.GetGroupsInfo(ctx, []string{groupID})) +} + +func (x *GroupClient) GetGroupInfoCache(ctx context.Context, groupID string) (*sdkws.GroupInfo, error) { + req := &group.GetGroupInfoCacheReq{GroupID: groupID} + return extractField(ctx, x.GroupClient.GetGroupInfoCache, req, (*group.GetGroupInfoCacheResp).GetGroupInfo) +} + +func (x *GroupClient) GetGroupMemberCache(ctx context.Context, groupID string, userID string) (*sdkws.GroupMemberFullInfo, error) { + req := &group.GetGroupMemberCacheReq{GroupID: groupID, GroupMemberID: userID} + return extractField(ctx, x.GroupClient.GetGroupMemberCache, req, (*group.GetGroupMemberCacheResp).GetMember) +} + +func (x *GroupClient) DismissGroup(ctx context.Context, groupID string, deleteMember bool) error { + req := &group.DismissGroupReq{GroupID: groupID, DeleteMember: deleteMember} + return ignoreResp(x.GroupClient.DismissGroup(ctx, req)) +} + +func (x *GroupClient) GetGroupMemberUserIDs(ctx context.Context, groupID string) ([]string, error) { + req := 
&group.GetGroupMemberUserIDsReq{GroupID: groupID} + return extractField(ctx, x.GroupClient.GetGroupMemberUserIDs, req, (*group.GetGroupMemberUserIDsResp).GetUserIDs) +} diff --git a/pkg/rpcli/msg.go b/pkg/rpcli/msg.go new file mode 100644 index 000000000..0c44b7c8b --- /dev/null +++ b/pkg/rpcli/msg.go @@ -0,0 +1,90 @@ +package rpcli + +import ( + "context" + "github.com/openimsdk/protocol/msg" + "github.com/openimsdk/protocol/sdkws" + "google.golang.org/grpc" +) + +func NewMsgClient(cc grpc.ClientConnInterface) *MsgClient { + return &MsgClient{msg.NewMsgClient(cc)} +} + +type MsgClient struct { + msg.MsgClient +} + +func (x *MsgClient) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) { + if len(conversationIDs) == 0 { + return nil, nil + } + req := &msg.GetMaxSeqsReq{ConversationIDs: conversationIDs} + return extractField(ctx, x.MsgClient.GetMaxSeqs, req, (*msg.SeqsInfoResp).GetMaxSeqs) +} + +func (x *MsgClient) GetMsgByConversationIDs(ctx context.Context, conversationIDs []string, maxSeqs map[string]int64) (map[string]*sdkws.MsgData, error) { + if len(conversationIDs) == 0 || len(maxSeqs) == 0 { + return nil, nil + } + req := &msg.GetMsgByConversationIDsReq{ConversationIDs: conversationIDs, MaxSeqs: maxSeqs} + return extractField(ctx, x.MsgClient.GetMsgByConversationIDs, req, (*msg.GetMsgByConversationIDsResp).GetMsgDatas) +} + +func (x *MsgClient) GetHasReadSeqs(ctx context.Context, conversationIDs []string, userID string) (map[string]int64, error) { + if len(conversationIDs) == 0 { + return nil, nil + } + req := &msg.GetHasReadSeqsReq{ConversationIDs: conversationIDs, UserID: userID} + return extractField(ctx, x.MsgClient.GetHasReadSeqs, req, (*msg.SeqsInfoResp).GetMaxSeqs) +} + +func (x *MsgClient) SetUserConversationMaxSeq(ctx context.Context, conversationID string, ownerUserIDs []string, maxSeq int64) error { + if len(ownerUserIDs) == 0 { + return nil + } + req := &msg.SetUserConversationMaxSeqReq{ConversationID: 
conversationID, OwnerUserID: ownerUserIDs, MaxSeq: maxSeq} + return ignoreResp(x.MsgClient.SetUserConversationMaxSeq(ctx, req)) +} + +func (x *MsgClient) SetUserConversationMin(ctx context.Context, conversationID string, ownerUserIDs []string, minSeq int64) error { + if len(ownerUserIDs) == 0 { + return nil + } + req := &msg.SetUserConversationsMinSeqReq{ConversationID: conversationID, UserIDs: ownerUserIDs, Seq: minSeq} + return ignoreResp(x.MsgClient.SetUserConversationsMinSeq(ctx, req)) +} + +func (x *MsgClient) GetLastMessageSeqByTime(ctx context.Context, conversationID string, lastTime int64) (int64, error) { + req := &msg.GetLastMessageSeqByTimeReq{ConversationID: conversationID, Time: lastTime} + return extractField(ctx, x.MsgClient.GetLastMessageSeqByTime, req, (*msg.GetLastMessageSeqByTimeResp).GetSeq) +} + +func (x *MsgClient) GetConversationMaxSeq(ctx context.Context, conversationID string) (int64, error) { + req := &msg.GetConversationMaxSeqReq{ConversationID: conversationID} + return extractField(ctx, x.MsgClient.GetConversationMaxSeq, req, (*msg.GetConversationMaxSeqResp).GetMaxSeq) +} + +func (x *MsgClient) GetActiveConversation(ctx context.Context, conversationIDs []string) ([]*msg.ActiveConversation, error) { + if len(conversationIDs) == 0 { + return nil, nil + } + req := &msg.GetActiveConversationReq{ConversationIDs: conversationIDs} + return extractField(ctx, x.MsgClient.GetActiveConversation, req, (*msg.GetActiveConversationResp).GetConversations) +} + +func (x *MsgClient) GetSeqMessage(ctx context.Context, userID string, conversations []*msg.ConversationSeqs) (map[string]*sdkws.PullMsgs, error) { + if len(conversations) == 0 { + return nil, nil + } + req := &msg.GetSeqMessageReq{UserID: userID, Conversations: conversations} + return extractField(ctx, x.MsgClient.GetSeqMessage, req, (*msg.GetSeqMessageResp).GetMsgs) +} + +func (x *MsgClient) SetUserConversationsMinSeq(ctx context.Context, conversationID string, userIDs []string, seq int64) error 
{ + if len(userIDs) == 0 { + return nil + } + req := &msg.SetUserConversationsMinSeqReq{ConversationID: conversationID, UserIDs: userIDs, Seq: seq} + return ignoreResp(x.MsgClient.SetUserConversationsMinSeq(ctx, req)) +} diff --git a/pkg/rpcli/msggateway.go b/pkg/rpcli/msggateway.go new file mode 100644 index 000000000..3f19963d5 --- /dev/null +++ b/pkg/rpcli/msggateway.go @@ -0,0 +1,14 @@ +package rpcli + +import ( + "github.com/openimsdk/protocol/msggateway" + "google.golang.org/grpc" +) + +func NewMsgGatewayClient(cc grpc.ClientConnInterface) *MsgGatewayClient { + return &MsgGatewayClient{msggateway.NewMsgGatewayClient(cc)} +} + +type MsgGatewayClient struct { + msggateway.MsgGatewayClient +} diff --git a/pkg/rpcli/push.go b/pkg/rpcli/push.go new file mode 100644 index 000000000..bb33660e4 --- /dev/null +++ b/pkg/rpcli/push.go @@ -0,0 +1,14 @@ +package rpcli + +import ( + "github.com/openimsdk/protocol/push" + "google.golang.org/grpc" +) + +func NewPushMsgServiceClient(cc grpc.ClientConnInterface) *PushMsgServiceClient { + return &PushMsgServiceClient{push.NewPushMsgServiceClient(cc)} +} + +type PushMsgServiceClient struct { + push.PushMsgServiceClient +} diff --git a/pkg/rpcli/relation.go b/pkg/rpcli/relation.go new file mode 100644 index 000000000..dce0e7165 --- /dev/null +++ b/pkg/rpcli/relation.go @@ -0,0 +1,23 @@ +package rpcli + +import ( + "context" + "github.com/openimsdk/protocol/relation" + "google.golang.org/grpc" +) + +func NewRelationClient(cc grpc.ClientConnInterface) *RelationClient { + return &RelationClient{relation.NewFriendClient(cc)} +} + +type RelationClient struct { + relation.FriendClient +} + +func (x *RelationClient) GetFriendsInfo(ctx context.Context, ownerUserID string, friendUserIDs []string) ([]*relation.FriendInfoOnly, error) { + if len(friendUserIDs) == 0 { + return nil, nil + } + req := &relation.GetFriendInfoReq{OwnerUserID: ownerUserID, FriendUserIDs: friendUserIDs} + return extractField(ctx, x.FriendClient.GetFriendInfo, req, 
(*relation.GetFriendInfoResp).GetFriendInfos) +} diff --git a/pkg/rpcli/rtc.go b/pkg/rpcli/rtc.go new file mode 100644 index 000000000..18a79d6b4 --- /dev/null +++ b/pkg/rpcli/rtc.go @@ -0,0 +1,14 @@ +package rpcli + +import ( + "github.com/openimsdk/protocol/rtc" + "google.golang.org/grpc" +) + +func NewRtcServiceClient(cc grpc.ClientConnInterface) *RtcServiceClient { + return &RtcServiceClient{rtc.NewRtcServiceClient(cc)} +} + +type RtcServiceClient struct { + rtc.RtcServiceClient +} diff --git a/pkg/rpcli/third.go b/pkg/rpcli/third.go new file mode 100644 index 000000000..cbb28ff34 --- /dev/null +++ b/pkg/rpcli/third.go @@ -0,0 +1,14 @@ +package rpcli + +import ( + "github.com/openimsdk/protocol/third" + "google.golang.org/grpc" +) + +func NewThirdClient(cc grpc.ClientConnInterface) *ThirdClient { + return &ThirdClient{third.NewThirdClient(cc)} +} + +type ThirdClient struct { + third.ThirdClient +} diff --git a/pkg/rpcli/tool.go b/pkg/rpcli/tool.go new file mode 100644 index 000000000..2bd50bd00 --- /dev/null +++ b/pkg/rpcli/tool.go @@ -0,0 +1,32 @@ +package rpcli + +import ( + "context" + "github.com/openimsdk/tools/errs" + "google.golang.org/grpc" +) + +func extractField[A, B, C any](ctx context.Context, fn func(ctx context.Context, req *A, opts ...grpc.CallOption) (*B, error), req *A, get func(*B) C) (C, error) { + resp, err := fn(ctx, req) + if err != nil { + var c C + return c, err + } + return get(resp), nil +} + +func firstValue[A any](val []A, err error) (A, error) { + if err != nil { + var a A + return a, err + } + if len(val) == 0 { + var a A + return a, errs.ErrRecordNotFound.WrapMsg("record not found") + } + return val[0], nil +} + +func ignoreResp(_ any, err error) error { + return err +} diff --git a/pkg/rpcli/user.go b/pkg/rpcli/user.go new file mode 100644 index 000000000..357640345 --- /dev/null +++ b/pkg/rpcli/user.go @@ -0,0 +1,95 @@ +package rpcli + +import ( + "context" + "github.com/openimsdk/protocol/sdkws" + 
"github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/utils/datautil" + "google.golang.org/grpc" +) + +func NewUserClient(cc grpc.ClientConnInterface) *UserClient { + return &UserClient{user.NewUserClient(cc)} +} + +type UserClient struct { + user.UserClient +} + +func (x *UserClient) GetUsersInfo(ctx context.Context, userIDs []string) ([]*sdkws.UserInfo, error) { + if len(userIDs) == 0 { + return nil, nil + } + req := &user.GetDesignateUsersReq{UserIDs: userIDs} + return extractField(ctx, x.UserClient.GetDesignateUsers, req, (*user.GetDesignateUsersResp).GetUsersInfo) +} + +func (x *UserClient) GetUserInfo(ctx context.Context, userID string) (*sdkws.UserInfo, error) { + return firstValue(x.GetUsersInfo(ctx, []string{userID})) +} + +func (x *UserClient) CheckUser(ctx context.Context, userIDs []string) error { + if len(userIDs) == 0 { + return nil + } + users, err := x.GetUsersInfo(ctx, userIDs) + if err != nil { + return err + } + if len(users) != len(userIDs) { + return errs.ErrRecordNotFound.WrapMsg("user not found") + } + return nil +} + +func (x *UserClient) GetUsersInfoMap(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error) { + users, err := x.GetUsersInfo(ctx, userIDs) + if err != nil { + return nil, err + } + return datautil.SliceToMap(users, func(e *sdkws.UserInfo) string { + return e.UserID + }), nil +} + +func (x *UserClient) GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) { + req := &user.GetAllOnlineUsersReq{Cursor: cursor} + return x.UserClient.GetAllOnlineUsers(ctx, req) +} + +func (x *UserClient) GetUsersOnlinePlatform(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) { + if len(userIDs) == 0 { + return nil, nil + } + req := &user.GetUserStatusReq{UserIDs: userIDs} + return extractField(ctx, x.UserClient.GetUserStatus, req, (*user.GetUserStatusResp).GetStatusList) + +} + +func (x *UserClient) GetUserOnlinePlatform(ctx 
context.Context, userID string) ([]int32, error) { + status, err := x.GetUsersOnlinePlatform(ctx, []string{userID}) + if err != nil { + return nil, err + } + if len(status) == 0 { + return nil, nil + } + return status[0].PlatformIDs, nil +} + +func (x *UserClient) SetUserOnlineStatus(ctx context.Context, req *user.SetUserOnlineStatusReq) error { + if len(req.Status) == 0 { + return nil + } + return ignoreResp(x.UserClient.SetUserOnlineStatus(ctx, req)) +} + +func (x *UserClient) GetNotificationByID(ctx context.Context, userID string) error { + return ignoreResp(x.UserClient.GetNotificationAccount(ctx, &user.GetNotificationAccountReq{UserID: userID})) +} + +func (x *UserClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) { + req := &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}} + return extractField(ctx, x.UserClient.GetAllUserID, req, (*user.GetAllUserIDResp).GetUserIDs) +}