merge: merge main code into js branch. (#2648)

* feat: update group notification when set to null. (#2590)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* feat: update group notification when set to null.

* update log standard.

* feat: add long time push msg in prometheus (#2584)

* feat: add long time push msg in prometheus

* fix: log print

* fix: go mod

* fix: log msg

* fix: log init

* feat: push msg

* feat: go mod, remove cgo package

* feat: remove error log

* feat: test dummy push

* feat: redis pool config

* feat: push to kafka log

* feat: supports getting messages based on session ID and seq (#2582)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* feat: implement request batch count limit. (#2591)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* fix: getting messages based on session ID and seq (#2595)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* feat: avoid pulling messages from sessions with a large number of max seq values of 0 (#2602)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

* feat: avoid pulling messages from sessions with a large number of max seq values of 0

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* refactor: improve db structure in `storage/controller` (#2604)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* refactor: improve db structure in `storage/controller`

* feat: implement offline push using kafka (#2600)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* feat: implement offline push.

* feat: implement batch Push split

* update go mod

* feat: implement kafka producer and consumer.

* update format.

* add PushMQ log.

* feat: update Handler logic.

* update MQ logic.

* update

* update

* fix: update OfflinePushConsumerHandler.

* feat: API supports gzip (#2609)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

* feat: avoid pulling messages from sessions with a large number of max seq values of 0

* feat: API supports gzip

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* Fix err (#2608)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* feat: add rocksTimeout

* feat: wrap logs

* feat: add logs

* feat: listen config

* feat: enable listen TIME_WAIT port

* feat: add logs

* feat: cache batch

* chore: enable fullUserCache

* feat: push rpc num

* feat: push err

* feat: with operationID

* feat: sleep

* feat: change 1s

* feat: change log

* feat: implement Getbatch in rpcCache.

* feat: print getOnline cost

* feat: change log

* feat: change kafka and push config

* feat: del interface

* feat: fix err

* feat: change config

* feat: go mod

* feat: change config

* feat: change config

* feat: add sleep in push

* feat: warn logs

* feat: logs

* feat: logs

* feat: change port

* feat: start config

* feat: remove port reuse

* feat: prometheus config

* feat: prometheus config

* feat: prometheus config

* feat: add long time send msg to grafana

* feat: init

* feat: init

* feat: implement offline push.

* feat: batch get user online

* feat: implement batch Push split

* update go mod

* Revert "feat: change port"

This reverts commit 06d5e944

* feat: change port

* feat: change config

* feat: implement kafka producer and consumer.

* update format.

* add PushMQ log.

* feat: get all online users and init push

* feat: lock in online cache

* feat: config

* fix: init online status

* fix: add logs

* fix: userIDs

* fix: add logs

* feat: update Handler logic.

* update MQ logic.

* update

* update

* fix: method name

* fix: update OfflinePushConsumerHandler.

* fix: prommetrics

* fix: add logs

* fix: ctx

* fix: log

* fix: config

* feat: change port

* fix: atomic online cache status

---------

Co-authored-by: Monet Lee <monet_lee@163.com>

* feature: add GetConversationsHasReadAndMaxSeq interface to the WebSocket API. (#2611)

* fix: lru lock (#2613)

* fix: lru lock

* fix: lru lock

* fix: lru lock

* fix: nil pointer error on close (#2618)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

* feat: avoid pulling messages from sessions with a large number of max seq values of 0

* feat: API supports gzip

* go.mod

* fix: nil pointer error on close

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* feat: create group can push notification (#2617)

* fix: blockage caused by listen error (#2620)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

* feat: avoid pulling messages from sessions with a large number of max seq values of 0

* feat: API supports gzip

* go.mod

* fix: nil pointer error on close

* fix: listen error

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* fix: go.mod (#2621)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

* feat: avoid pulling messages from sessions with a large number of max seq values of 0

* feat: API supports gzip

* go.mod

* fix: nil pointer error on close

* fix: listen error

* fix: listen error

* update go.mod

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* feat: improve searchMsg implementation. (#2614)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* remove unused script.

* feat: improve searchMsg implementation.

* update mongo config.

* Fix lock (#2622)

* fix: log

* fix: lock

* fix: update setGroupInfoEX field name. (#2625)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* fix: update setGroupInfoEX field name.

* fix: update setGroupInfoEX field name (#2626)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* fix: update setGroupInfoEX field name.

* fix: update setGroupInfoEX field name

* feat: msg gateway add log (#2631)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

* feat: avoid pulling messages from sessions with a large number of max seq values of 0

* feat: API supports gzip

* go.mod

* fix: nil pointer error on close

* fix: listen error

* fix: listen error

* update go.mod

* feat: add log

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* fix: update setGroupInfoEx func name and field. (#2634)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* fix: update setGroupInfoEx func name and field.

* refactor: update groupinfoEx field.

* refactor: update database name in mongodb.yml

* add groupName Condition

* fix: setConversations req fill. (#2645)

* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* fix: setConversations req fill.

* fix: GetMsgBySeqs boundary issues (#2647)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* update gomake version

* update gomake version

* fix: seq conversion bug

* fix: redis pipe exec

* fix: ImportFriends

* fix: A large number of logs keysAndValues length is not even

* feat: mark read aggregate write

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* feat: online status supports redis cluster

* merge

* merge

* read seq is written to mongo

* read seq is written to mongo

* fix: invitation to join group notification

* fix: friend op_user_id

* feat: optimizing asynchronous context

* feat: optimizing memamq size

* feat: add GetSeqMessage

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: GroupApplicationAgreeMemberEnterNotification

* feat: go.mod

* feat: go.mod

* feat: join group notification and get seq

* feat: join group notification and get seq

* feat: avoid pulling messages from sessions with a large number of max seq values of 0

* feat: API supports gzip

* go.mod

* fix: nil pointer error on close

* fix: listen error

* fix: listen error

* update go.mod

* feat: add log

* fix: token parse token value

* fix: GetMsgBySeqs boundary issues

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>

* fix: the attribute version is obsolete, remove it (#2644)

* refactor: update UserRegister request field. (#2650)

---------

Co-authored-by: Monet Lee <monet_lee@163.com>
Co-authored-by: icey-yu <119291641+icey-yu@users.noreply.github.com>
Co-authored-by: chao <48119764+withchao@users.noreply.github.com>
Co-authored-by: withchao <withchao@users.noreply.github.com>
Co-authored-by: 蔡相跃 <caixiangyue007@gmail.com>
commit 38a989b9fa (parent 758606f627)
OpenIM-Gordon, 2024-09-23 15:23:04 +08:00, committed by GitHub
84 changed files with 3658 additions and 2904 deletions

@@ -54,7 +54,7 @@
 "liveNow": false,
 "panels": [
 {
-"collapsed": true,
+"collapsed": false,
 "gridPos": {
 "h": 1,
 "w": 24,
@@ -62,7 +62,10 @@
 "y": 0
 },
 "id": 35,
-"panels": [
+"panels": [],
+"title": "Server",
+"type": "row"
+},
 {
 "datasource": {
 "type": "prometheus",
@@ -115,7 +118,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -220,7 +224,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -355,7 +360,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -477,7 +483,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -595,7 +602,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -745,7 +753,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -776,8 +785,8 @@
 },
 "gridPos": {
 "h": 11,
-"w": 6,
-"x": 4,
+"w": 8,
+"x": 0,
 "y": 33
 },
 "id": 42,
@@ -807,7 +816,7 @@
 "hide": false,
 "instant": false,
 "interval": "",
-"legendFormat": "failed msgs",
+"legendFormat": "addr:{{instance}}",
 "range": true,
 "refId": "A"
 }
@@ -867,7 +876,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -898,8 +908,8 @@
 },
 "gridPos": {
 "h": 11,
-"w": 6,
-"x": 14,
+"w": 8,
+"x": 8,
 "y": 33
 },
 "id": 43,
@@ -929,7 +939,7 @@
 "hide": false,
 "instant": false,
 "interval": "",
-"legendFormat": "failed addr: {{instance}}",
+"legendFormat": "addr: {{instance}}",
 "range": true,
 "refId": "A"
 }
@@ -937,6 +947,129 @@
 "title": "Seq Set Failed Num",
 "type": "timeseries"
 },
+{
+"datasource": {
+"type": "prometheus",
+"uid": "${DS_PROMETHEUS}"
+},
+"description": "This metric represents the number of messages that take a long time to send.",
+"fieldConfig": {
+"defaults": {
+"color": {
+"mode": "palette-classic"
+},
+"custom": {
+"axisBorderShow": false,
+"axisCenteredZero": false,
+"axisColorMode": "text",
+"axisLabel": "",
+"axisPlacement": "auto",
+"barAlignment": 0,
+"drawStyle": "line",
+"fillOpacity": 0,
+"gradientMode": "none",
+"hideFrom": {
+"legend": false,
+"tooltip": false,
+"viz": false
+},
+"insertNulls": false,
+"lineInterpolation": "linear",
+"lineStyle": {
+"fill": "solid"
+},
+"lineWidth": 1,
+"pointSize": 5,
+"scaleDistribution": {
+"type": "linear"
+},
+"showPoints": "auto",
+"spanNulls": false,
+"stacking": {
+"group": "A",
+"mode": "none"
+},
+"thresholdsStyle": {
+"mode": "off"
+}
+},
+"fieldMinMax": false,
+"mappings": [],
+"thresholds": {
+"mode": "absolute",
+"steps": [
+{
+"color": "green",
+"value": null
+},
+{
+"color": "red",
+"value": 80
+}
+]
+},
+"unit": "none"
+},
+"overrides": [
+{
+"matcher": {
+"id": "byName",
+"options": "failed msgs"
+},
+"properties": [
+{
+"id": "color",
+"value": {
+"fixedColor": "dark-red",
+"mode": "fixed",
+"seriesBy": "last"
+}
+}
+]
+}
+]
+},
+"gridPos": {
+"h": 11,
+"w": 8,
+"x": 16,
+"y": 33
+},
+"id": 60,
+"options": {
+"legend": {
+"calcs": [],
+"displayMode": "list",
+"placement": "bottom",
+"showLegend": true
+},
+"tooltip": {
+"maxHeight": 600,
+"mode": "single",
+"sort": "none"
+}
+},
+"targets": [
+{
+"datasource": {
+"type": "prometheus",
+"uid": "${DS_PROMETHEUS}"
+},
+"editorMode": "code",
+"exemplar": false,
+"expr": "msg_long_time_push_total",
+"format": "time_series",
+"hide": false,
+"instant": false,
+"interval": "",
+"legendFormat": "addr:{{instance}}",
+"range": true,
+"refId": "A"
+}
+],
+"title": "Long Time Send Msg Total",
+"type": "timeseries"
+},
 {
 "datasource": {
 "type": "prometheus",
@@ -989,7 +1122,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -1107,7 +1241,8 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green"
+"color": "green",
+"value": null
 },
 {
 "color": "red",
@@ -1172,10 +1307,6 @@
 ],
 "title": "Msg Failed Insert Num",
 "type": "timeseries"
-}
-],
-"title": "Server",
-"type": "row"
 },
 {
 "collapsed": true,
@@ -1183,7 +1314,7 @@
 "h": 1,
 "w": 24,
 "x": 0,
-"y": 1
+"y": 54
 },
 "id": 22,
 "panels": [
@@ -1973,7 +2104,7 @@
 "h": 1,
 "w": 24,
 "x": 0,
-"y": 2
+"y": 55
 },
 "id": 28,
 "panels": [
@@ -2827,7 +2958,7 @@
 "h": 1,
 "w": 24,
 "x": 0,
-"y": 3
+"y": 56
 },
 "id": 25,
 "panels": [
@@ -3377,18 +3508,15 @@
 "type": "row"
 },
 {
-"collapsed": false,
+"collapsed": true,
 "gridPos": {
 "h": 1,
 "w": 24,
 "x": 0,
-"y": 4
+"y": 57
 },
 "id": 6,
-"panels": [],
-"title": "Process",
-"type": "row"
-},
+"panels": [
 {
 "datasource": {
 "type": "prometheus",
@@ -4062,8 +4190,7 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green",
-"value": null
+"color": "green"
 },
 {
 "color": "red",
@@ -4166,8 +4293,7 @@
 "mode": "absolute",
 "steps": [
 {
-"color": "green",
-"value": null
+"color": "green"
 },
 {
 "color": "red",
@@ -4220,6 +4346,10 @@
 ],
 "title": "Resident Memory bytes",
 "type": "timeseries"
+}
+],
+"title": "Process",
+"type": "row"
 },
 {
 "collapsed": true,
@@ -4227,7 +4357,7 @@
 "h": 1,
 "w": 24,
 "x": 0,
-"y": 49
+"y": 58
 },
 "id": 3,
 "panels": [
@@ -5441,6 +5571,6 @@
 "timezone": "",
 "title": "Demo",
 "uid": "a506d250-b606-4702-86a7-ac6aa1d069a1",
-"version": 23,
+"version": 2,
 "weekStart": ""
 }
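
The new "Long Time Send Msg Total" panel above graphs the msg_long_time_push_total counter introduced by "feat: add long time push msg in prometheus" (#2584). As a rough sketch of how such a counter can be exposed with prometheus/client_golang (the server's actual prommetrics wiring may differ, and the helper name here is hypothetical):

package prommetrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Counter behind the dashboard query msg_long_time_push_total.
var msgLongTimePushCounter = promauto.NewCounter(prometheus.CounterOpts{
	Name: "msg_long_time_push_total",
	Help: "The number of messages that take a long time to push.",
})

// MsgLongTimePushInc (hypothetical helper) would be called after a push
// whose latency exceeded the configured threshold.
func MsgLongTimePushInc() {
	msgLongTimePushCounter.Inc()
}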

@@ -14,12 +14,16 @@ toRedisTopic: toRedis
 toMongoTopic: toMongo
 # Kafka topic for push notifications
 toPushTopic: toPush
+# Kafka topic for offline push notifications
+toOfflinePushTopic: toOfflinePush
 # Consumer group ID for Redis topic
 toRedisGroupID: redis
 # Consumer group ID for MongoDB topic
 toMongoGroupID: mongo
 # Consumer group ID for push notifications topic
 toPushGroupID: push
+# Consumer group ID for offline push notifications topic
+toOfflinePushGroupID: offlinePush
 # TLS (Transport Layer Security) configuration
 tls:
 # Enable or disable TLS
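
The two keys added above back "feat: implement offline push using kafka" (#2600): pushes that miss the online path are re-queued on toOfflinePushTopic and drained by the toOfflinePushGroupID consumer group. A minimal producer sketch with IBM/sarama (already in go.mod); the broker address and payload shape are illustrative assumptions, not OpenIM's actual producer code:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// Re-queue a push payload for the offline-push consumer group to deliver.
	_, _, err = producer.SendMessage(&sarama.ProducerMessage{
		Topic: "toOfflinePush",
		Value: sarama.StringEncoder(`{"userIDs":["u1"],"title":"new message"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
}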

@@ -3,11 +3,14 @@ api:
 listenIP: 0.0.0.0
 # Listening ports; if multiple are configured, multiple instances will be launched, must be consistent with the number of prometheus.ports
 ports: [ 10002 ]
+# API compression level; 0: default compression, 1: best compression, 2: best speed, -1: no compression
+compressionLevel: 0
 prometheus:
 # Whether to enable prometheus
 enable: true
 # Prometheus listening ports, must match the number of api.ports
-ports: [ 20502 ]
+ports: [ 12002 ]
 # This address can be accessed via a browser
 grafanaURL: http://127.0.0.1:13000/
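
compressionLevel implements "feat: API supports gzip" (#2609) together with the gin-contrib/gzip module added in go.mod. A sketch of how the level could map onto the middleware, assuming the mapping follows the comment above (the actual router code may differ):

package main

import (
	"net/http"

	"github.com/gin-contrib/gzip"
	"github.com/gin-gonic/gin"
)

// newEngine is a hypothetical helper translating the config value into a
// compression level understood by gin-contrib/gzip.
func newEngine(compressionLevel int) *gin.Engine {
	level := gzip.DefaultCompression // 0
	switch compressionLevel {
	case 1:
		level = gzip.BestCompression
	case 2:
		level = gzip.BestSpeed
	case -1:
		level = gzip.NoCompression
	}
	r := gin.Default()
	r.Use(gzip.Gzip(level)) // compress every API response
	r.GET("/healthz", func(c *gin.Context) { c.String(http.StatusOK, "ok") })
	return r
}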

@@ -2,13 +2,13 @@ rpc:
 # The IP address where this RPC service registers itself; if left blank, it defaults to the internal network IP
 registerIP:
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10140 ]
+ports: [ 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 10149, 10150, 10151, 10152, 10153, 10154, 10155 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20640 ]
+ports: [ 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155 ]
 # IP address that the RPC/WebSocket service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
@@ -25,6 +25,3 @@ longConnSvr:
 # 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time
 multiLoginPolicy: 1

@@ -3,4 +3,4 @@ prometheus:
 enable: true
 # List of ports that Prometheus listens on; each port corresponds to an instance of monitoring. Ensure these are managed accordingly
 # Because four instances have been launched, four ports need to be specified
-ports: [ 20600, 20601, 20602, 20603 ]
+ports: [ 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027 ]

@@ -4,13 +4,13 @@ rpc:
 # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10170, 10171, 10172, 10173 ]
+ports: [ 10170, 10171, 10172, 10173, 10174, 10175, 10176, 10177, 10178, 10179, 10180, 10181, 10182, 10183, 10184, 10185 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20670, 20671, 20672, 20673 ]
+ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12181, 12182, 12183, 12184, 12185 ]
 maxConcurrentWorkers: 3
 #Use geTui for offline push notifications, or choose fcm or jpns; corresponding configuration settings must be specified.
@@ -38,9 +38,4 @@ iosPush:
 badgeCount: true
 production: false
+fullUserCache: true

@@ -4,15 +4,14 @@ rpc:
 # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10160 ]
+ports: [ 10200 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20660 ]
+ports: [ 12200 ]
 tokenPolicy:
 # Token validity period, in days
 expire: 90

@@ -4,10 +4,10 @@ rpc:
 # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10180 ]
+ports: [ 10220 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20680 ]
+ports: [ 12220 ]

@@ -4,10 +4,10 @@ rpc:
 # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10120 ]
+ports: [ 10240 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20620 ]
+ports: [ 12240 ]

@@ -4,13 +4,13 @@ rpc:
 # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10150 ]
+ports: [ 10260 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20650 ]
+ports: [ 12260 ]
 enableHistoryForNewMembers: true

@@ -4,17 +4,14 @@ rpc:
 # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10130 ]
+ports: [ 10280 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20630 ]
+ports: [ 12280 ]
 # Does sending messages require friend verification
 friendVerify: false

@@ -4,13 +4,13 @@ rpc:
 # IP address that the RPC service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP
 listenIP: 0.0.0.0
 # List of ports that the RPC service listens on; configuring multiple ports will launch multiple instances. These must match the number of configured prometheus ports
-ports: [ 10190 ]
+ports: [ 10300 ]
 prometheus:
 # Enable or disable Prometheus monitoring
 enable: true
 # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
-ports: [ 20690 ]
+ports: [ 12300 ]
 object:

@@ -4,14 +4,10 @@ rpc:
 # Listening IP; 0.0.0.0 means both internal and external IPs are listened to, if blank, the internal network IP is automatically obtained by default
 listenIP: 0.0.0.0
 # Listening ports; if multiple are configured, multiple instances will be launched, and must be consistent with the number of prometheus.ports
-ports: [ 10110 ]
+ports: [ 10320 ]
 prometheus:
 # Whether to enable prometheus
 enable: true
 # Prometheus listening ports, must be consistent with the number of rpc.ports
-ports: [ 20610 ]
+ports: [ 12320 ]

@@ -28,56 +28,59 @@ scrape_configs:
 - targets: [ internal_ip:20500 ]
 - job_name: openimserver-openim-api
 static_configs:
-- targets: [ internal_ip:20502 ]
+- targets: [ internal_ip:12002 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-msggateway
 static_configs:
-- targets: [ internal_ip:20640 ]
+- targets: [ internal_ip:12140 ]
+# - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-msgtransfer
 static_configs:
-- targets: [ internal_ip:20600, internal_ip:20601, internal_ip:20602, internal_ip:20603 ]
+- targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ]
+# - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-push
 static_configs:
-- targets: [ internal_ip:20670, internal_ip:20671, internal_ip:20672, internal_ip:20673]
+- targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ]
+# - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12181, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-rpc-auth
 static_configs:
-- targets: [ internal_ip:20600 ]
+- targets: [ internal_ip:12200 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-rpc-conversation
 static_configs:
-- targets: [ internal_ip:20680 ]
+- targets: [ internal_ip:12220 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-rpc-friend
 static_configs:
-- targets: [ internal_ip:20620 ]
+- targets: [ internal_ip:12240 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-rpc-group
 static_configs:
-- targets: [ internal_ip:20650 ]
+- targets: [ internal_ip:12260 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-rpc-msg
 static_configs:
-- targets: [ internal_ip:20630 ]
+- targets: [ internal_ip:12280 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-rpc-third
 static_configs:
-- targets: [ internal_ip:20690 ]
+- targets: [ internal_ip:12300 ]
 labels:
 namespace: default
 - job_name: openimserver-openim-rpc-user
 static_configs:
-- targets: [ internal_ip:20610 ]
+- targets: [ internal_ip:12320 ]
 labels:
 namespace: default

@@ -4,3 +4,4 @@ password: openIM123
 clusterMode: false
 db: 0
 maxRetry: 10
+poolSize: 100
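
poolSize caps the client connection pool and pairs with the "feat: redis pool config" commit in #2584's history. A sketch of where these keys land when building a go-redis v9 client; the constructor is hypothetical and the values simply mirror this file:

package main

import "github.com/redis/go-redis/v9"

// newRedis is a hypothetical constructor; the real client is built elsewhere
// in the codebase.
func newRedis() *redis.Client {
	return redis.NewClient(&redis.Options{
		Addr:       "localhost:6379",
		Password:   "openIM123",
		DB:         0,
		MaxRetries: 10,
		PoolSize:   100, // new knob: max concurrent connections per node
	})
}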

@@ -130,10 +130,10 @@ beforeSetGroupInfo:
 enable: false
 timeout: 5
 failedContinue: true
-afterSetGroupInfoEX:
+afterSetGroupInfoEx:
 enable: false
 timeout: 5
-beforeSetGroupInfoEX:
+beforeSetGroupInfoEx:
 enable: false
 timeout: 5
 failedContinue: true

@@ -1,5 +1,3 @@
-version: '3'
 networks:
 openim:
 driver: bridge
@@ -186,4 +184,3 @@ services:
 # networks:
 # - openim

go.mod

@@ -6,21 +6,21 @@ require (
 firebase.google.com/go v3.13.0+incompatible
 github.com/dtm-labs/rockscache v0.1.1
 github.com/gin-gonic/gin v1.9.1
-github.com/go-playground/validator/v10 v10.18.0
+github.com/go-playground/validator/v10 v10.20.0
 github.com/gogo/protobuf v1.3.2 // indirect
 github.com/golang-jwt/jwt/v4 v4.5.0
 github.com/gorilla/websocket v1.5.1
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 github.com/mitchellh/mapstructure v1.5.0
-github.com/openimsdk/protocol v0.0.72-alpha.9
-github.com/openimsdk/tools v0.0.49-alpha.55
+github.com/openimsdk/protocol v0.0.72-alpha.25
+github.com/openimsdk/tools v0.0.50-alpha.12
 github.com/pkg/errors v0.9.1 // indirect
 github.com/prometheus/client_golang v1.18.0
 github.com/stretchr/testify v1.9.0
 go.mongodb.org/mongo-driver v1.14.0
 google.golang.org/api v0.165.0
-google.golang.org/grpc v1.62.1
-google.golang.org/protobuf v1.33.0
+google.golang.org/grpc v1.66.2
+google.golang.org/protobuf v1.34.2
 gopkg.in/yaml.v3 v3.0.1
 )
@@ -29,6 +29,7 @@ require github.com/google/uuid v1.6.0
 require (
 github.com/IBM/sarama v1.43.0
 github.com/fatih/color v1.14.1
+github.com/gin-contrib/gzip v1.0.1
 github.com/go-redis/redis v6.15.9+incompatible
 github.com/go-redis/redismock/v9 v9.2.0
 github.com/hashicorp/golang-lru/v2 v2.0.7
@@ -42,13 +43,12 @@
 github.com/stathat/consistent v1.0.0
 go.uber.org/automaxprocs v1.5.3
 golang.org/x/exp v0.0.0-20230905200255-921286631fa9
-golang.org/x/sync v0.6.0
+golang.org/x/sync v0.8.0
 )
 require (
 cloud.google.com/go v0.112.0 // indirect
-cloud.google.com/go/compute v1.23.3 // indirect
-cloud.google.com/go/compute/metadata v0.2.3 // indirect
+cloud.google.com/go/compute/metadata v0.3.0 // indirect
 cloud.google.com/go/firestore v1.14.0 // indirect
 cloud.google.com/go/iam v1.1.5 // indirect
 cloud.google.com/go/longrunning v0.5.4 // indirect
@@ -73,10 +73,12 @@
 github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 // indirect
 github.com/aws/smithy-go v1.17.0 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
-github.com/bytedance/sonic v1.9.1 // indirect
-github.com/cespare/xxhash/v2 v2.2.0 // indirect
-github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+github.com/bytedance/sonic v1.11.6 // indirect
+github.com/bytedance/sonic/loader v0.1.1 // indirect
+github.com/cespare/xxhash/v2 v2.3.0 // indirect
 github.com/clbanning/mxj v1.8.4 // indirect
+github.com/cloudwego/base64x v0.1.4 // indirect
+github.com/cloudwego/iasm v0.2.0 // indirect
 github.com/coreos/go-semver v0.3.0 // indirect
 github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -117,7 +119,7 @@
 github.com/json-iterator/go v1.1.12 // indirect
 github.com/kelindar/simd v1.1.2 // indirect
 github.com/klauspost/compress v1.17.7 // indirect
-github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+github.com/klauspost/cpuid/v2 v2.2.7 // indirect
 github.com/leodido/go-urn v1.4.0 // indirect
 github.com/lestrrat-go/strftime v1.0.6 // indirect
 github.com/lithammer/shortuuid v3.0.0+incompatible // indirect
@@ -132,7 +134,7 @@
 github.com/modern-go/reflect2 v1.0.2 // indirect
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
 github.com/mozillazg/go-httpheader v0.4.0 // indirect
-github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 github.com/pierrec/lz4/v4 v4.1.21 // indirect
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 github.com/prometheus/client_model v0.5.0 // indirect
@@ -169,17 +171,17 @@
 go.opentelemetry.io/otel/trace v1.23.0 // indirect
 go.uber.org/atomic v1.9.0 // indirect
 go.uber.org/multierr v1.11.0 // indirect
-golang.org/x/arch v0.3.0 // indirect
+golang.org/x/arch v0.7.0 // indirect
 golang.org/x/image v0.15.0 // indirect
-golang.org/x/net v0.22.0 // indirect
-golang.org/x/oauth2 v0.17.0 // indirect
-golang.org/x/sys v0.19.0 // indirect
-golang.org/x/text v0.14.0 // indirect
+golang.org/x/net v0.29.0 // indirect
+golang.org/x/oauth2 v0.21.0 // indirect
+golang.org/x/sys v0.25.0 // indirect
+golang.org/x/text v0.18.0 // indirect
 golang.org/x/time v0.5.0 // indirect
 google.golang.org/appengine v1.6.8 // indirect
 google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect
-google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
 gorm.io/gorm v1.25.8 // indirect
 stathat.com/c/consistent v1.0.0 // indirect
 )
@@ -187,10 +189,10 @@
 require (
 github.com/go-playground/locales v0.14.1 // indirect
 github.com/goccy/go-json v0.10.2 // indirect
-github.com/mattn/go-isatty v0.0.19 // indirect
+github.com/mattn/go-isatty v0.0.20 // indirect
 github.com/spf13/cobra v1.8.0
-github.com/ugorji/go/codec v1.2.11 // indirect
+github.com/ugorji/go/codec v1.2.12 // indirect
 go.uber.org/zap v1.24.0 // indirect
-golang.org/x/crypto v0.21.0 // indirect
+golang.org/x/crypto v0.27.0 // indirect
 gopkg.in/ini.v1 v1.67.0 // indirect
 )

go.sum

@@ -1,10 +1,8 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
 cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw=
 cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ=
 cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
@@ -65,21 +63,23 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
 github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
 github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
 github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
-github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
-github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
-github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
+github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
+github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
+github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
-github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
-github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
 github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
+github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
+github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
+github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
@@ -121,6 +121,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
 github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
+github.com/gin-contrib/gzip v1.0.1 h1:HQ8ENHODeLY7a4g1Au/46Z92bdGFl74OhxcZble9WJE=
+github.com/gin-contrib/gzip v1.0.1/go.mod h1:njt428fdUNRvjuJf16tZMYZ2Yl+WQB53X5wmhDwXvC4=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
 github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
@@ -144,8 +146,8 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
 github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
-github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U=
-github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
+github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
+github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
 github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
 github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
 github.com/go-redis/redismock/v9 v9.2.0 h1:ZrMYQeKPECZPjOj5u9eyOjg8Nnb0BS9lkVIZ6IpsKLw=
@@ -257,8 +259,9 @@ github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLA
 github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
-github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -288,8 +291,8 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
 github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@@ -319,12 +322,12 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
 github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
 github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
 github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
-github.com/openimsdk/protocol v0.0.72-alpha.9 h1:Dyx4vs88IU4rJ2YcP/TdYp4ww8JjsMkV89hB/Eazx+A=
-github.com/openimsdk/protocol v0.0.72-alpha.9/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
-github.com/openimsdk/tools v0.0.49-alpha.55 h1:KPgC53oqiwZYssLKljhtXbWXifMlTj2SSQEusj4Uf4k=
-github.com/openimsdk/tools v0.0.49-alpha.55/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/openimsdk/protocol v0.0.72-alpha.25 h1:W8E6gnwt5V6anr/8lYOf5v/Lcsggf7gIAzJbw7YU6So=
+github.com/openimsdk/protocol v0.0.72-alpha.25/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
+github.com/openimsdk/tools v0.0.50-alpha.12 h1:rV3BxgqN+F79vZvdoQ+97Eob8ScsRVEM8D+Wrcl23uo=
+github.com/openimsdk/tools v0.0.50-alpha.12/go.mod h1:h1cYmfyaVtgFbKmb1Cfsl8XwUOMTt8ubVUQrdGtsUh4=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
 github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
 github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -408,8 +411,8 @@ github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr
 github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
 github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
 github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
-github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
-github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
+github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
 github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
@@ -456,8 +459,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
 go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
 go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
 golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
-golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
-golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc=
+golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -465,8 +468,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
 golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
@@ -493,19 +496,19 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
-golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -520,8 +523,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -534,8 +537,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -564,17 +567,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo=
 google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
+google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -586,8 +589,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -608,6 +611,7 @@ gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo=
 gorm.io/gorm v1.25.8/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 stathat.com/c/consistent v1.0.0 h1:ezyc51EGcRPJUxfHGSgJjWzJdj3NiMU9pNfLNGiXV0c=
 stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0=


@@ -35,8 +35,8 @@ func (o *GroupApi) SetGroupInfo(c *gin.Context) {
 	a2r.Call(group.GroupClient.SetGroupInfo, o.Client, c)
 }
 
-func (o *GroupApi) SetGroupInfoEX(c *gin.Context) {
-	a2r.Call(group.GroupClient.SetGroupInfoEX, o.Client, c)
+func (o *GroupApi) SetGroupInfoEx(c *gin.Context) {
+	a2r.Call(group.GroupClient.SetGroupInfoEx, o.Client, c)
 }
 
 func (o *GroupApi) JoinGroup(c *gin.Context) {

View File

@@ -2,6 +2,7 @@ package api
 
 import (
 	"fmt"
+	"github.com/gin-contrib/gzip"
 	"github.com/gin-gonic/gin"
 	"github.com/gin-gonic/gin/binding"
@@ -22,6 +23,13 @@ import (
 	"github.com/openimsdk/tools/mw"
 )
 
+const (
+	NoCompression      = -1
+	DefaultCompression = 0
+	BestCompression    = 1
+	BestSpeed          = 2
+)
+
 func prommetricsGin() gin.HandlerFunc {
 	return func(c *gin.Context) {
 		c.Next()
@@ -54,7 +62,15 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
 	conversationRpc := rpcclient.NewConversation(disCov, config.Share.RpcRegisterName.Conversation)
 	authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth)
 	thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL)
+	switch config.API.Api.CompressionLevel {
+	case NoCompression:
+	case DefaultCompression:
+		r.Use(gzip.Gzip(gzip.DefaultCompression))
+	case BestCompression:
+		r.Use(gzip.Gzip(gzip.BestCompression))
+	case BestSpeed:
+		r.Use(gzip.Gzip(gzip.BestSpeed))
+	}
 	r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
 	u := NewUserApi(*userRpc)
 	m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
@@ -114,7 +130,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
 	{
 		groupRouterGroup.POST("/create_group", g.CreateGroup)
 		groupRouterGroup.POST("/set_group_info", g.SetGroupInfo)
-		groupRouterGroup.POST("/set_group_info_ex", g.SetGroupInfoEX)
+		groupRouterGroup.POST("/set_group_info_ex", g.SetGroupInfoEx)
 		groupRouterGroup.POST("/join_group", g.JoinGroup)
 		groupRouterGroup.POST("/quit_group", g.QuitGroup)
 		groupRouterGroup.POST("/group_application_response", g.ApplicationGroupResponse)


@@ -22,6 +22,8 @@ import (
 	"sync/atomic"
 	"time"
 
+	"google.golang.org/protobuf/proto"
+
 	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
 	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/protocol/sdkws"
@@ -30,7 +32,6 @@ import (
 	"github.com/openimsdk/tools/log"
 	"github.com/openimsdk/tools/mcontext"
 	"github.com/openimsdk/tools/utils/stringutil"
-	"google.golang.org/protobuf/proto"
 )
 
 var (
@@ -220,6 +221,10 @@ func (c *Client) handleMessage(message []byte) error {
 		resp, messageErr = c.longConnServer.SendSignalMessage(ctx, binaryReq)
 	case WSPullMsgBySeqList:
 		resp, messageErr = c.longConnServer.PullMessageBySeqList(ctx, binaryReq)
+	case WSPullMsg:
+		resp, messageErr = c.longConnServer.GetSeqMessage(ctx, binaryReq)
+	case WSGetConvMaxReadSeq:
+		resp, messageErr = c.longConnServer.GetConversationsHasReadAndMaxSeq(ctx, binaryReq)
 	case WsLogoutMsg:
 		resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq)
 	case WsSetBackgroundStatus:


@@ -39,6 +39,8 @@ const (
 	WSPullMsgBySeqList  = 1002
 	WSSendMsg           = 1003
 	WSSendSignalMsg     = 1004
+	WSPullMsg           = 1005
+	WSGetConvMaxReadSeq = 1006
 	WSPushMsg           = 2001
 	WSKickOnlineMsg     = 2002
 	WsLogoutMsg         = 2003
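
The two new request identifiers let a client pull messages over the long connection (WSPullMsg dispatches to GetSeqMessage) and fetch per-conversation read/max sequence numbers (WSGetConvMaxReadSeq dispatches to GetConversationsHasReadAndMaxSeq). A hedged sketch of how a caller might frame such a request; the req struct below is a simplified stand-in for the gateway's real envelope, with only the identifier and payload assumed from this diff:

package main

import (
	"fmt"

	"github.com/openimsdk/protocol/msg"
	"google.golang.org/protobuf/proto"
)

const (
	WSPullMsg           = 1005
	WSGetConvMaxReadSeq = 1006
)

// req is a hypothetical minimal envelope; the real one carries more fields
// (token, operation ID, and so on).
type req struct {
	ReqIdentifier int
	Data          []byte
}

func main() {
	// The payload for WSPullMsg is a proto-encoded msg.GetSeqMessageReq,
	// matching what GrpcHandler.GetSeqMessage unmarshals on the server side.
	body, err := proto.Marshal(&msg.GetSeqMessageReq{})
	if err != nil {
		panic(err)
	}
	r := req{ReqIdentifier: WSPullMsg, Data: body}
	fmt.Printf("send identifier=%d payload=%d bytes\n", r.ReqIdentifier, len(r.Data))
}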


@@ -66,12 +66,16 @@ func (c *UserConnContext) Value(key any) any {
 }
 
 func newContext(respWriter http.ResponseWriter, req *http.Request) *UserConnContext {
+	remoteAddr := req.RemoteAddr
+	if forwarded := req.Header.Get("X-Forwarded-For"); forwarded != "" {
+		remoteAddr += "_" + forwarded
+	}
 	return &UserConnContext{
 		RespWriter: respWriter,
 		Req:        req,
 		Path:       req.URL.Path,
 		Method:     req.Method,
-		RemoteAddr: req.RemoteAddr,
+		RemoteAddr: remoteAddr,
 		ConnID:     encrypt.Md5(req.RemoteAddr + "_" + strconv.Itoa(int(timeutil.GetCurrentTimestampByMill()))),
 	}
 }
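
The newContext change above appends any X-Forwarded-For header to RemoteAddr, so the address recorded for a connection survives a reverse proxy that rewrites the TCP peer. The same logic in isolation (remoteAddrOf is an illustrative name, not from the codebase):

package main

import (
	"fmt"
	"net/http"
)

// remoteAddrOf reproduces the diff's logic: start from the TCP peer address
// and append the X-Forwarded-For value, if any, joined by "_".
func remoteAddrOf(req *http.Request) string {
	remoteAddr := req.RemoteAddr
	if forwarded := req.Header.Get("X-Forwarded-For"); forwarded != "" {
		remoteAddr += "_" + forwarded
	}
	return remoteAddr
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example.com/ws", nil)
	req.RemoteAddr = "10.0.0.1:52341"
	req.Header.Set("X-Forwarded-For", "203.0.113.7")
	fmt.Println(remoteAddrOf(req)) // 10.0.0.1:52341_203.0.113.7
}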


@@ -58,7 +58,7 @@ func Start(ctx context.Context, index int, conf *Config) error {
 	)
 	hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
-		longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
+		longServer.online, _ = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges)
 		return nil
 	})


@@ -19,6 +19,8 @@ import (
 	"sync"
 
 	"github.com/go-playground/validator/v10"
+	"google.golang.org/protobuf/proto"
+
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
 	"github.com/openimsdk/protocol/msg"
@@ -27,7 +29,6 @@ import (
 	"github.com/openimsdk/tools/discovery"
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/utils/jsonutil"
-	"google.golang.org/protobuf/proto"
 )
 
 type Req struct {
@@ -94,6 +95,8 @@ type MessageHandler interface {
 	SendMessage(context context.Context, data *Req) ([]byte, error)
 	SendSignalMessage(context context.Context, data *Req) ([]byte, error)
 	PullMessageBySeqList(context context.Context, data *Req) ([]byte, error)
+	GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error)
+	GetSeqMessage(context context.Context, data *Req) ([]byte, error)
 	UserLogout(context context.Context, data *Req) ([]byte, error)
 	SetUserDeviceBackground(context context.Context, data *Req) ([]byte, bool, error)
 }
@@ -175,7 +178,7 @@ func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]by
 func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) {
 	req := sdkws.PullMessageBySeqsReq{}
 	if err := proto.Unmarshal(data.Data, &req); err != nil {
-		return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "PullMessageBySeqsReq")
+		return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "PullMessageBySeqsReq")
 	}
 	if err := g.validate.Struct(data); err != nil {
 		return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "PullMessageBySeqsReq")
@@ -191,6 +194,44 @@
 	return c, nil
 }
 
+func (g GrpcHandler) GetConversationsHasReadAndMaxSeq(context context.Context, data *Req) ([]byte, error) {
+	req := msg.GetConversationsHasReadAndMaxSeqReq{}
+	if err := proto.Unmarshal(data.Data, &req); err != nil {
+		return nil, errs.WrapMsg(err, "err proto unmarshal", "action", "unmarshal", "dataType", "GetConversationsHasReadAndMaxSeq")
+	}
+	if err := g.validate.Struct(data); err != nil {
+		return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetConversationsHasReadAndMaxSeq")
+	}
+	resp, err := g.msgRpcClient.GetConversationsHasReadAndMaxSeq(context, &req)
+	if err != nil {
+		return nil, err
+	}
+	c, err := proto.Marshal(resp)
+	if err != nil {
+		return nil, errs.WrapMsg(err, "error marshaling response", "action", "marshal", "dataType", "GetConversationsHasReadAndMaxSeq")
+	}
+	return c, nil
+}
+
+func (g GrpcHandler) GetSeqMessage(context context.Context, data *Req) ([]byte, error) {
+	req := msg.GetSeqMessageReq{}
+	if err := proto.Unmarshal(data.Data, &req); err != nil {
+		return nil, errs.WrapMsg(err, "error unmarshaling request", "action", "unmarshal", "dataType", "GetSeqMessage")
+	}
+	if err := g.validate.Struct(data); err != nil {
+		return nil, errs.WrapMsg(err, "validation failed", "action", "validate", "dataType", "GetSeqMessage")
+	}
+	resp, err := g.msgRpcClient.GetSeqMessage(context, &req)
+	if err != nil {
+		return nil, err
+	}
+	c, err := proto.Marshal(resp)
+	if err != nil {
+		return nil, errs.WrapMsg(err, "error marshaling response", "action", "marshal", "dataType", "GetSeqMessage")
+	}
+	return c, nil
+}
+
 func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) {
 	req := push.DelUserPushTokenReq{}
 	if err := proto.Unmarshal(data.Data, &req); err != nil {
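
Both new GrpcHandler methods repeat the same four steps: proto-unmarshal the payload, validate the envelope, call the msg RPC, proto-marshal the reply. A hypothetical generic helper showing the shared shape (handleProto is not part of the codebase; the real handlers also validate the envelope struct):

package ws // hypothetical package name

import (
	"context"
	"fmt"

	"google.golang.org/protobuf/proto"
)

// handleProto decodes payload into req, invokes the RPC, and encodes the reply.
func handleProto[Q, R proto.Message](ctx context.Context, payload []byte, req Q, call func(context.Context, Q) (R, error)) ([]byte, error) {
	if err := proto.Unmarshal(payload, req); err != nil {
		return nil, fmt.Errorf("proto unmarshal: %w", err)
	}
	resp, err := call(ctx, req)
	if err != nil {
		return nil, err
	}
	return proto.Marshal(resp)
}

With such a helper, a handler body would reduce to something like handleProto(ctx, data.Data, &msg.GetSeqMessageReq{}, g.msgRpcClient.GetSeqMessage).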


@@ -265,7 +265,7 @@ func (ws *WsServer) registerClient(client *Client) {
 	if clientOK {
 		ws.clients.Set(client.UserID, client)
 		// There is already a connection to the platform
-		log.ZInfo(client.ctx, "repeat login", "userID", client.UserID, "platformID",
+		log.ZDebug(client.ctx, "repeat login", "userID", client.UserID, "platformID",
 			client.PlatformID, "old remote addr", getRemoteAdders(oldClients))
 		ws.onlineUserConnNum.Add(1)
 	} else {
@@ -293,7 +293,7 @@ func (ws *WsServer) registerClient(client *Client) {
 	wg.Wait()
 
-	log.ZInfo(
+	log.ZDebug(
 		client.ctx,
 		"user online",
 		"online user Num",
@@ -360,7 +360,7 @@ func (ws *WsServer) unregisterClient(client *Client) {
 	ws.onlineUserConnNum.Add(-1)
 	ws.subscription.DelClient(client)
 	//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
-	log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
+	log.ZDebug(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
 		ws.onlineUserNum.Load(), "online user conn Num",
 		ws.onlineUserConnNum.Load(),
 	)
@@ -425,6 +425,7 @@ func (ws *WsServer) wsHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	log.ZDebug(connContext, "new conn", "token", connContext.GetToken())
 	// Create a WebSocket long connection object
 	wsLongConn := newGWebSocket(WebSocket, ws.handshakeTimeout, ws.writeBufferSize)
 	if err := wsLongConn.GenerateLongConn(w, r); err != nil {


@@ -18,19 +18,20 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"net/http"
+	"os"
+	"os/signal"
+	"syscall"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
 	"github.com/openimsdk/tools/db/mongoutil"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/utils/datautil"
-	"net/http"
-	"os"
-	"os/signal"
-	"syscall"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
+	discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
 	"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
 	"github.com/openimsdk/tools/errs"
@@ -65,6 +66,7 @@ type Config struct {
 func Start(ctx context.Context, index int, config *Config) error {
 	log.CInfo(ctx, "MSG-TRANSFER server is initializing", "prometheusPorts",
 		config.MsgTransfer.Prometheus.Ports, "index", index)
+
 	mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
 	if err != nil {
 		return err
@@ -73,12 +75,13 @@ func Start(ctx context.Context, index int, config *Config) error {
 	if err != nil {
 		return err
 	}
-	client, err := kdisc.NewDiscoveryRegister(&config.Discovery, &config.Share)
+	client, err := discRegister.NewDiscoveryRegister(&config.Discovery, &config.Share)
 	if err != nil {
 		return err
 	}
 	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
 		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
+
 	msgModel := redis.NewMsgCache(rdb)
 	msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
 	if err != nil {
@@ -94,20 +97,21 @@ func Start(ctx context.Context, index int, config *Config) error {
 		return err
 	}
 	seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
-	msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
+	msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
 	if err != nil {
 		return err
 	}
 	conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
 	groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
-	historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgDatabase, &conversationRpcClient, &groupRpcClient)
+	historyCH, err := NewOnlineHistoryRedisConsumerHandler(&config.KafkaConfig, msgTransferDatabase, &conversationRpcClient, &groupRpcClient)
 	if err != nil {
 		return err
 	}
-	historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgDatabase)
+	historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgTransferDatabase)
 	if err != nil {
 		return err
 	}
+
 	msgTransfer := &MsgTransfer{
 		historyCH:      historyCH,
 		historyMongoCH: historyMongoCH,


@ -18,6 +18,10 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"strconv"
"strings"
"time"
"github.com/IBM/sarama" "github.com/IBM/sarama"
"github.com/go-redis/redis" "github.com/go-redis/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
@ -33,9 +37,6 @@ import (
"github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/stringutil" "github.com/openimsdk/tools/utils/stringutil"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"strconv"
"strings"
"time"
) )
const ( const (
@ -56,19 +57,19 @@ type OnlineHistoryRedisConsumerHandler struct {
redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage] redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage]
msgDatabase controller.CommonMsgDatabase msgTransferDatabase controller.MsgTransferDatabase
conversationRpcClient *rpcclient.ConversationRpcClient conversationRpcClient *rpcclient.ConversationRpcClient
groupRpcClient *rpcclient.GroupRpcClient groupRpcClient *rpcclient.GroupRpcClient
} }
func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase, func NewOnlineHistoryRedisConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase,
conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) { conversationRpcClient *rpcclient.ConversationRpcClient, groupRpcClient *rpcclient.GroupRpcClient) (*OnlineHistoryRedisConsumerHandler, error) {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false) historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var och OnlineHistoryRedisConsumerHandler var och OnlineHistoryRedisConsumerHandler
och.msgDatabase = database och.msgTransferDatabase = database
b := batcher.New[sarama.ConsumerMessage]( b := batcher.New[sarama.ConsumerMessage](
batcher.WithSize(size), batcher.WithSize(size),
@ -161,7 +162,7 @@ func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context,
return return
} }
for key, seq := range readSeq { for key, seq := range readSeq {
if err := och.msgDatabase.SetHasReadSeqToDB(ctx, key.userID, key.conversationID, seq); err != nil { if err := och.msgTransferDatabase.SetHasReadSeqToDB(ctx, key.userID, key.conversationID, seq); err != nil {
log.ZError(ctx, "set read seq to db error", err, "userID", key.userID, "conversationID", key.conversationID, "seq", seq) log.ZError(ctx, "set read seq to db error", err, "userID", key.userID, "conversationID", key.conversationID, "seq", seq)
} }
} }
@ -237,6 +238,11 @@ func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs [
} }
func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) { func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) {
log.ZInfo(ctx, "handle storage msg")
for _, storageMsg := range storageList {
log.ZDebug(ctx, "handle storage msg", "msg", storageMsg.message.String())
}
och.toPushTopic(ctx, key, conversationID, notStorageList) och.toPushTopic(ctx, key, conversationID, notStorageList)
var storageMessageList []*sdkws.MsgData var storageMessageList []*sdkws.MsgData
for _, msg := range storageList { for _, msg := range storageList {
@ -244,21 +250,25 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
} }
if len(storageMessageList) > 0 { if len(storageMessageList) > 0 {
msg := storageMessageList[0] msg := storageMessageList[0]
lastSeq, isNewConversation, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) lastSeq, isNewConversation, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList)
if err != nil && !errors.Is(errs.Unwrap(err), redis.Nil) { if err != nil && !errors.Is(errs.Unwrap(err), redis.Nil) {
log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList) log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList)
return return
} }
log.ZInfo(ctx, "BatchInsertChat2Cache end")
if isNewConversation { if isNewConversation {
switch msg.SessionType { switch msg.SessionType {
case constant.ReadGroupChatType: case constant.ReadGroupChatType:
log.ZInfo(ctx, "group chat first create conversation", "conversationID", log.ZDebug(ctx, "group chat first create conversation", "conversationID",
conversationID) conversationID)
userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID) userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID)
if err != nil { if err != nil {
log.ZWarn(ctx, "get group member ids error", err, "conversationID", log.ZWarn(ctx, "get group member ids error", err, "conversationID",
conversationID) conversationID)
} else { } else {
log.ZInfo(ctx, "GetGroupMemberIDs end")
if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx,
msg.GroupID, userIDs); err != nil { msg.GroupID, userIDs); err != nil {
log.ZWarn(ctx, "single chat first create conversation error", err, log.ZWarn(ctx, "single chat first create conversation error", err,
@ -277,13 +287,16 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
} }
} }
log.ZDebug(ctx, "success incr to next topic") log.ZInfo(ctx, "success incr to next topic")
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
if err != nil { if err != nil {
log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
} }
log.ZInfo(ctx, "MsgToMongoMQ end")
och.toPushTopic(ctx, key, conversationID, storageList) och.toPushTopic(ctx, key, conversationID, storageList)
log.ZInfo(ctx, "toPushTopic end")
} }
} }
@ -295,14 +308,14 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
storageMessageList = append(storageMessageList, msg.message) storageMessageList = append(storageMessageList, msg.message)
} }
if len(storageMessageList) > 0 { if len(storageMessageList) > 0 {
lastSeq, _, err := och.msgDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList) lastSeq, _, err := och.msgTransferDatabase.BatchInsertChat2Cache(ctx, conversationID, storageMessageList)
if err != nil { if err != nil {
log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID, log.ZError(ctx, "notification batch insert to redis error", err, "conversationID", conversationID,
"storageList", storageMessageList) "storageList", storageMessageList)
return return
} }
log.ZDebug(ctx, "success to next topic", "conversationID", conversationID) log.ZDebug(ctx, "success to next topic", "conversationID", conversationID)
err = och.msgDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq) err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
if err != nil { if err != nil {
log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
conversationID, "storageList", storageMessageList, "lastSeq", lastSeq) conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
@ -311,9 +324,10 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
} }
} }
func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(_ context.Context, key, conversationID string, msgs []*ContextMsg) { func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
for _, v := range msgs { for _, v := range msgs {
och.msgDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message) log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
} }
} }
@ -338,7 +352,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess
func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group
log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset", log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition()) claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) { och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
session.MarkMessage(lastMessage, "") session.MarkMessage(lastMessage, "")

View File

@ -16,6 +16,7 @@ package msgtransfer
import ( import (
"context" "context"
"github.com/IBM/sarama" "github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@ -28,10 +29,10 @@ import (
type OnlineHistoryMongoConsumerHandler struct { type OnlineHistoryMongoConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup historyConsumerGroup *kafka.MConsumerGroup
msgDatabase controller.CommonMsgDatabase msgTransferDatabase controller.MsgTransferDatabase
} }
func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.CommonMsgDatabase) (*OnlineHistoryMongoConsumerHandler, error) { func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase) (*OnlineHistoryMongoConsumerHandler, error) {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true) historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true)
if err != nil { if err != nil {
return nil, err return nil, err
@ -39,7 +40,7 @@ func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database cont
mc := &OnlineHistoryMongoConsumerHandler{ mc := &OnlineHistoryMongoConsumerHandler{
historyConsumerGroup: historyConsumerGroup, historyConsumerGroup: historyConsumerGroup,
msgDatabase: database, msgTransferDatabase: database,
} }
return mc, nil return mc, nil
} }
@ -56,8 +57,8 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg) log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
return return
} }
log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String()) log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq) err = mc.msgTransferDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq)
if err != nil { if err != nil {
log.ZError( log.ZError(
ctx, ctx,
@ -76,7 +77,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
for _, msg := range msgFromMQ.MsgData { for _, msg := range msgFromMQ.MsgData {
seqs = append(seqs, msg.Seq) seqs = append(seqs, msg.Seq)
} }
err = mc.msgDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs) err = mc.msgTransferDatabase.DeleteMessagesFromCache(ctx, msgFromMQ.ConversationID, seqs)
if err != nil { if err != nil {
log.ZError( log.ZError(
ctx, ctx,

View File

@ -17,6 +17,7 @@ package dummy
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/tools/log"
) )
func NewClient() *Dummy { func NewClient() *Dummy {
@ -27,5 +28,6 @@ type Dummy struct {
} }
func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error { func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error {
log.ZDebug(ctx, "dummy push")
return nil return nil
} }

View File

@ -18,11 +18,11 @@ import (
"context" "context"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"strconv" "strconv"
"sync" "sync"
"time" "time"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
@ -91,6 +91,15 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri
for i, v := range s.GetSplitResult() { for i, v := range s.GetSplitResult() {
go func(index int, userIDs []string) { go func(index int, userIDs []string) {
defer wg.Done() defer wg.Done()
for i := 0; i < len(userIDs); i += maxNum {
end := i + maxNum
if end > len(userIDs) {
end = len(userIDs)
}
if err = g.batchPush(ctx, token, userIDs[i:end], pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
}
}
if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil { if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil {
log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq) log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
} }
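The added loop above windows the recipient list so each GeTui request carries at most maxNum users. The same windowing pattern in isolation (the limit and the IDs here are made up; only the slicing mirrors the diff):

package main

import "fmt"

// chunk applies the windowing the diff adds before batchPush: at most n
// user IDs per request. n stands in for the assumed GeTui per-call limit
// maxNum; the real value comes from configuration.
func chunk(ids []string, n int) [][]string {
	var out [][]string
	for i := 0; i < len(ids); i += n {
		end := i + n
		if end > len(ids) {
			end = len(ids)
		}
		out = append(out, ids[i:end])
	}
	return out
}

func main() {
	ids := []string{"u1", "u2", "u3", "u4", "u5"}
	for _, batch := range chunk(ids, 2) {
		fmt.Println(batch) // each batch corresponds to one batchPush call
	}
}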

View File

@ -0,0 +1,122 @@
package push
import (
"context"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/protocol/constant"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/jsonutil"
"google.golang.org/protobuf/proto"
)
type OfflinePushConsumerHandler struct {
OfflinePushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
}
func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) {
var offlinePushConsumerHandler OfflinePushConsumerHandler
var err error
offlinePushConsumerHandler.offlinePusher = offlinePusher
offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID,
[]string{config.KafkaConfig.ToOfflinePushTopic}, true)
if err != nil {
return nil, err
}
return &offlinePushConsumerHandler, nil
}
func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for msg := range claim.Messages() {
ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg)
o.handleMsg2OfflinePush(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
}
func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) {
offlinePushMsg := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil {
log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg))
return
}
if offlinePushMsg.MsgData == nil || offlinePushMsg.UserIDs == nil {
log.ZError(ctx, "offline push msg is empty", errs.New("offlinePushMsg is empty"), "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData)
return
}
log.ZInfo(ctx, "receive to OfflinePush MQ", "userIDs", offlinePushMsg.UserIDs, "msg", offlinePushMsg.MsgData)
err := o.offlinePushMsg(ctx, offlinePushMsg.MsgData, offlinePushMsg.UserIDs)
if err != nil {
log.ZWarn(ctx, "offline push failed", err, "msg", offlinePushMsg.String())
}
}
func (o *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) {
type AtTextElem struct {
Text string `json:"text,omitempty"`
AtUserList []string `json:"atUserList,omitempty"`
IsAtSelf bool `json:"isAtSelf"`
}
opts = &options.Opts{Signal: &options.Signal{}}
if msg.OfflinePushInfo != nil {
opts.IOSBadgeCount = msg.OfflinePushInfo.IOSBadgeCount
opts.IOSPushSound = msg.OfflinePushInfo.IOSPushSound
opts.Ex = msg.OfflinePushInfo.Ex
}
if msg.OfflinePushInfo != nil {
title = msg.OfflinePushInfo.Title
content = msg.OfflinePushInfo.Desc
}
if title == "" {
switch msg.ContentType {
case constant.Text:
fallthrough
case constant.Picture:
fallthrough
case constant.Voice:
fallthrough
case constant.Video:
fallthrough
case constant.File:
title = constant.ContentType2PushContent[int64(msg.ContentType)]
case constant.AtText:
ac := AtTextElem{}
_ = jsonutil.JsonStringToStruct(string(msg.Content), &ac)
case constant.SignalingNotification:
title = constant.ContentType2PushContent[constant.SignalMsg]
default:
title = constant.ContentType2PushContent[constant.Common]
}
}
if content == "" {
content = title
}
return
}
func (o *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error {
title, content, opts, err := o.getOfflinePushInfos(msg)
if err != nil {
return err
}
err = o.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
if err != nil {
prommetrics.MsgOfflinePushFailedCounter.Inc()
return err
}
return nil
}
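For reference, the producer side of this new consumer is assumed to write a protobuf-encoded PushMsgReq to ToOfflinePushTopic. A hedged round-trip sketch; the MsgData and UserIDs field names come from the file above, the values are illustrative:

package main

import (
	"fmt"

	pbpush "github.com/openimsdk/protocol/push"
	"github.com/openimsdk/protocol/sdkws"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Producer side: the payload assumed to be written to ToOfflinePushTopic.
	req := &pbpush.PushMsgReq{
		MsgData: &sdkws.MsgData{SendID: "u1", RecvID: "u2"},
		UserIDs: []string{"u2"},
	}
	b, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	// Consumer side: the inverse of handleMsg2OfflinePush's first step.
	var got pbpush.PushMsgReq
	if err := proto.Unmarshal(b, &got); err != nil {
		panic(err)
	}
	fmt.Println(got.UserIDs, got.MsgData.SendID)
}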

View File

@ -27,12 +27,12 @@ func newEmptyOnlinePusher() *emptyOnlinePusher {
func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData,
pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) { pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
log.ZWarn(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil) log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
return nil, nil return nil, nil
} }
func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData,
wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string { wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
log.ZWarn(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil) log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
return nil return nil
} }

View File

@ -2,6 +2,7 @@ package push
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
@ -17,12 +18,12 @@ type pushServer struct {
disCov discovery.SvcDiscoveryRegistry disCov discovery.SvcDiscoveryRegistry
offlinePusher offlinepush.OfflinePusher offlinePusher offlinepush.OfflinePusher
pushCh *ConsumerHandler pushCh *ConsumerHandler
offlinePushCh *OfflinePushConsumerHandler
} }
type Config struct { type Config struct {
RpcConfig config.Push RpcConfig config.Push
RedisConfig config.Redis RedisConfig config.Redis
MongodbConfig config.Mongo
KafkaConfig config.Kafka KafkaConfig config.Kafka
NotificationConfig config.Notification NotificationConfig config.Notification
Share config.Share Share config.Share
@ -55,18 +56,30 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil { if err != nil {
return err return err
} }
database := controller.NewPushDatabase(cacheModel)
consumer, err := NewConsumerHandler(config, offlinePusher, rdb, client) database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig)
consumer, err := NewConsumerHandler(config, database, offlinePusher, rdb, client)
if err != nil { if err != nil {
return err return err
} }
offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher)
if err != nil {
return err
}
pbpush.RegisterPushMsgServiceServer(server, &pushServer{ pbpush.RegisterPushMsgServiceServer(server, &pushServer{
database: database, database: database,
disCov: client, disCov: client,
offlinePusher: offlinePusher, offlinePusher: offlinePusher,
pushCh: consumer, pushCh: consumer,
offlinePushCh: offlinePushConsumer,
}) })
go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer) go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer)
go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer)
return nil return nil
} }

View File

@ -1,33 +1,20 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push package push
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"github.com/IBM/sarama" "github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
pbchat "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/msggateway"
pbpush "github.com/openimsdk/protocol/push" pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
@ -40,12 +27,16 @@ import (
"github.com/openimsdk/tools/utils/timeutil" "github.com/openimsdk/tools/utils/timeutil"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"math/rand"
"strconv"
"time"
) )
type ConsumerHandler struct { type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup pushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher onlinePusher OnlinePusher
pushDatabase controller.PushDatabase
onlineCache *rpccache.OnlineCache onlineCache *rpccache.OnlineCache
groupLocalCache *rpccache.GroupLocalCache groupLocalCache *rpccache.GroupLocalCache
conversationLocalCache *rpccache.ConversationLocalCache conversationLocalCache *rpccache.ConversationLocalCache
@ -56,7 +47,7 @@ type ConsumerHandler struct {
config *Config config *Config
} }
func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, func NewConsumerHandler(config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) { client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) {
var consumerHandler ConsumerHandler var consumerHandler ConsumerHandler
var err error var err error
@ -65,7 +56,9 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
if err != nil { if err != nil {
return nil, err return nil, err
} }
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID) userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
consumerHandler.offlinePusher = offlinePusher consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config) consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group) consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
@ -75,42 +68,45 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb) consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
consumerHandler.config = config consumerHandler.config = config
consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil) consumerHandler.pushDatabase = database
consumerHandler.onlineCache, err = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil)
if err != nil {
return nil, err
}
return &consumerHandler, nil return &consumerHandler, nil
} }
func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) { func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
msgFromMQ := pbchat.PushMsgDataToMQ{} msgFromMQ := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &msgFromMQ); err != nil { if err := proto.Unmarshal(msg, &msgFromMQ); err != nil {
log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg)) log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg))
return return
} }
pbData := &pbpush.PushMsgReq{
MsgData: msgFromMQ.MsgData,
ConversationID: msgFromMQ.ConversationID,
}
sec := msgFromMQ.MsgData.SendTime / 1000 sec := msgFromMQ.MsgData.SendTime / 1000
nowSec := timeutil.GetCurrentTimestampBySecond() nowSec := timeutil.GetCurrentTimestampBySecond()
if nowSec-sec > 10 { if nowSec-sec > 10 {
log.ZWarn(ctx, "long time push msg", nil, "msg", pbData.String(), "sec", sec, "nowSec", nowSec, "nowSec-sec", nowSec-sec) prommetrics.MsgLoneTimePushCounter.Inc()
log.ZWarn(ctx, "its been a while since the message was sent", nil, "msg", msgFromMQ.String(), "sec", sec, "nowSec", nowSec, "nowSec-sec", nowSec-sec)
} }
var err error var err error
switch msgFromMQ.MsgData.SessionType { switch msgFromMQ.MsgData.SessionType {
case constant.ReadGroupChatType: case constant.ReadGroupChatType:
err = c.Push2Group(ctx, pbData.MsgData.GroupID, pbData.MsgData) err = c.Push2Group(ctx, msgFromMQ.MsgData.GroupID, msgFromMQ.MsgData)
default: default:
var pushUserIDList []string var pushUserIDList []string
isSenderSync := datautil.GetSwitchFromOptions(pbData.MsgData.Options, constant.IsSenderSync) isSenderSync := datautil.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync)
if !isSenderSync || pbData.MsgData.SendID == pbData.MsgData.RecvID { if !isSenderSync || msgFromMQ.MsgData.SendID == msgFromMQ.MsgData.RecvID {
pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID) pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID)
} else { } else {
pushUserIDList = append(pushUserIDList, pbData.MsgData.RecvID, pbData.MsgData.SendID) pushUserIDList = append(pushUserIDList, msgFromMQ.MsgData.RecvID, msgFromMQ.MsgData.SendID)
} }
err = c.Push2User(ctx, pushUserIDList, pbData.MsgData) err = c.Push2User(ctx, pushUserIDList, msgFromMQ.MsgData)
} }
if err != nil { if err != nil {
log.ZWarn(ctx, "push failed", err, "msg", pbData.String()) log.ZWarn(ctx, "push failed", err, "msg", msgFromMQ.String())
} }
} }
@ -119,6 +115,14 @@ func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil } func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
c.onlineCache.Lock.Lock()
for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver {
c.onlineCache.Cond.Wait()
}
c.onlineCache.Lock.Unlock()
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
log.ZInfo(ctx, "begin consume messages")
for msg := range claim.Messages() { for msg := range claim.Messages() {
ctx := c.pushConsumerGroup.GetContextFromMsg(msg) ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
c.handleMs2PsChat(ctx, msg.Value) c.handleMs2PsChat(ctx, msg.Value)
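The first added block in the hunk above gates ConsumeClaim until the online cache finishes its initial subscription. A self-contained sketch of that condition-variable gate; the phase constant is illustrative, the real one is rpccache.DoSubscribeOver:

package main

import (
	"fmt"
	"sync"
	"time"
)

const doSubscribeOver = 2 // illustrative phase value

func main() {
	var (
		mu    sync.Mutex
		phase int
	)
	cond := sync.NewCond(&mu)

	// Stand-in for the online-cache warm-up goroutine.
	go func() {
		time.Sleep(10 * time.Millisecond)
		mu.Lock()
		phase = doSubscribeOver
		mu.Unlock()
		cond.Broadcast()
	}()

	mu.Lock()
	for phase < doSubscribeOver {
		cond.Wait() // releases mu while sleeping, reacquires on wake
	}
	mu.Unlock()
	fmt.Println("online cache ready, begin consuming")
}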
@ -129,20 +133,27 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType. // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) { func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String()) log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
defer func(duration time.Time) {
t := time.Since(duration)
log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "msg", msg.String(), "time cost", t)
}(time.Now())
if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil { if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
return err return err
} }
log.ZInfo(ctx, "webhookBeforeOnlinePush end")
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs) wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
if err != nil { if err != nil {
return err return err
} }
log.ZDebug(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs) log.ZInfo(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs)
if !c.shouldPushOffline(ctx, msg) { if !c.shouldPushOffline(ctx, msg) {
return nil return nil
} }
log.ZInfo(ctx, "shouldPushOffline end")
for _, v := range wsResults { for _, v := range wsResults {
//message sender do not need offline push //message sender do not need offline push
@ -161,7 +172,7 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *
offlinePushUserID, msg, nil); err != nil { offlinePushUserID, msg, nil); err != nil {
return err return err
} }
log.ZInfo(ctx, "webhookBeforeOfflinePush end")
err = c.offlinePushMsg(ctx, msg, offlinePushUserID) err = c.offlinePushMsg(ctx, msg, offlinePushUserID)
if err != nil { if err != nil {
log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg) log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg)
@ -183,21 +194,11 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
} }
func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) { func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
var ( onlineUserIDs, offlineUserIDs, err := c.onlineCache.GetUsersOnline(ctx, pushToUserIDs)
onlineUserIDs []string
offlineUserIDs []string
)
for _, userID := range pushToUserIDs {
online, err := c.onlineCache.GetUserOnline(ctx, userID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if online {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
}
log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs) log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs)
var result []*msggateway.SingleMsgToUserResults var result []*msggateway.SingleMsgToUserResults
if len(onlineUserIDs) > 0 { if len(onlineUserIDs) > 0 {
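The rewrite above replaces the per-user GetUserOnline loop with one batched GetUsersOnline call that partitions the recipients in a single pass. A faked-cache sketch of that contract (the type and map here are stand-ins, not the real rpccache.OnlineCache):

package main

import (
	"context"
	"fmt"
)

// Faked cache; the real one is Redis-backed with a local layer. Only the
// method shape mirrors the diff.
type onlineCache struct{ online map[string]bool }

func (c *onlineCache) GetUsersOnline(_ context.Context, userIDs []string) (online, offline []string, err error) {
	for _, id := range userIDs {
		if c.online[id] {
			online = append(online, id)
		} else {
			offline = append(offline, id)
		}
	}
	return online, offline, nil
}

func main() {
	cache := &onlineCache{online: map[string]bool{"u1": true}}
	on, off, _ := cache.GetUsersOnline(context.Background(), []string{"u1", "u2", "u3"})
	fmt.Println("online:", on, "offline:", off)
}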
@ -216,57 +217,70 @@ func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.
} }
func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) { func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID) log.ZInfo(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
defer func(duration time.Time) {
t := time.Since(duration)
log.ZInfo(ctx, "Get group msg from msg_transfer and push msg end", "msg", msg.String(), "groupID", groupID, "time cost", t)
}(time.Now())
var pushToUserIDs []string var pushToUserIDs []string
if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg, if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg,
&pushToUserIDs); err != nil { &pushToUserIDs); err != nil {
return err return err
} }
log.ZInfo(ctx, "webhookBeforeGroupOnlinePush end")
err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg) err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg)
if err != nil { if err != nil {
return err return err
} }
log.ZInfo(ctx, "groupMessagesHandler end")
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs) wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil { if err != nil {
return err return err
} }
log.ZDebug(ctx, "group push result", "result", wsResults, "msg", msg) log.ZInfo(ctx, "group push result", "result", wsResults, "msg", msg)
if !c.shouldPushOffline(ctx, msg) { if !c.shouldPushOffline(ctx, msg) {
return nil return nil
} }
needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs) needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs)
log.ZInfo(ctx, "GetOnlinePushFailedUserIDs end")
// filter out users who do not need offline push, e.g. those with do-not-disturb enabled. // filter out users who do not need offline push, e.g. those with do-not-disturb enabled.
needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs) needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs)
if err != nil { if err != nil {
return err return err
} }
log.ZInfo(ctx, "filterGroupMessageOfflinePush end")
// Use offline push messaging // Use offline push messaging
if len(needOfflinePushUserIDs) > 0 { if len(needOfflinePushUserIDs) > 0 {
c.asyncOfflinePush(ctx, needOfflinePushUserIDs, msg)
}
return nil
}
func (c *ConsumerHandler) asyncOfflinePush(ctx context.Context, needOfflinePushUserIDs []string, msg *sdkws.MsgData) {
var offlinePushUserIDs []string var offlinePushUserIDs []string
err = c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs) err := c.webhookBeforeOfflinePush(ctx, &c.config.WebhooksConfig.BeforeOfflinePush, needOfflinePushUserIDs, msg, &offlinePushUserIDs)
if err != nil { if err != nil {
return err log.ZWarn(ctx, "webhookBeforeOfflinePush failed", err, "msg", msg)
return
} }
if len(offlinePushUserIDs) > 0 { if len(offlinePushUserIDs) > 0 {
needOfflinePushUserIDs = offlinePushUserIDs needOfflinePushUserIDs = offlinePushUserIDs
} }
if err := c.pushDatabase.MsgToOfflinePushMQ(ctx, conversationutil.GenConversationUniqueKeyForSingle(msg.SendID, msg.RecvID), needOfflinePushUserIDs, msg); err != nil {
err = c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs) log.ZError(ctx, "Msg To OfflinePush MQ error", err, "needOfflinePushUserIDs",
if err != nil { needOfflinePushUserIDs, "msg", msg)
log.ZWarn(ctx, "offlinePushMsg failed", err, "groupID", groupID, "msg", msg) prommetrics.SingleChatMsgProcessFailedCounter.Inc()
return nil return
}
} }
}
return nil
}
func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) { func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) {
if len(*pushToUserIDs) == 0 { if len(*pushToUserIDs) == 0 {
*pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID) *pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID)
@ -300,7 +314,7 @@ func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID stri
if err := unmarshalNotificationElem(msg.Content, &tips); err != nil { if err := unmarshalNotificationElem(msg.Content, &tips); err != nil {
return err return err
} }
log.ZInfo(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs) log.ZDebug(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs)
if len(c.config.Share.IMAdminUserID) > 0 { if len(c.config.Share.IMAdminUserID) > 0 {
ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0]) ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0])
} }
@ -384,6 +398,7 @@ func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, conten
} }
return return
} }
func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error { func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error {
conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID) conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID)
maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID) maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID)
@ -392,6 +407,7 @@ func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context,
} }
return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq) return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq)
} }
func unmarshalNotificationElem(bytes []byte, t any) error { func unmarshalNotificationElem(bytes []byte, t any) error {
var notification sdkws.NotificationElem var notification sdkws.NotificationElem
if err := json.Unmarshal(bytes, &notification); err != nil { if err := json.Unmarshal(bytes, &notification); err != nil {

View File

@ -278,8 +278,8 @@ func (c *conversationServer) SetConversations(ctx context.Context, req *pbconver
if req.Conversation.MsgDestructTime != nil { if req.Conversation.MsgDestructTime != nil {
m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value
} }
if req.Conversation.MsgDestructTime != nil { if req.Conversation.IsMsgDestruct != nil {
m["msg_destruct_time"] = req.Conversation.MsgDestructTime.Value m["is_msg_destruct"] = req.Conversation.IsMsgDestruct.Value
} }
if req.Conversation.BurnDuration != nil { if req.Conversation.BurnDuration != nil {
m["burn_duration"] = req.Conversation.BurnDuration.Value m["burn_duration"] = req.Conversation.BurnDuration.Value

View File

@ -359,73 +359,73 @@ func (s *groupServer) webhookAfterSetGroupInfo(ctx context.Context, after *confi
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoResp{}, after) s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoResp{}, after)
} }
func (s *groupServer) webhookBeforeSetGroupInfoEX(ctx context.Context, before *config.BeforeConfig, req *group.SetGroupInfoEXReq) error { func (s *groupServer) webhookBeforeSetGroupInfoEx(ctx context.Context, before *config.BeforeConfig, req *group.SetGroupInfoExReq) error {
return webhook.WithCondition(ctx, before, func(ctx context.Context) error { return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
cbReq := &callbackstruct.CallbackBeforeSetGroupInfoEXReq{ cbReq := &callbackstruct.CallbackBeforeSetGroupInfoExReq{
CallbackCommand: callbackstruct.CallbackBeforeSetGroupInfoCommand, CallbackCommand: callbackstruct.CallbackBeforeSetGroupInfoExCommand,
GroupID: req.GroupInfoForSetEX.GroupID, GroupID: req.GroupID,
GroupName: req.GroupInfoForSetEX.GroupName, GroupName: req.GroupName,
Notification: req.GroupInfoForSetEX.Notification, Notification: req.Notification,
Introduction: req.GroupInfoForSetEX.Introduction, Introduction: req.Introduction,
FaceURL: req.GroupInfoForSetEX.FaceURL, FaceURL: req.FaceURL,
} }
if req.GroupInfoForSetEX.Ex != nil { if req.Ex != nil {
cbReq.Ex = req.GroupInfoForSetEX.Ex cbReq.Ex = req.Ex
} }
log.ZDebug(ctx, "debug CallbackBeforeSetGroupInfoEX", "ex", cbReq.Ex) log.ZDebug(ctx, "debug CallbackBeforeSetGroupInfoEX", "ex", cbReq.Ex)
if req.GroupInfoForSetEX.NeedVerification != nil { if req.NeedVerification != nil {
cbReq.NeedVerification = req.GroupInfoForSetEX.NeedVerification cbReq.NeedVerification = req.NeedVerification
} }
if req.GroupInfoForSetEX.LookMemberInfo != nil { if req.LookMemberInfo != nil {
cbReq.LookMemberInfo = req.GroupInfoForSetEX.LookMemberInfo cbReq.LookMemberInfo = req.LookMemberInfo
} }
if req.GroupInfoForSetEX.ApplyMemberFriend != nil { if req.ApplyMemberFriend != nil {
cbReq.ApplyMemberFriend = req.GroupInfoForSetEX.ApplyMemberFriend cbReq.ApplyMemberFriend = req.ApplyMemberFriend
} }
resp := &callbackstruct.CallbackBeforeSetGroupInfoEXResp{} resp := &callbackstruct.CallbackBeforeSetGroupInfoExResp{}
if err := s.webhookClient.SyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, before); err != nil { if err := s.webhookClient.SyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, resp, before); err != nil {
return err return err
} }
datautil.NotNilReplace(&req.GroupInfoForSetEX.GroupID, &resp.GroupID) datautil.NotNilReplace(&req.GroupID, &resp.GroupID)
datautil.NotNilReplace(&req.GroupInfoForSetEX.GroupName, &resp.GroupName) datautil.NotNilReplace(&req.GroupName, &resp.GroupName)
datautil.NotNilReplace(&req.GroupInfoForSetEX.FaceURL, &resp.FaceURL) datautil.NotNilReplace(&req.FaceURL, &resp.FaceURL)
datautil.NotNilReplace(&req.GroupInfoForSetEX.Introduction, &resp.Introduction) datautil.NotNilReplace(&req.Introduction, &resp.Introduction)
datautil.NotNilReplace(&req.GroupInfoForSetEX.Ex, &resp.Ex) datautil.NotNilReplace(&req.Ex, &resp.Ex)
datautil.NotNilReplace(&req.GroupInfoForSetEX.NeedVerification, &resp.NeedVerification) datautil.NotNilReplace(&req.NeedVerification, &resp.NeedVerification)
datautil.NotNilReplace(&req.GroupInfoForSetEX.LookMemberInfo, &resp.LookMemberInfo) datautil.NotNilReplace(&req.LookMemberInfo, &resp.LookMemberInfo)
datautil.NotNilReplace(&req.GroupInfoForSetEX.ApplyMemberFriend, &resp.ApplyMemberFriend) datautil.NotNilReplace(&req.ApplyMemberFriend, &resp.ApplyMemberFriend)
return nil return nil
}) })
} }
func (s *groupServer) webhookAfterSetGroupInfoEX(ctx context.Context, after *config.AfterConfig, req *group.SetGroupInfoEXReq) { func (s *groupServer) webhookAfterSetGroupInfoEx(ctx context.Context, after *config.AfterConfig, req *group.SetGroupInfoExReq) {
cbReq := &callbackstruct.CallbackAfterSetGroupInfoEXReq{ cbReq := &callbackstruct.CallbackAfterSetGroupInfoExReq{
CallbackCommand: callbackstruct.CallbackAfterSetGroupInfoCommand, CallbackCommand: callbackstruct.CallbackAfterSetGroupInfoExCommand,
GroupID: req.GroupInfoForSetEX.GroupID, GroupID: req.GroupID,
GroupName: req.GroupInfoForSetEX.GroupName, GroupName: req.GroupName,
Notification: req.GroupInfoForSetEX.Notification, Notification: req.Notification,
Introduction: req.GroupInfoForSetEX.Introduction, Introduction: req.Introduction,
FaceURL: req.GroupInfoForSetEX.FaceURL, FaceURL: req.FaceURL,
} }
if req.GroupInfoForSetEX.Ex != nil { if req.Ex != nil {
cbReq.Ex = req.GroupInfoForSetEX.Ex cbReq.Ex = req.Ex
} }
if req.GroupInfoForSetEX.NeedVerification != nil { if req.NeedVerification != nil {
cbReq.NeedVerification = req.GroupInfoForSetEX.NeedVerification cbReq.NeedVerification = req.NeedVerification
} }
if req.GroupInfoForSetEX.LookMemberInfo != nil { if req.LookMemberInfo != nil {
cbReq.LookMemberInfo = req.GroupInfoForSetEX.LookMemberInfo cbReq.LookMemberInfo = req.LookMemberInfo
} }
if req.GroupInfoForSetEX.ApplyMemberFriend != nil { if req.ApplyMemberFriend != nil {
cbReq.ApplyMemberFriend = req.GroupInfoForSetEX.ApplyMemberFriend cbReq.ApplyMemberFriend = req.ApplyMemberFriend
} }
s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoEXResp{}, after) s.webhookClient.AsyncPost(ctx, cbReq.GetCallbackCommand(), cbReq, &callbackstruct.CallbackAfterSetGroupInfoExResp{}, after)
} }
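The webhook plumbing above leans on datautil.NotNilReplace to let the callback response selectively override request fields. A sketch of that replace-only-when-present contract, assuming the generic two-pointer shape used here:

package main

import "fmt"

// Assumed contract of datautil.NotNilReplace: overwrite dst only when the
// webhook response actually carried a value.
func notNilReplace[T any](dst, src *T) {
	if src != nil {
		*dst = *src
	}
}

func main() {
	name := "old name"
	fromWebhook := "webhook name"
	notNilReplace(&name, &fromWebhook)
	fmt.Println(name) // webhook name

	notNilReplace[string](&name, nil) // nil response field: dst unchanged
	fmt.Println(name)                 // still webhook name
}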

View File

@ -20,6 +20,7 @@ import (
pbgroup "github.com/openimsdk/protocol/group" pbgroup "github.com/openimsdk/protocol/group"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
) )
@ -54,11 +55,11 @@ func UpdateGroupInfoMap(ctx context.Context, group *sdkws.GroupInfoForSet) map[s
return m return m
} }
func UpdateGroupInfoEXMap(ctx context.Context, group *sdkws.GroupInfoForSetEX) map[string]any { func UpdateGroupInfoExMap(ctx context.Context, group *pbgroup.SetGroupInfoExReq) (map[string]any, error) {
m := make(map[string]any) m := make(map[string]any)
if group.GroupName != "" { if group.GroupName != nil && group.GroupName.Value != "" {
m["group_name"] = group.GroupName return nil, errs.ErrArgs.WrapMsg("group name is empty")
} }
if group.Notification != nil { if group.Notification != nil {
m["notification"] = group.Notification.Value m["notification"] = group.Notification.Value
@ -84,7 +85,7 @@ func UpdateGroupInfoEXMap(ctx context.Context, group *sdkws.GroupInfoForSetEX) m
m["ex"] = group.Ex.Value m["ex"] = group.Ex.Value
} }
return m return m, nil
} }
func UpdateGroupStatusMap(status int) map[string]any { func UpdateGroupStatusMap(status int) map[string]any {
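UpdateGroupInfoExMap only writes a field into the Mongo update map when its wrapper is non-nil, and rejects a set-but-empty group name. The same guard pattern in isolation, with errs.ErrArgs swapped for a plain error to keep the sketch self-contained:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

// buildUpdate mirrors the guard pattern: unset wrappers are skipped, a set
// but empty group name is an argument error.
func buildUpdate(groupName, notification *wrapperspb.StringValue) (map[string]any, error) {
	m := make(map[string]any)
	if groupName != nil {
		if groupName.Value == "" {
			return nil, fmt.Errorf("group name is empty")
		}
		m["group_name"] = groupName.Value
	}
	if notification != nil {
		m["notification"] = notification.Value // an empty value clears the announcement
	}
	return m, nil
}

func main() {
	m, err := buildUpdate(wrapperspb.String("ops team"), nil)
	fmt.Println(m, err) // map[group_name:ops team] <nil>

	_, err = buildUpdate(wrapperspb.String(""), nil)
	fmt.Println(err) // group name is empty
}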

View File

@ -304,6 +304,13 @@ func (g *groupServer) CreateGroup(ctx context.Context, req *pbgroup.CreateGroupR
} }
g.notification.GroupCreatedNotification(ctx, tips) g.notification.GroupCreatedNotification(ctx, tips)
if req.GroupInfo.Notification != "" {
g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{
Group: tips.Group,
OpUser: tips.OpUser,
})
}
reqCallBackAfter := &pbgroup.CreateGroupReq{ reqCallBackAfter := &pbgroup.CreateGroupReq{
MemberUserIDs: userIDs, MemberUserIDs: userIDs,
GroupInfo: resp.GroupInfo, GroupInfo: resp.GroupInfo,
@ -833,7 +840,7 @@ func (g *groupServer) GroupApplicationResponse(ctx context.Context, req *pbgroup
if member == nil { if member == nil {
log.ZDebug(ctx, "GroupApplicationResponse", "member is nil") log.ZDebug(ctx, "GroupApplicationResponse", "member is nil")
} else { } else {
if err = g.notification.MemberEnterNotification(ctx, req.GroupID, req.FromUserID); err != nil { if err = g.notification.GroupApplicationAgreeMemberEnterNotification(ctx, req.GroupID, groupRequest.InviterUserID, req.FromUserID); err != nil {
return nil, err return nil, err
} }
} }
@ -1028,12 +1035,12 @@ func (g *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf
} }
resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSet.GroupID}) resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSet.GroupID})
if err != nil { if err != nil {
log.ZWarn(ctx, "GetGroupMemberIDs", err) log.ZWarn(ctx, "GetGroupMemberIDs is failed.", err)
return return
} }
conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification}
if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil {
log.ZWarn(ctx, "SetConversations", err, resp.UserIDs, conversation) log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation)
} }
}() }()
g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser})
@ -1051,13 +1058,13 @@ func (g *groupServer) SetGroupInfo(ctx context.Context, req *pbgroup.SetGroupInf
return &pbgroup.SetGroupInfoResp{}, nil return &pbgroup.SetGroupInfoResp{}, nil
} }
func (g *groupServer) SetGroupInfoEX(ctx context.Context, req *pbgroup.SetGroupInfoEXReq) (*pbgroup.SetGroupInfoEXResp, error) { func (g *groupServer) SetGroupInfoEx(ctx context.Context, req *pbgroup.SetGroupInfoExReq) (*pbgroup.SetGroupInfoExResp, error) {
var opMember *model.GroupMember var opMember *model.GroupMember
if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) { if !authverify.IsAppManagerUid(ctx, g.config.Share.IMAdminUserID) {
var err error var err error
opMember, err = g.db.TakeGroupMember(ctx, req.GroupInfoForSetEX.GroupID, mcontext.GetOpUserID(ctx)) opMember, err = g.db.TakeGroupMember(ctx, req.GroupID, mcontext.GetOpUserID(ctx))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1071,11 +1078,11 @@ func (g *groupServer) SetGroupInfoEX(ctx context.Context, req *pbgroup.SetGroupI
} }
} }
if err := g.webhookBeforeSetGroupInfoEX(ctx, &g.config.WebhooksConfig.BeforeSetGroupInfoEX, req); err != nil && err != servererrs.ErrCallbackContinue { if err := g.webhookBeforeSetGroupInfoEx(ctx, &g.config.WebhooksConfig.BeforeSetGroupInfoEx, req); err != nil && err != servererrs.ErrCallbackContinue {
return nil, err return nil, err
} }
group, err := g.db.TakeGroup(ctx, req.GroupInfoForSetEX.GroupID) group, err := g.db.TakeGroup(ctx, req.GroupID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1097,16 +1104,20 @@ func (g *groupServer) SetGroupInfoEX(ctx context.Context, req *pbgroup.SetGroupI
return nil, err return nil, err
} }
updatedData := UpdateGroupInfoEXMap(ctx, req.GroupInfoForSetEX) updatedData, err := UpdateGroupInfoExMap(ctx, req)
if err != nil {
return nil, err
}
if len(updatedData) == 0 { if len(updatedData) == 0 {
return &pbgroup.SetGroupInfoEXResp{}, nil return &pbgroup.SetGroupInfoExResp{}, nil
} }
if err := g.db.UpdateGroup(ctx, group.GroupID, updatedData); err != nil { if err := g.db.UpdateGroup(ctx, group.GroupID, updatedData); err != nil {
return nil, err return nil, err
} }
group, err = g.db.TakeGroup(ctx, req.GroupInfoForSetEX.GroupID) group, err = g.db.TakeGroup(ctx, req.GroupID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1122,43 +1133,46 @@ func (g *groupServer) SetGroupInfoEX(ctx context.Context, req *pbgroup.SetGroupI
} }
num := len(updatedData) num := len(updatedData)
if req.GroupInfoForSetEX.Notification != nil { if req.Notification != nil {
num-- num--
if req.Notification.Value != "" {
func() { func() {
conversation := &pbconversation.ConversationReq{ conversation := &pbconversation.ConversationReq{
ConversationID: msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, req.GroupInfoForSetEX.GroupID), ConversationID: msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, req.GroupID),
ConversationType: constant.ReadGroupChatType, ConversationType: constant.ReadGroupChatType,
GroupID: req.GroupInfoForSetEX.GroupID, GroupID: req.GroupID,
} }
resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupInfoForSetEX.GroupID}) resp, err := g.GetGroupMemberUserIDs(ctx, &pbgroup.GetGroupMemberUserIDsReq{GroupID: req.GroupID})
if err != nil { if err != nil {
log.ZWarn(ctx, "GetGroupMemberIDs", err) log.ZWarn(ctx, "GetGroupMemberIDs is failed.", err)
return return
} }
conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification} conversation.GroupAtType = &wrapperspb.Int32Value{Value: constant.GroupNotification}
if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil { if err := g.conversationRpcClient.SetConversations(ctx, resp.UserIDs, conversation); err != nil {
log.ZWarn(ctx, "SetConversations", err, resp.UserIDs, conversation) log.ZWarn(ctx, "SetConversations", err, "UserIDs", resp.UserIDs, "conversation", conversation)
} }
}() }()
g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser}) g.notification.GroupInfoSetAnnouncementNotification(ctx, &sdkws.GroupInfoSetAnnouncementTips{Group: tips.Group, OpUser: tips.OpUser})
} }
if req.GroupInfoForSetEX.GroupName != "" { }
num--
if req.GroupName != nil {
num--
g.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser}) g.notification.GroupInfoSetNameNotification(ctx, &sdkws.GroupInfoSetNameTips{Group: tips.Group, OpUser: tips.OpUser})
} }
if num > 0 { if num > 0 {
g.notification.GroupInfoSetNotification(ctx, tips) g.notification.GroupInfoSetNotification(ctx, tips)
} }
g.webhookAfterSetGroupInfoEX(ctx, &g.config.WebhooksConfig.AfterSetGroupInfoEX, req) g.webhookAfterSetGroupInfoEx(ctx, &g.config.WebhooksConfig.AfterSetGroupInfoEx, req)
return &pbgroup.SetGroupInfoEXResp{}, nil return &pbgroup.SetGroupInfoExResp{}, nil
} }
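SetGroupInfoEx counts the updated fields and subtracts the ones that already get a dedicated notification (announcement, name); only a positive remainder triggers the generic GroupInfoSetNotification. A sketch of that bookkeeping, keyed on the update-map entries as an approximation of the request checks:

package main

import "fmt"

// notificationsFor approximates the counting: every updated field counts,
// fields with a dedicated notification are subtracted, and any remainder
// yields the generic notification.
func notificationsFor(updated map[string]any) []string {
	var out []string
	num := len(updated)
	if _, ok := updated["notification"]; ok {
		num--
		out = append(out, "announcement notification")
	}
	if _, ok := updated["group_name"]; ok {
		num--
		out = append(out, "name notification")
	}
	if num > 0 {
		out = append(out, "generic GroupInfoSetNotification")
	}
	return out
}

func main() {
	fmt.Println(notificationsFor(map[string]any{"group_name": "x", "face_url": "y"}))
	// [name notification generic GroupInfoSetNotification]
}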
func (g *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) (*pbgroup.TransferGroupOwnerResp, error) { func (g *groupServer) TransferGroupOwner(ctx context.Context, req *pbgroup.TransferGroupOwnerReq) (*pbgroup.TransferGroupOwnerResp, error) {

View File

@ -16,27 +16,28 @@ package group
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/versionctx"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient" "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient/notification"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
pbgroup "github.com/openimsdk/protocol/group" pbgroup "github.com/openimsdk/protocol/group"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/stringutil" "github.com/openimsdk/tools/utils/stringutil"
"go.mongodb.org/mongo-driver/mongo"
) )
// GroupApplicationReceiver // GroupApplicationReceiver
@ -227,10 +228,13 @@ func (g *GroupNotificationSender) groupMemberDB2PB(member *model.GroupMember, ap
} */ } */
func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws.GroupMemberFullInfo, groupID string) (err error) { func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws.GroupMemberFullInfo, groupID string) (err error) {
return g.fillOpUserByUserID(ctx, mcontext.GetOpUserID(ctx), opUser, groupID)
}
func (g *GroupNotificationSender) fillOpUserByUserID(ctx context.Context, userID string, opUser **sdkws.GroupMemberFullInfo, groupID string) error {
if opUser == nil { if opUser == nil {
return errs.ErrInternalServer.WrapMsg("**sdkws.GroupMemberFullInfo is nil") return errs.ErrInternalServer.WrapMsg("**sdkws.GroupMemberFullInfo is nil")
} }
userID := mcontext.GetOpUserID(ctx)
if groupID != "" { if groupID != "" {
if authverify.IsManagerUserID(userID, g.config.Share.IMAdminUserID) { if authverify.IsManagerUserID(userID, g.config.Share.IMAdminUserID) {
*opUser = &sdkws.GroupMemberFullInfo{ *opUser = &sdkws.GroupMemberFullInfo{
@ -243,7 +247,7 @@ func (g *GroupNotificationSender) fillOpUser(ctx context.Context, opUser **sdkws
member, err := g.db.TakeGroupMember(ctx, groupID, userID) member, err := g.db.TakeGroupMember(ctx, groupID, userID)
if err == nil { if err == nil {
*opUser = g.groupMemberDB2PB(member, 0) *opUser = g.groupMemberDB2PB(member, 0)
} else if !errs.ErrRecordNotFound.Is(err) { } else if !(errors.Is(err, mongo.ErrNoDocuments) || errs.ErrRecordNotFound.Is(err)) {
return err return err
} }
} }
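The widened check above treats both the raw driver sentinel and the wrapped errs.ErrRecordNotFound as "member not found". A sketch of the driver half only (the errs half is OpenIM-specific):

package main

import (
	"errors"
	"fmt"

	"go.mongodb.org/mongo-driver/mongo"
)

// isNotFound covers the driver sentinel; the real check in the diff also
// accepts the wrapped errs.ErrRecordNotFound.
func isNotFound(err error) bool {
	return errors.Is(err, mongo.ErrNoDocuments)
}

func main() {
	err := fmt.Errorf("take group member: %w", mongo.ErrNoDocuments)
	fmt.Println(isNotFound(err)) // true even through wrapping
}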
@ -509,7 +513,7 @@ func (g *GroupNotificationSender) MemberKickedNotification(ctx context.Context,
g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips) g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips)
} }
func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID ...string) error { func (g *GroupNotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error {
var err error var err error
defer func() { defer func() {
if err != nil { if err != nil {
@ -546,15 +550,32 @@ func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, g
return err return err
} }
tips := &sdkws.MemberInvitedTips{Group: group, InvitedUserList: users} tips := &sdkws.MemberInvitedTips{
if err = g.fillOpUser(ctx, &tips.OpUser, tips.Group.GroupID); err != nil { Group: group,
InvitedUserList: users,
}
opUserID := mcontext.GetOpUserID(ctx)
if err = g.fillOpUserByUserID(ctx, opUserID, &tips.OpUser, tips.Group.GroupID); err != nil {
return nil return nil
} }
switch {
case invitedOpUserID == "":
case invitedOpUserID == opUserID:
tips.InviterUser = tips.OpUser
default:
if err = g.fillOpUserByUserID(ctx, invitedOpUserID, &tips.InviterUser, tips.Group.GroupID); err != nil {
return err
}
}
g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID) g.setVersion(ctx, &tips.GroupMemberVersion, &tips.GroupMemberVersionID, database.GroupMemberVersionName, tips.Group.GroupID)
g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips) g.Notification(ctx, mcontext.GetOpUserID(ctx), group.GroupID, constant.MemberInvitedNotification, tips)
return nil return nil
} }
func (g *GroupNotificationSender) MemberEnterNotification(ctx context.Context, groupID string, entrantUserID ...string) error {
return g.GroupApplicationAgreeMemberEnterNotification(ctx, groupID, "", entrantUserID...)
}
func (g *GroupNotificationSender) GroupDismissedNotification(ctx context.Context, tips *sdkws.GroupDismissedTips) { func (g *GroupNotificationSender) GroupDismissedNotification(ctx context.Context, tips *sdkws.GroupDismissedTips) {
var err error var err error
defer func() { defer func() {

View File

@ -67,7 +67,7 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg.
return nil, err return nil, err
} }
log.ZInfo(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start)) log.ZDebug(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start))
return &msg.ClearMsgResp{}, nil return &msg.ClearMsgResp{}, nil
} }

View File

@@ -16,16 +16,16 @@ package msg
 
 import (
 	"context"
-	"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
-	"github.com/openimsdk/tools/utils/datautil"
-	"github.com/openimsdk/tools/utils/timeutil"
 	"github.com/openimsdk/open-im-server/v3/pkg/authverify"
 	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
+	"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
 	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/protocol/msg"
 	"github.com/openimsdk/protocol/sdkws"
 	"github.com/openimsdk/tools/log"
+	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/tools/utils/timeutil"
 )
 
 func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessageBySeqsReq) (*sdkws.PullMessageBySeqsResp, error) {
@@ -86,6 +86,35 @@ func (m *msgServer) PullMessageBySeqs(ctx context.Context, req *sdkws.PullMessag
 	return resp, nil
 }
 
+func (m *msgServer) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) {
+	resp := &msg.GetSeqMessageResp{
+		Msgs:             make(map[string]*sdkws.PullMsgs),
+		NotificationMsgs: make(map[string]*sdkws.PullMsgs),
+	}
+	for _, conv := range req.Conversations {
+		_, _, msgs, err := m.MsgDatabase.GetMsgBySeqs(ctx, req.UserID, conv.ConversationID, conv.Seqs)
+		if err != nil {
+			return nil, err
+		}
+		var pullMsgs *sdkws.PullMsgs
+		if ok := false; conversationutil.IsNotificationConversationID(conv.ConversationID) {
+			pullMsgs, ok = resp.NotificationMsgs[conv.ConversationID]
+			if !ok {
+				pullMsgs = &sdkws.PullMsgs{}
+				resp.NotificationMsgs[conv.ConversationID] = pullMsgs
+			}
+		} else {
+			pullMsgs, ok = resp.Msgs[conv.ConversationID]
+			if !ok {
+				pullMsgs = &sdkws.PullMsgs{}
+				resp.Msgs[conv.ConversationID] = pullMsgs
+			}
+		}
+		pullMsgs.Msgs = append(pullMsgs.Msgs, msgs...)
+	}
+	return resp, nil
+}
+
 func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sdkws.GetMaxSeqResp, error) {
 	if err := authverify.CheckAccessV3(ctx, req.UserID, m.config.Share.IMAdminUserID); err != nil {
 		return nil, err
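The new `GetSeqMessage` splits results into ordinary and notification conversations so a client can fetch both in one round trip. A client-side sketch; the request element type name `ConversationSeqs` is an assumption (only `conv.ConversationID` and `conv.Seqs` appear in the diff), as is the generated `MsgClient` name:

```go
// Hypothetical caller of the new RPC; IDs are placeholders.
func pullBySeqs(ctx context.Context, c msg.MsgClient) error {
	resp, err := c.GetSeqMessage(ctx, &msg.GetSeqMessageReq{
		UserID: "user-1",
		Conversations: []*msg.ConversationSeqs{ // assumed type name
			{ConversationID: "si_user-1_user-2", Seqs: []int64{1, 2, 3}},
		},
	})
	if err != nil {
		return err
	}
	_ = resp.Msgs             // conversationID -> *sdkws.PullMsgs (ordinary chats)
	_ = resp.NotificationMsgs // same shape, notification conversations only
	return nil
}
```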
@@ -104,13 +133,20 @@ func (m *msgServer) GetMaxSeq(ctx context.Context, req *sdkws.GetMaxSeqReq) (*sd
 		log.ZWarn(ctx, "GetMaxSeqs error", err, "conversationIDs", conversationIDs, "maxSeqs", maxSeqs)
 		return nil, err
 	}
+	// Avoid pulling messages from a large number of conversations whose max seq is 0.
+	for conversationID, seq := range maxSeqs {
+		if seq == 0 {
+			delete(maxSeqs, conversationID)
+		}
+	}
 	resp := new(sdkws.GetMaxSeqResp)
 	resp.MaxSeqs = maxSeqs
 	return resp, nil
 }
 
 func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq) (resp *msg.SearchMessageResp, err error) {
-	var chatLogs []*sdkws.MsgData
+	// var chatLogs []*sdkws.MsgData
+	var chatLogs []*msg.SearchedMsgData
 	var total int64
 	resp = &msg.SearchMessageResp{}
 	if total, chatLogs, err = m.MsgDatabase.SearchMessage(ctx, req); err != nil {
@@ -125,17 +161,19 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq
 		recvMap  = make(map[string]string)
 		groupMap = make(map[string]*sdkws.GroupInfo)
 	)
 	for _, chatLog := range chatLogs {
-		if chatLog.SenderNickname == "" {
-			sendIDs = append(sendIDs, chatLog.SendID)
+		if chatLog.MsgData.SenderNickname == "" {
+			sendIDs = append(sendIDs, chatLog.MsgData.SendID)
 		}
-		switch chatLog.SessionType {
+		switch chatLog.MsgData.SessionType {
 		case constant.SingleChatType, constant.NotificationChatType:
-			recvIDs = append(recvIDs, chatLog.RecvID)
+			recvIDs = append(recvIDs, chatLog.MsgData.RecvID)
 		case constant.WriteGroupChatType, constant.ReadGroupChatType:
-			groupIDs = append(groupIDs, chatLog.GroupID)
+			groupIDs = append(groupIDs, chatLog.MsgData.GroupID)
 		}
 	}
 	// Retrieve sender and receiver information
 	if len(sendIDs) != 0 {
 		sendInfos, err := m.UserLocalCache.GetUsersInfo(ctx, sendIDs)
@@ -146,6 +184,7 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq
 			sendMap[sendInfo.UserID] = sendInfo.Nickname
 		}
 	}
+
 	if len(recvIDs) != 0 {
 		recvInfos, err := m.UserLocalCache.GetUsersInfo(ctx, recvIDs)
 		if err != nil {
@@ -171,20 +210,21 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq
 			}
 		}
 	}
 	// Construct response with updated information
 	for _, chatLog := range chatLogs {
 		pbchatLog := &msg.ChatLog{}
-		datautil.CopyStructFields(pbchatLog, chatLog)
-		pbchatLog.SendTime = chatLog.SendTime
-		pbchatLog.CreateTime = chatLog.CreateTime
-		if chatLog.SenderNickname == "" {
-			pbchatLog.SenderNickname = sendMap[chatLog.SendID]
+		datautil.CopyStructFields(pbchatLog, chatLog.MsgData)
+		pbchatLog.SendTime = chatLog.MsgData.SendTime
+		pbchatLog.CreateTime = chatLog.MsgData.CreateTime
+		if chatLog.MsgData.SenderNickname == "" {
+			pbchatLog.SenderNickname = sendMap[chatLog.MsgData.SendID]
 		}
-		switch chatLog.SessionType {
+		switch chatLog.MsgData.SessionType {
 		case constant.SingleChatType, constant.NotificationChatType:
-			pbchatLog.RecvNickname = recvMap[chatLog.RecvID]
-		case constant.WriteGroupChatType, constant.ReadGroupChatType:
-			groupInfo := groupMap[chatLog.GroupID]
+			pbchatLog.RecvNickname = recvMap[chatLog.MsgData.RecvID]
+		case constant.ReadGroupChatType:
+			groupInfo := groupMap[chatLog.MsgData.GroupID]
 			pbchatLog.SenderFaceURL = groupInfo.FaceURL
 			pbchatLog.GroupMemberCount = groupInfo.MemberCount // Reflects actual member count
 			pbchatLog.RecvID = groupInfo.GroupID
@@ -192,7 +232,9 @@ func (m *msgServer) SearchMessage(ctx context.Context, req *msg.SearchMessageReq
 			pbchatLog.GroupOwner = groupInfo.OwnerUserID
 			pbchatLog.GroupType = groupInfo.GroupType
 		}
-		resp.ChatLogs = append(resp.ChatLogs, pbchatLog)
+		searchChatLog := &msg.SearchChatLog{ChatLog: pbchatLog, IsRevoked: chatLog.IsRevoked}
+		resp.ChatLogs = append(resp.ChatLogs, searchChatLog)
 	}
 	resp.ChatLogsNum = int32(total)
 	return resp, nil
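Since the response element changes from a bare `*msg.ChatLog` to a wrapper, consumers need one extra dereference. A consumer-side sketch (`handleChatLog` is a placeholder for the caller's logic):

```go
// resp is a *msg.SearchMessageResp returned by the handler above.
for _, item := range resp.ChatLogs {
	if item.IsRevoked {
		continue // render a "message revoked" placeholder instead of content
	}
	handleChatLog(item.ChatLog)
}
```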

----------------------------------------

@@ -312,16 +312,20 @@ func (s *friendServer) GetPaginationFriendsApplyTo(ctx context.Context, req *rel
 	if err := s.userRpcClient.Access(ctx, req.UserID); err != nil {
 		return nil, err
 	}
+
 	total, friendRequests, err := s.db.PageFriendRequestToMe(ctx, req.UserID, req.Pagination)
 	if err != nil {
 		return nil, err
 	}
+
 	resp = &relation.GetPaginationFriendsApplyToResp{}
 	resp.FriendRequests, err = convert.FriendRequestDB2Pb(ctx, friendRequests, s.userRpcClient.GetUsersInfoMap)
 	if err != nil {
 		return nil, err
 	}
+
 	resp.Total = int32(total)
+
 	return resp, nil
 }

----------------------------------------

@@ -17,9 +17,10 @@ package third
 
 import (
 	"context"
 	"crypto/rand"
-	relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
 	"time"
+
+	relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/authverify"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
 	"github.com/openimsdk/protocol/constant"

----------------------------------------

@@ -16,6 +16,7 @@ package user
 
 import (
 	"context"
+
 	"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
 	"github.com/openimsdk/tools/utils/datautil"
@@ -88,7 +89,6 @@ func (s *userServer) webhookBeforeUserRegister(ctx context.Context, before *conf
 	return webhook.WithCondition(ctx, before, func(ctx context.Context) error {
 		cbReq := &cbapi.CallbackBeforeUserRegisterReq{
 			CallbackCommand: cbapi.CallbackBeforeUserRegisterCommand,
-			Secret:          req.Secret,
 			Users:           req.Users,
 		}
@@ -108,7 +108,6 @@ func (s *userServer) webhookBeforeUserRegister(ctx context.Context, before *conf
 
 func (s *userServer) webhookAfterUserRegister(ctx context.Context, after *config.AfterConfig, req *pbuser.UserRegisterReq) {
 	cbReq := &cbapi.CallbackAfterUserRegisterReq{
 		CallbackCommand: cbapi.CallbackAfterUserRegisterCommand,
-		Secret:          req.Secret,
 		Users:           req.Users,
 	}

----------------------------------------

@@ -2,6 +2,8 @@ package user
 
 import (
 	"context"
+
+	"github.com/openimsdk/tools/utils/datautil"
 	"github.com/openimsdk/protocol/constant"
 	pbuser "github.com/openimsdk/protocol/user"
 )
@@ -80,3 +82,22 @@ func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUse
 	}
 	return &pbuser.SetUserOnlineStatusResp{}, nil
 }
+
+func (s *userServer) GetAllOnlineUsers(ctx context.Context, req *pbuser.GetAllOnlineUsersReq) (*pbuser.GetAllOnlineUsersResp, error) {
+	resMap, nextCursor, err := s.online.GetAllOnlineUsers(ctx, req.Cursor)
+	if err != nil {
+		return nil, err
+	}
+	resp := &pbuser.GetAllOnlineUsersResp{
+		StatusList: make([]*pbuser.OnlineStatus, 0, len(resMap)),
+		NextCursor: nextCursor,
+	}
+	for userID, plats := range resMap {
+		resp.StatusList = append(resp.StatusList, &pbuser.OnlineStatus{
+			UserID:      userID,
+			Status:      int32(datautil.If(len(plats) > 0, constant.Online, constant.Offline)),
+			PlatformIDs: plats,
+		})
+	}
+	return resp, nil
+}
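`NextCursor` follows Redis SCAN semantics: feed it back in until the server returns 0. A paging sketch, assuming the generated `pbuser.UserClient` (standard gRPC naming; not shown in this diff):

```go
func dumpOnlineUsers(ctx context.Context, c pbuser.UserClient) error {
	var cursor uint64
	for {
		resp, err := c.GetAllOnlineUsers(ctx, &pbuser.GetAllOnlineUsersReq{Cursor: cursor})
		if err != nil {
			return err
		}
		for _, s := range resp.StatusList {
			fmt.Println(s.UserID, s.Status, s.PlatformIDs)
		}
		if resp.NextCursor == 0 {
			return nil // scan complete
		}
		cursor = resp.NextCursor
	}
}
```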

----------------------------------------

@@ -47,7 +47,6 @@ import (
 	"github.com/openimsdk/tools/db/pagination"
 	registry "github.com/openimsdk/tools/discovery"
 	"github.com/openimsdk/tools/errs"
-	"github.com/openimsdk/tools/log"
 	"github.com/openimsdk/tools/utils/datautil"
 	"google.golang.org/grpc"
 )
@@ -263,10 +262,11 @@ func (s *userServer) UserRegister(ctx context.Context, req *pbuser.UserRegisterR
 	if len(req.Users) == 0 {
 		return nil, errs.ErrArgs.WrapMsg("users is empty")
 	}
-	if req.Secret != s.config.Share.Secret {
-		log.ZDebug(ctx, "UserRegister", s.config.Share.Secret, req.Secret)
-		return nil, errs.ErrNoPermission.WrapMsg("secret invalid")
+
+	if err = authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil {
+		return nil, err
 	}
+
 	if datautil.DuplicateAny(req.Users, func(e *sdkws.UserInfo) string { return e.UserID }) {
 		return nil, errs.ErrArgs.WrapMsg("userID repeated")
 	}

----------------------------------------

@@ -79,13 +79,13 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 		now := time.Now()
 		deltime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.RetainChatRecords))
 		ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deltime.UnixMilli()))
-		log.ZInfo(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli())
+		log.ZDebug(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli())
 		if _, err := msgClient.ClearMsg(ctx, &msg.ClearMsgReq{Timestamp: deltime.UnixMilli()}); err != nil {
 			log.ZError(ctx, "cron clear chat records failed", err, "deltime", deltime, "cont", time.Since(now))
 			return
 		}
-		log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
+		log.ZDebug(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
 	}
 	if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearMsgFunc); err != nil {
 		return errs.Wrap(err)
@@ -95,7 +95,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 	msgDestructFunc := func() {
 		now := time.Now()
 		ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), now.UnixMilli()))
-		log.ZInfo(ctx, "msg destruct cron start", "now", now)
+		log.ZDebug(ctx, "msg destruct cron start", "now", now)
 		conversations, err := conversationClient.GetConversationsNeedDestructMsgs(ctx, &pbconversation.GetConversationsNeedDestructMsgsReq{})
 		if err != nil {
@@ -108,7 +108,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 				return
 			}
 		}
-		log.ZInfo(ctx, "msg destruct cron task completed", "cont", time.Since(now))
+		log.ZDebug(ctx, "msg destruct cron task completed", "cont", time.Since(now))
 	}
 	if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, msgDestructFunc); err != nil {
 		return errs.Wrap(err)
@@ -119,18 +119,18 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 	//	now := time.Now()
 	//	deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
 	//	ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
-	//	log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
+	//	log.ZDebug(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
 	//	if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
 	//		log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now))
 	//		return
 	//	}
-	//	log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
+	//	log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
 	// }
 	// if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil {
 	//	return errs.Wrap(err)
 	// }
-	log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
+	log.ZDebug(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
 	crontab.Start()
 	<-ctx.Done()
 	return nil

----------------------------------------

@@ -18,9 +18,9 @@ const (
 	CallbackBeforeInviteJoinGroupCommand = "callbackBeforeInviteJoinGroupCommand"
 	CallbackAfterJoinGroupCommand        = "callbackAfterJoinGroupCommand"
 	CallbackAfterSetGroupInfoCommand     = "callbackAfterSetGroupInfoCommand"
-	CallbackAfterSetGroupInfoEXCommand   = "callbackAfterSetGroupInfoCommandEX"
+	CallbackAfterSetGroupInfoExCommand   = "callbackAfterSetGroupInfoExCommand"
 	CallbackBeforeSetGroupInfoCommand    = "callbackBeforeSetGroupInfoCommand"
-	CallbackBeforeSetGroupInfoEXCommand  = "callbackBeforeSetGroupInfoEXCommand"
+	CallbackBeforeSetGroupInfoExCommand  = "callbackBeforeSetGroupInfoExCommand"
 	CallbackAfterRevokeMsgCommand        = "callbackBeforeAfterMsgCommand"
 	CallbackBeforeAddBlackCommand        = "callbackBeforeAddBlackCommand"
 	CallbackAfterAddFriendCommand        = "callbackAfterAddFriendCommand"
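These are wire-visible values, so the rename is a breaking change for webhook consumers: the old, inconsistently spelled `callbackAfterSetGroupInfoCommandEX` no longer arrives. A sketch of the consumer-side dispatch that must be updated (`payload` is a placeholder struct):

```go
switch payload.CallbackCommand {
case "callbackAfterSetGroupInfoExCommand": // was "callbackAfterSetGroupInfoCommandEX"
	// handle the post-update notification
case "callbackBeforeSetGroupInfoExCommand": // was "callbackBeforeSetGroupInfoEXCommand"
	// handle the pre-update validation
}
```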

----------------------------------------

@@ -244,11 +244,11 @@ type CallbackAfterSetGroupInfoResp struct {
 	CommonCallbackResp
 }
 
-type CallbackBeforeSetGroupInfoEXReq struct {
+type CallbackBeforeSetGroupInfoExReq struct {
 	CallbackCommand   `json:"callbackCommand"`
 	OperationID       string                  `json:"operationID"`
 	GroupID           string                  `json:"groupID"`
-	GroupName         string                  `json:"groupName"`
+	GroupName         *wrapperspb.StringValue `json:"groupName"`
 	Notification      *wrapperspb.StringValue `json:"notification"`
 	Introduction      *wrapperspb.StringValue `json:"introduction"`
 	FaceURL           *wrapperspb.StringValue `json:"faceURL"`
@@ -258,10 +258,10 @@ type CallbackBeforeSetGroupInfoEXReq struct {
 	ApplyMemberFriend *wrapperspb.Int32Value  `json:"applyMemberFriend"`
 }
 
-type CallbackBeforeSetGroupInfoEXResp struct {
+type CallbackBeforeSetGroupInfoExResp struct {
 	CommonCallbackResp
 	GroupID           string                  `json:"groupID"`
-	GroupName         string                  `json:"groupName"`
+	GroupName         *wrapperspb.StringValue `json:"groupName"`
 	Notification      *wrapperspb.StringValue `json:"notification"`
 	Introduction      *wrapperspb.StringValue `json:"introduction"`
 	FaceURL           *wrapperspb.StringValue `json:"faceURL"`
@@ -271,11 +271,11 @@ type CallbackBeforeSetGroupInfoEXResp struct {
 	ApplyMemberFriend *wrapperspb.Int32Value  `json:"applyMemberFriend"`
 }
 
-type CallbackAfterSetGroupInfoEXReq struct {
+type CallbackAfterSetGroupInfoExReq struct {
 	CallbackCommand   `json:"callbackCommand"`
 	OperationID       string                  `json:"operationID"`
 	GroupID           string                  `json:"groupID"`
-	GroupName         string                  `json:"groupName"`
+	GroupName         *wrapperspb.StringValue `json:"groupName"`
 	Notification      *wrapperspb.StringValue `json:"notification"`
 	Introduction      *wrapperspb.StringValue `json:"introduction"`
 	FaceURL           *wrapperspb.StringValue `json:"faceURL"`
@@ -285,6 +285,6 @@ type CallbackAfterSetGroupInfoEXReq struct {
 	ApplyMemberFriend *wrapperspb.Int32Value  `json:"applyMemberFriend"`
 }
 
-type CallbackAfterSetGroupInfoEXResp struct {
+type CallbackAfterSetGroupInfoExResp struct {
 	CommonCallbackResp
 }
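Changing `GroupName` from `string` to `*wrapperspb.StringValue` lets the callback distinguish "field not sent" from "set to empty", matching the other optional fields in these structs. A receiver-side sketch (`applyGroupName` is a placeholder):

```go
// nil means the group name was not touched in this request; a non-nil
// wrapper carries the new value, which may legitimately be "".
if req.GroupName != nil {
	applyGroupName(req.GroupName.Value)
}
```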

----------------------------------------

@@ -72,7 +72,6 @@ type CallbackAfterUpdateUserInfoExResp struct {
 
 type CallbackBeforeUserRegisterReq struct {
 	CallbackCommand `json:"callbackCommand"`
-	Secret          string            `json:"secret"`
 	Users           []*sdkws.UserInfo `json:"users"`
 }
 
@@ -83,7 +82,6 @@ type CallbackBeforeUserRegisterResp struct {
 
 type CallbackAfterUserRegisterReq struct {
 	CallbackCommand `json:"callbackCommand"`
-	Secret          string            `json:"secret"`
 	Users           []*sdkws.UserInfo `json:"users"`
 }

----------------------------------------

@@ -37,7 +37,6 @@ func NewPushRpcCmd() *PushRpcCmd {
 	ret.configMap = map[string]any{
 		OpenIMPushCfgFileName: &pushConfig.RpcConfig,
 		RedisConfigFileName:   &pushConfig.RedisConfig,
-		MongodbConfigFileName: &pushConfig.MongodbConfig,
 		KafkaConfigFileName:   &pushConfig.KafkaConfig,
 		ShareFileName:         &pushConfig.Share,
 		NotificationFileName:  &pushConfig.NotificationConfig,

----------------------------------------

@@ -129,10 +129,11 @@ func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
 }
 
 func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
-	err := log.InitFromConfig(
+	err := log.InitLoggerFromConfig(
 		cmdOpts.loggerPrefixName,
 		r.processName,
 		"",
 		r.log.RemainLogLevel,
 		r.log.IsStdout,
 		r.log.IsJson,

----------------------------------------

@@ -81,9 +81,12 @@ type Kafka struct {
 	ToRedisTopic       string    `mapstructure:"toRedisTopic"`
 	ToMongoTopic       string    `mapstructure:"toMongoTopic"`
 	ToPushTopic        string    `mapstructure:"toPushTopic"`
+	ToOfflinePushTopic string    `mapstructure:"toOfflinePushTopic"`
 	ToRedisGroupID     string    `mapstructure:"toRedisGroupID"`
 	ToMongoGroupID     string    `mapstructure:"toMongoGroupID"`
 	ToPushGroupID      string    `mapstructure:"toPushGroupID"`
+	ToOfflineGroupID   string    `mapstructure:"toOfflinePushGroupID"`
 	Tls                TLSConfig `mapstructure:"tls"`
 }
 
 type TLSConfig struct {
@@ -99,6 +102,7 @@ type API struct {
 	Api struct {
 		ListenIP         string `mapstructure:"listenIP"`
 		Ports            []int  `mapstructure:"ports"`
+		CompressionLevel int    `mapstructure:"compressionLevel"`
 	} `mapstructure:"api"`
 	Prometheus struct {
 		Enable bool `mapstructure:"enable"`
@@ -220,6 +224,7 @@ type Push struct {
 		BadgeCount bool `mapstructure:"badgeCount"`
 		Production bool `mapstructure:"production"`
 	} `mapstructure:"iosPush"`
+	FullUserCache bool `mapstructure:"fullUserCache"`
 }
 
 type Auth struct {
@@ -336,7 +341,8 @@ type Redis struct {
 	Password    string `mapstructure:"password"`
 	ClusterMode bool   `mapstructure:"clusterMode"`
 	DB          int    `mapstructure:"storage"`
-	MaxRetry    int    `mapstructure:"MaxRetry"`
+	MaxRetry    int    `mapstructure:"maxRetry"`
+	PoolSize    int    `mapstructure:"poolSize"`
 }
 
 type BeforeConfig struct {
@@ -422,8 +428,8 @@ type Webhooks struct {
 	BeforeInviteUserToGroup BeforeConfig `mapstructure:"beforeInviteUserToGroup"`
 	AfterSetGroupInfo       AfterConfig  `mapstructure:"afterSetGroupInfo"`
 	BeforeSetGroupInfo      BeforeConfig `mapstructure:"beforeSetGroupInfo"`
-	AfterSetGroupInfoEX     AfterConfig  `mapstructure:"afterSetGroupInfoEX"`
-	BeforeSetGroupInfoEX    BeforeConfig `mapstructure:"beforeSetGroupInfoEX"`
+	AfterSetGroupInfoEx     AfterConfig  `mapstructure:"afterSetGroupInfoEx"`
+	BeforeSetGroupInfoEx    BeforeConfig `mapstructure:"beforeSetGroupInfoEx"`
 	AfterRevokeMsg          AfterConfig  `mapstructure:"afterRevokeMsg"`
 	BeforeAddBlack          BeforeConfig `mapstructure:"beforeAddBlack"`
 	AfterAddFriend          AfterConfig  `mapstructure:"afterAddFriend"`
@@ -474,6 +480,7 @@ func (r *Redis) Build() *redisutil.Config {
 		Password: r.Password,
 		DB:       r.DB,
 		MaxRetry: r.MaxRetry,
+		PoolSize: r.PoolSize,
 	}
 }
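Two config notes: the mapstructure key for `MaxRetry` changes case (`MaxRetry` → `maxRetry`), so YAML written against the old key silently stops binding, and `PoolSize` is new. These values typically end up on the go-redis client options; a sketch under the assumption that `redisutil.Config` forwards them roughly like this (`cfg` is a placeholder):

```go
// Sketch only: plausible mapping onto go-redis v9 options.
rdb := redis.NewClient(&redis.Options{
	Addr:       "127.0.0.1:6379", // placeholder
	Password:   cfg.Password,
	DB:         cfg.DB,
	MaxRetries: cfg.MaxRetry, // per-command retry budget
	PoolSize:   cfg.PoolSize, // max connections per node
})
_ = rdb
```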

----------------------------------------

@@ -23,4 +23,8 @@ var (
 		Name: "msg_offline_push_failed_total",
 		Help: "The number of msg failed offline pushed",
 	})
+	MsgLoneTimePushCounter = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "msg_long_time_push_total",
+		Help: "The number of messages with a push time exceeding 10 seconds",
+	})
 )
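The call site is not part of this diff; a plausible instrumentation sketch for the 10-second threshold named in the help text (`doPush` is a placeholder):

```go
// Hypothetical push-path instrumentation.
start := time.Now()
doPush(msg)
if time.Since(start) > 10*time.Second {
	prommetrics.MsgLoneTimePushCounter.Inc()
}
```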

----------------------------------------

@@ -47,9 +47,17 @@ func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Co
 	case share.RpcRegisterName.MessageGateway:
 		return []prometheus.Collector{OnlineUserGauge}
 	case share.RpcRegisterName.Msg:
-		return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
+		return []prometheus.Collector{
+			SingleChatMsgProcessSuccessCounter,
+			SingleChatMsgProcessFailedCounter,
+			GroupChatMsgProcessSuccessCounter,
+			GroupChatMsgProcessFailedCounter,
+		}
 	case share.RpcRegisterName.Push:
-		return []prometheus.Collector{MsgOfflinePushFailedCounter}
+		return []prometheus.Collector{
+			MsgOfflinePushFailedCounter,
+			MsgLoneTimePushCounter,
+		}
 	case share.RpcRegisterName.Auth:
 		return []prometheus.Collector{UserLoginCounter}
 	case share.RpcRegisterName.User:

----------------------------------------

@@ -25,7 +25,6 @@ import (
 	"os"
 	"os/signal"
 	"strconv"
-	"sync"
 	"syscall"
 	"time"
 
@@ -35,7 +34,6 @@ import (
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
 	"github.com/openimsdk/tools/mw"
-	"github.com/openimsdk/tools/system/program"
 	"github.com/openimsdk/tools/utils/network"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
@@ -54,6 +52,7 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort,
 		"prometheusPorts", prometheusConfig.Ports)
 	rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort))
+
 	listener, err := net.Listen(
 		"tcp",
 		rpcTcpAddr,
@@ -61,7 +60,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	if err != nil {
 		return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
 	}
-
 	defer listener.Close()
 	client, err := kdisc.NewDiscoveryRegister(discovery, share)
 	if err != nil {
@@ -92,10 +90,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	}
 	srv := grpc.NewServer(options...)
-	once := sync.Once{}
-	defer func() {
-		once.Do(srv.GracefulStop)
-	}()
 
 	err = rpcFn(ctx, config, client, srv)
 	if err != nil {
@@ -115,7 +109,6 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	var (
 		netDone = make(chan struct{}, 2)
 		netErr  error
-		httpServer *http.Server
 	)
 	if prometheusConfig.Enable {
 		go func() {
@@ -152,18 +145,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	signal.Notify(sigs, syscall.SIGTERM)
 	select {
 	case <-sigs:
-		program.SIGTERMExit()
-		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 		defer cancel()
 		if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil {
 			return err
 		}
-		ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second)
-		defer cancel()
-		err := httpServer.Shutdown(ctx)
-		if err != nil {
-			return errs.WrapMsg(err, "shutdown err")
-		}
 		return nil
 	case <-netDone:
 		return netErr
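`gracefulStopWithCtx` is referenced but not shown here; one plausible shape, racing the blocking `GracefulStop` against the 10s deadline (the actual implementation may differ):

```go
func gracefulStopWithCtx(ctx context.Context, f func()) error {
	done := make(chan struct{})
	go func() {
		f() // blocks until in-flight RPCs drain
		close(done)
	}()
	select {
	case <-ctx.Done():
		return errs.New("graceful stop timeout")
	case <-done:
		return nil
	}
}
```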

----------------------------------------

@@ -1,6 +1,9 @@
 package cachekey
 
-import "time"
+import (
+	"strings"
+	"time"
+)
 
 const (
 	OnlineKey = "ONLINE:"
@@ -11,3 +14,7 @@ const (
 func GetOnlineKey(userID string) string {
 	return OnlineKey + userID
 }
+
+func GetOnlineKeyUserID(key string) string {
+	return strings.TrimPrefix(key, OnlineKey)
+}
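Round trip of the new helper pair; note `strings.TrimPrefix` is a no-op on keys without the prefix, which is fine because callers only feed it keys returned by a SCAN over `ONLINE:*`:

```go
key := cachekey.GetOnlineKey("user-1")     // "ONLINE:user-1"
userID := cachekey.GetOnlineKeyUserID(key) // "user-1"
```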

----------------------------------------

@@ -5,4 +5,5 @@ import "context"
 type OnlineCache interface {
 	GetOnline(ctx context.Context, userID string) ([]int32, error)
 	SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
+	GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error)
 }

----------------------------------------

@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"github.com/dtm-labs/rockscache"
+	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
 	"github.com/redis/go-redis/v9"
 	"golang.org/x/sync/singleflight"
@@ -65,6 +66,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
 			}
 			bs, err := json.Marshal(value)
 			if err != nil {
+				log.ZError(ctx, "marshal failed", err)
 				return nil, err
 			}
 			cacheIndex[index] = string(bs)
@@ -72,7 +74,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
 		return cacheIndex, nil
 	})
 	if err != nil {
-		return nil, err
+		return nil, errs.WrapMsg(err, "FetchBatch2 failed")
 	}
 	for index, data := range indexCache {
 		if data == "" {
@@ -80,7 +82,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
 		}
 		var value V
 		if err := json.Unmarshal([]byte(data), &value); err != nil {
-			return nil, err
+			return nil, errs.WrapMsg(err, "Unmarshal failed")
 		}
 		if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
 			cb.BatchCache(keyId[keys[index]])

----------------------------------------

@@ -28,6 +28,10 @@ import (
 	"time"
 )
 
+const (
+	rocksCacheTimeout = 11 * time.Second
+)
+
 // BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache.
 type BatchDeleterRedis struct {
 	redisClient redis.UniversalClient
@@ -106,6 +110,8 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) {
 // GetRocksCacheOptions returns the default configuration options for RocksCache.
 func GetRocksCacheOptions() *rockscache.Options {
 	opts := rockscache.NewDefaultOptions()
+	opts.LockExpire = rocksCacheTimeout
+	opts.WaitReplicasTimeout = rocksCacheTimeout
 	opts.StrongConsistency = true
 	opts.RandomExpireAdjustment = 0.2
 
@@ -118,7 +124,7 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
 	v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
 		t, err = fn(ctx)
 		if err != nil {
-			log.ZError(ctx, "getCache query database failed", err, "key", key)
+			//log.ZError(ctx, "getCache query database failed", err, "key", key)
 			return "", err
 		}
 		bs, err := json.Marshal(t)
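`LockExpire` bounds how long one caller holds the rockscache fetch lock before others retry the loader, so the 11s value should exceed the slowest expected database query. Construction is unchanged; a minimal sketch (`rdb` is an existing `redis.UniversalClient`):

```go
rcClient := rockscache.NewClient(rdb, *GetRocksCacheOptions())
```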

----------------------------------------

@@ -2,8 +2,10 @@ package redis
 
 import (
 	"context"
+	"fmt"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
 	"github.com/redis/go-redis/v9"
@@ -49,6 +51,36 @@ func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, err
 	return platformIDs, nil
 }
 
+func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
+	result := make(map[string][]int32)
+	keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
+	if err != nil {
+		return nil, 0, err
+	}
+	for _, key := range keys {
+		userID := cachekey.GetOnlineKeyUserID(key)
+		strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result()
+		if err != nil {
+			return nil, 0, err
+		}
+		values := make([]int32, 0, len(strValues))
+		for _, value := range strValues {
+			intValue, err := strconv.Atoi(value)
+			if err != nil {
+				return nil, 0, errs.Wrap(err)
+			}
+			values = append(values, int32(intValue))
+		}
+		result[userID] = values
+	}
+	return result, nextCursor, nil
+}
+
 func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
 	script := `
 local key = KEYS[1]
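One SCAN caveat for callers of the method above: COUNT (`constant.ParamMaxLength` here) is only a hint, a pass may return zero keys with a non-zero cursor, and a key can appear more than once across a full scan, so consumers should loop on the cursor and tolerate duplicates:

```go
// Raw form of the loop this method participates in (sketch fragment).
var cursor uint64
for {
	keys, next, err := rdb.Scan(ctx, cursor, "ONLINE:*", 100).Result()
	if err != nil {
		break // handle the error in real code
	}
	for _, k := range keys {
		_ = k // may be seen more than once during a full scan
	}
	if next == 0 {
		break
	}
	cursor = next
}
```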

----------------------------------------

@@ -19,8 +19,8 @@ import (
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
 	"github.com/openimsdk/tools/errs"
-	"github.com/openimsdk/tools/utils/stringutil"
 	"github.com/redis/go-redis/v9"
+	"strconv"
 	"time"
 )
 
@@ -58,9 +58,12 @@ func (c *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, p
 	}
 	mm := make(map[string]int)
 	for k, v := range m {
-		mm[k] = stringutil.StringToInt(v)
+		state, err := strconv.Atoi(v)
+		if err != nil {
+			return nil, errs.WrapMsg(err, "redis token value is not int", "value", v, "userID", userID, "platformID", platformID)
+		}
+		mm[k] = state
 	}
 	return mm, nil
 }
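The behavioral difference: `stringutil.StringToInt` maps unparsable values to 0, which is indistinguishable from a real token state, while `strconv.Atoi` surfaces corrupted cache entries as errors:

```go
state, err := strconv.Atoi("not-a-number")
// err != nil here; previously this value would have silently become state 0
_ = state
```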

----------------------------------------

@@ -26,7 +26,6 @@ import (
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
 	"github.com/openimsdk/protocol/constant"
 	pbmsg "github.com/openimsdk/protocol/msg"
@@ -47,16 +46,10 @@ const (
 
 // CommonMsgDatabase defines the interface for message database operations.
 type CommonMsgDatabase interface {
-	// BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation.
-	BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error
 	// RevokeMsg revokes a message in a conversation.
 	RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error
 	// MarkSingleChatMsgsAsRead marks messages as read for a single chat by sequence numbers.
 	MarkSingleChatMsgsAsRead(ctx context.Context, userID string, conversationID string, seqs []int64) error
-	// DeleteMessagesFromCache deletes message caches from Redis by sequence numbers.
-	DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error
-	// BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache.
-	BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error)
 	// GetMsgBySeqsRange retrieves messages from MongoDB by a range of sequence numbers.
 	GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (minSeq int64, maxSeq int64, seqMsg []*sdkws.MsgData, err error)
 	// GetMsgBySeqs retrieves messages for large groups from MongoDB by sequence numbers.
@@ -77,7 +70,6 @@ type CommonMsgDatabase interface {
 	SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error)
 	SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
-	SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
 	GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
 	GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
 	UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
@@ -86,13 +78,11 @@ type CommonMsgDatabase interface {
 	//GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
 	SetSendMsgStatus(ctx context.Context, id string, status int32) error
 	GetSendMsgStatus(ctx context.Context, id string) (int32, error)
-	SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*sdkws.MsgData, err error)
+	SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*pbmsg.SearchedMsgData, err error)
 	FindOneByDocIDs(ctx context.Context, docIDs []string, seqs map[string]int64) (map[string]*sdkws.MsgData, error)
 
 	// to mq
 	MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error
-	MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error)
-	MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error
 
 	RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error)
 	RangeGroupSendCount(ctx context.Context, start time.Time, end time.Time, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, groups []*model.GroupCount, dateCount map[string]int64, err error)
@@ -114,22 +104,12 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser
 	if err != nil {
 		return nil, err
 	}
-	producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic)
-	if err != nil {
-		return nil, err
-	}
-	producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic)
-	if err != nil {
-		return nil, err
-	}
 	return &commonMsgDatabase{
 		msgDocDatabase:  msgDocModel,
 		msg:             msg,
 		seqUser:         seqUser,
 		seqConversation: seqConversation,
 		producer:        producerToRedis,
-		producerToMongo: producerToMongo,
-		producerToPush:  producerToPush,
 	}, nil
 }
 
@@ -140,8 +120,6 @@ type commonMsgDatabase struct {
 	seqConversation cache.SeqConversationCache
 	seqUser         cache.SeqUser
 	producer        *kafka.Producer
-	producerToMongo *kafka.Producer
-	producerToPush  *kafka.Producer
 }
 
 func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error {
@@ -149,23 +127,6 @@ func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sd
 	return err
 }
 
-func (db *commonMsgDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) {
-	partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
-	if err != nil {
-		log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq)
-		return 0, 0, err
-	}
-	return partition, offset, nil
-}
-
-func (db *commonMsgDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error {
-	if len(messages) > 0 {
-		_, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
-		return err
-	}
-	return nil
-}
-
 func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error {
 	if len(fields) == 0 {
 		return nil
@@ -267,52 +228,6 @@ func (db *commonMsgDatabase) BatchInsertBlock(ctx context.Context, conversationI
 	return nil
 }
 
-func (db *commonMsgDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error {
-	if len(msgList) == 0 {
-		return errs.ErrArgs.WrapMsg("msgList is empty")
-	}
-	msgs := make([]any, len(msgList))
-	for i, msg := range msgList {
-		if msg == nil {
-			continue
-		}
-		var offlinePushModel *model.OfflinePushModel
-		if msg.OfflinePushInfo != nil {
-			offlinePushModel = &model.OfflinePushModel{
-				Title:         msg.OfflinePushInfo.Title,
-				Desc:          msg.OfflinePushInfo.Desc,
-				Ex:            msg.OfflinePushInfo.Ex,
-				IOSPushSound:  msg.OfflinePushInfo.IOSPushSound,
-				IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount,
-			}
-		}
-		msgs[i] = &model.MsgDataModel{
-			SendID:           msg.SendID,
-			RecvID:           msg.RecvID,
-			GroupID:          msg.GroupID,
-			ClientMsgID:      msg.ClientMsgID,
-			ServerMsgID:      msg.ServerMsgID,
-			SenderPlatformID: msg.SenderPlatformID,
-			SenderNickname:   msg.SenderNickname,
-			SenderFaceURL:    msg.SenderFaceURL,
-			SessionType:      msg.SessionType,
-			MsgFrom:          msg.MsgFrom,
-			ContentType:      msg.ContentType,
-			Content:          string(msg.Content),
-			Seq:              msg.Seq,
-			SendTime:         msg.SendTime,
-			CreateTime:       msg.CreateTime,
-			Status:           msg.Status,
-			Options:          msg.Options,
-			OfflinePush:      offlinePushModel,
-			AtUserIDList:     msg.AtUserIDList,
-			AttachedInfo:     msg.AttachedInfo,
-			Ex:               msg.Ex,
-		}
-	}
-	return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq)
-}
-
 func (db *commonMsgDatabase) RevokeMsg(ctx context.Context, conversationID string, seq int64, revoke *model.RevokeModel) error {
 	return db.BatchInsertBlock(ctx, conversationID, []any{revoke}, updateKeyRevoke, seq)
 }
@@ -332,56 +247,6 @@ func (db *commonMsgDatabase) MarkSingleChatMsgsAsRead(ctx context.Context, userI
 	return nil
 }
 
-func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error {
-	return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
-}
-
-func (db *commonMsgDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error {
-	for userID, seq := range userSeqMap {
-		if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
-	lenList := len(msgs)
-	if int64(lenList) > db.msgTable.GetSingleGocMsgNum() {
-		return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap()
-	}
-	if lenList < 1 {
-		return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap()
-	}
-	currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
-	if err != nil {
-		log.ZError(ctx, "storage.seq.Malloc", err)
-		return 0, false, err
-	}
-	isNew = currentMaxSeq == 0
-	lastMaxSeq := currentMaxSeq
-	userSeqMap := make(map[string]int64)
-	for _, m := range msgs {
-		currentMaxSeq++
-		m.Seq = currentMaxSeq
-		userSeqMap[m.SendID] = m.Seq
-	}
-	failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs)
-	if err != nil {
-		prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum))
-		log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID)
-	} else {
-		prommetrics.MsgInsertRedisSuccessCounter.Inc()
-	}
-	err = db.setHasReadSeqs(ctx, conversationID, userSeqMap)
-	if err != nil {
-		log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID)
-		prommetrics.SeqSetFailedCounter.Inc()
-	}
-	return lastMaxSeq, isNew, errs.Wrap(err)
-}
-
 func (db *commonMsgDatabase) getMsgBySeqs(ctx context.Context, userID, conversationID string, seqs []int64) (totalMsgs []*sdkws.MsgData, err error) {
 	for docID, seqs := range db.msgTable.GetDocIDSeqsMap(conversationID, seqs) {
 		// log.ZDebug(ctx, "getMsgBySeqs", "docID", docID, "seqs", seqs)
@@ -581,7 +446,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
 
 func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) (int64, int64, []*sdkws.MsgData, error) {
 	userMinSeq, err := db.seqUser.GetUserMinSeq(ctx, conversationID, userID)
-	if err != nil && errs.Unwrap(err) != redis.Nil {
+	if err != nil {
 		return 0, 0, nil, err
 	}
 	minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
@@ -592,15 +457,28 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co
 	if err != nil {
 		return 0, 0, nil, err
 	}
-	if userMinSeq < minSeq {
+	userMaxSeq, err := db.seqUser.GetUserMaxSeq(ctx, conversationID, userID)
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	if userMinSeq > minSeq {
 		minSeq = userMinSeq
 	}
-	var newSeqs []int64
+	if userMaxSeq > 0 && userMaxSeq < maxSeq {
+		maxSeq = userMaxSeq
+	}
+	newSeqs := make([]int64, 0, len(seqs))
 	for _, seq := range seqs {
+		if seq <= 0 {
+			continue
+		}
 		if seq >= minSeq && seq <= maxSeq {
 			newSeqs = append(newSeqs, seq)
 		}
 	}
+	if len(newSeqs) == 0 {
+		return minSeq, maxSeq, nil, nil
+	}
 	successMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, newSeqs)
 	if err != nil {
 		if err != redis.Nil {
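Worked example of the new clamping: with conversation `minSeq=10`, `maxSeq=500`, `userMinSeq=50`, and `userMaxSeq=400`, the effective window becomes [50, 400]; non-positive and out-of-window seqs are dropped, and if nothing survives the method now returns early instead of issuing an empty cache lookup. Note the comparison also flips from `userMinSeq < minSeq` to `userMinSeq > minSeq`, which corrects the direction of the lower-bound clamp.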
@@ -809,10 +687,6 @@ func (db *commonMsgDatabase) SetHasReadSeq(ctx context.Context, userID string, c
 	return db.seqUser.SetUserReadSeq(ctx, conversationID, userID, hasReadSeq)
 }
 
-func (db *commonMsgDatabase) SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
-	return db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, hasReadSeq)
-}
-
 func (db *commonMsgDatabase) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
 	return db.seqUser.GetUserReadSeqs(ctx, userID, conversationIDs)
 }
@@ -886,8 +760,8 @@ func (db *commonMsgDatabase) RangeGroupSendCount(
 	return db.msgDocDatabase.RangeGroupSendCount(ctx, start, end, ase, pageNumber, showNumber)
 }
 
-func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*sdkws.MsgData, err error) {
-	var totalMsgs []*sdkws.MsgData
+func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int64, msgData []*pbmsg.SearchedMsgData, err error) {
+	var totalMsgs []*pbmsg.SearchedMsgData
 	total, msgs, err := db.msgDocDatabase.SearchMessage(ctx, req)
 	if err != nil {
 		return 0, nil, err
@@ -896,7 +770,13 @@ func (db *commonMsgDatabase) SearchMessage(ctx context.Context, req *pbmsg.Searc
 		if msg.IsRead {
 			msg.Msg.IsRead = true
 		}
-		totalMsgs = append(totalMsgs, convert.MsgDB2Pb(msg.Msg))
+		searchedMsgData := &pbmsg.SearchedMsgData{MsgData: convert.MsgDB2Pb(msg.Msg)}
+
+		if msg.Revoke != nil {
+			searchedMsgData.IsRevoked = true
+		}
+
+		totalMsgs = append(totalMsgs, searchedMsgData)
 	}
 	return total, totalMsgs, nil
 }

----------------------------------------
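The new `MsgTransferDatabase` below picks up the transfer-path pieces removed from `CommonMsgDatabase` above: `BatchInsertChat2DB`, `DeleteMessagesFromCache`, `BatchInsertChat2Cache`, `SetHasReadSeqToDB`, and the to-push/to-mongo Kafka producers, so the transfer service's write path is wired independently of the read path.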

@ -0,0 +1,286 @@
package controller
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
pbmsg "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"go.mongodb.org/mongo-driver/mongo"
)
type MsgTransferDatabase interface {
// BatchInsertChat2DB inserts a batch of messages into the database for a specific conversation.
BatchInsertChat2DB(ctx context.Context, conversationID string, msgs []*sdkws.MsgData, currentMaxSeq int64) error
// DeleteMessagesFromCache deletes message caches from Redis by sequence numbers.
DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error
// BatchInsertChat2Cache increments the sequence number and then batch inserts messages into the cache.
BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNewConversation bool, err error)
SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
// Producers to downstream MQ topics.
MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error)
MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error
}
func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (MsgTransferDatabase, error) {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil, err
}
producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic)
if err != nil {
return nil, err
}
producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic)
if err != nil {
return nil, err
}
return &msgTransferDatabase{
msgDocDatabase: msgDocModel,
msg: msg,
seqUser: seqUser,
seqConversation: seqConversation,
producerToMongo: producerToMongo,
producerToPush: producerToPush,
}, nil
}
type msgTransferDatabase struct {
msgDocDatabase database.Msg
msgTable model.MsgDocModel
msg cache.MsgCache
seqConversation cache.SeqConversationCache
seqUser cache.SeqUser
producerToMongo *kafka.Producer
producerToPush *kafka.Producer
}
func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error {
if len(msgList) == 0 {
return errs.ErrArgs.WrapMsg("msgList is empty")
}
msgs := make([]any, len(msgList))
for i, msg := range msgList {
if msg == nil {
continue
}
var offlinePushModel *model.OfflinePushModel
if msg.OfflinePushInfo != nil {
offlinePushModel = &model.OfflinePushModel{
Title: msg.OfflinePushInfo.Title,
Desc: msg.OfflinePushInfo.Desc,
Ex: msg.OfflinePushInfo.Ex,
IOSPushSound: msg.OfflinePushInfo.IOSPushSound,
IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount,
}
}
msgs[i] = &model.MsgDataModel{
SendID: msg.SendID,
RecvID: msg.RecvID,
GroupID: msg.GroupID,
ClientMsgID: msg.ClientMsgID,
ServerMsgID: msg.ServerMsgID,
SenderPlatformID: msg.SenderPlatformID,
SenderNickname: msg.SenderNickname,
SenderFaceURL: msg.SenderFaceURL,
SessionType: msg.SessionType,
MsgFrom: msg.MsgFrom,
ContentType: msg.ContentType,
Content: string(msg.Content),
Seq: msg.Seq,
SendTime: msg.SendTime,
CreateTime: msg.CreateTime,
Status: msg.Status,
Options: msg.Options,
OfflinePush: offlinePushModel,
AtUserIDList: msg.AtUserIDList,
AttachedInfo: msg.AttachedInfo,
Ex: msg.Ex,
}
}
return db.BatchInsertBlock(ctx, conversationID, msgs, updateKeyMsg, msgList[0].Seq)
}
func (db *msgTransferDatabase) BatchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error {
if len(fields) == 0 {
return nil
}
num := db.msgTable.GetSingleGocMsgNum()
// num = 100
for i, field := range fields { // Check the type of the field
var ok bool
switch key {
case updateKeyMsg:
var msg *model.MsgDataModel
msg, ok = field.(*model.MsgDataModel)
if msg != nil && msg.Seq != firstSeq+int64(i) {
return errs.ErrInternalServer.WrapMsg("seq is invalid")
}
case updateKeyRevoke:
_, ok = field.(*model.RevokeModel)
default:
return errs.ErrInternalServer.WrapMsg("key is invalid")
}
if !ok {
return errs.ErrInternalServer.WrapMsg("field type is invalid")
}
}
// updateMsgModel returns true if the target document already exists in the database (the update matched), false otherwise.
updateMsgModel := func(seq int64, i int) (bool, error) {
var (
res *mongo.UpdateResult
err error
)
docID := db.msgTable.GetDocID(conversationID, seq)
index := db.msgTable.GetMsgIndex(seq)
field := fields[i]
switch key {
case updateKeyMsg:
res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "msg", field)
case updateKeyRevoke:
res, err = db.msgDocDatabase.UpdateMsg(ctx, docID, index, "revoke", field)
}
if err != nil {
return false, err
}
return res.MatchedCount > 0, nil
}
tryUpdate := true
for i := 0; i < len(fields); i++ {
seq := firstSeq + int64(i) // Current sequence number
if tryUpdate {
matched, err := updateMsgModel(seq, i)
if err != nil {
return err
}
if matched {
continue // this seq was already updated in place; move on to the next one
}
}
doc := model.MsgDocModel{
DocID: db.msgTable.GetDocID(conversationID, seq),
Msg: make([]*model.MsgInfoModel, num),
}
var insert int // Inserted data number
for j := i; j < len(fields); j++ {
seq = firstSeq + int64(j)
if db.msgTable.GetDocID(conversationID, seq) != doc.DocID {
break
}
insert++
switch key {
case updateKeyMsg:
doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{
Msg: fields[j].(*model.MsgDataModel),
}
case updateKeyRevoke:
doc.Msg[db.msgTable.GetMsgIndex(seq)] = &model.MsgInfoModel{
Revoke: fields[j].(*model.RevokeModel),
}
}
}
for i, msgInfo := range doc.Msg {
if msgInfo == nil {
msgInfo = &model.MsgInfoModel{}
doc.Msg[i] = msgInfo
}
if msgInfo.DelList == nil {
doc.Msg[i].DelList = []string{}
}
}
if err := db.msgDocDatabase.Create(ctx, &doc); err != nil {
if mongo.IsDuplicateKeyError(err) {
i-- // duplicate key: the doc was created concurrently, so retry this block in update mode
tryUpdate = true // next block use update mode
continue
}
return err
}
tryUpdate = false // this block inserted cleanly, so attempt insert first for the next block
i += insert - 1 // Skip the inserted data
}
return nil
}
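The insert/update split above hinges on fixed-size message blocks: GetDocID and GetMsgIndex map a seq to a Mongo document and a slot inside it. A stand-alone sketch of that arithmetic, assuming a conversationID:blockNumber docID format and a block size of 100 (both stand-ins, not values taken from this diff):

package main

import "fmt"

// Assumed mirror of the sharding used by BatchInsertBlock: messages live in
// fixed-size Mongo documents, the docID encodes the block number, and the
// in-document index is the remainder. blockSize = 100 is a stand-in for
// GetSingleGocMsgNum().
const blockSize = 100

func docID(conversationID string, seq int64) string {
	return fmt.Sprintf("%s:%d", conversationID, seq/blockSize)
}

func msgIndex(seq int64) int64 {
	return seq % blockSize
}

func main() {
	// seqs 99 and 100 land in different documents, which is exactly where the
	// insert loop above starts a new doc.
	for _, seq := range []int64{0, 99, 100, 250} {
		fmt.Printf("seq=%d doc=%s index=%d\n", seq, docID("sg_team", seq), msgIndex(seq))
	}
}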
func (db *msgTransferDatabase) DeleteMessagesFromCache(ctx context.Context, conversationID string, seqs []int64) error {
return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
}
func (db *msgTransferDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
lenList := len(msgs)
if int64(lenList) > db.msgTable.GetSingleGocMsgNum() {
return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap()
}
if lenList < 1 {
return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap()
}
currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
if err != nil {
log.ZError(ctx, "storage.seq.Malloc", err)
return 0, false, err
}
isNew = currentMaxSeq == 0
lastMaxSeq := currentMaxSeq
userSeqMap := make(map[string]int64)
for _, m := range msgs {
currentMaxSeq++
m.Seq = currentMaxSeq
userSeqMap[m.SendID] = m.Seq
}
failedNum, err := db.msg.SetMessagesToCache(ctx, conversationID, msgs)
if err != nil {
prommetrics.MsgInsertRedisFailedCounter.Add(float64(failedNum))
log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID)
} else {
prommetrics.MsgInsertRedisSuccessCounter.Inc()
}
err = db.setHasReadSeqs(ctx, conversationID, userSeqMap)
if err != nil {
log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID)
prommetrics.SeqSetFailedCounter.Inc()
}
return lastMaxSeq, isNew, errs.Wrap(err)
}
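Seq assignment here relies on Malloc reserving a contiguous block of sequence numbers up front, so concurrent producers never hand out overlapping seqs. A toy mirror of that reservation, with an atomic counter standing in for the Redis-backed seqConversation:

package main

import (
	"fmt"
	"sync/atomic"
)

var maxSeq atomic.Int64 // stand-in for the per-conversation max seq kept in Redis

// malloc reserves n seqs and returns the value before the reservation,
// mirroring how BatchInsertChat2Cache numbers msgs from currentMaxSeq+1.
func malloc(n int64) int64 { return maxSeq.Add(n) - n }

func main() {
	msgs := []string{"a", "b", "c"}
	cur := malloc(int64(len(msgs)))
	for _, m := range msgs {
		cur++
		fmt.Printf("msg %s -> seq %d\n", m, cur)
	}
}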
func (db *msgTransferDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error {
for userID, seq := range userSeqMap {
if err := db.seqUser.SetUserReadSeq(ctx, conversationID, userID, seq); err != nil {
return err
}
}
return nil
}
func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
return db.seqUser.SetUserReadSeqToDB(ctx, conversationID, userID, hasReadSeq)
}
func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) {
partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
if err != nil {
log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq)
return 0, 0, err
}
return partition, offset, nil
}
func (db *msgTransferDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error {
if len(messages) > 0 {
_, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
if err != nil {
log.ZError(ctx, "MsgToMongoMQ", err, "key", key, "conversationID", conversationID, "lastSeq", lastSeq)
return err
}
}
return nil
}
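The interface implies an ordering for a transfer worker: write the cache (which assigns seqs), fan out to the push topic, then persist through the Mongo topic. A hedged caller sketch, not taken from this PR; the Kafka key choice and error handling are illustrative only:

package transfer // illustrative placement; not a package in this PR

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
	"github.com/openimsdk/protocol/sdkws"
)

// transferBatch sketches one consume cycle: seqs are assigned while writing
// the cache, the batch fans out to the push topic, and finally the batch is
// persisted through the Mongo topic. Using conversationID as the message key
// is an assumption made here to keep per-conversation ordering.
func transferBatch(ctx context.Context, db controller.MsgTransferDatabase, conversationID string, msgs []*sdkws.MsgData) error {
	lastSeq, _, err := db.BatchInsertChat2Cache(ctx, conversationID, msgs)
	if err != nil {
		return err
	}
	for _, m := range msgs {
		if _, _, err := db.MsgToPushMQ(ctx, conversationID, conversationID, m); err != nil {
			return err
		}
	}
	return db.MsgToMongoMQ(ctx, conversationID, conversationID, msgs, lastSeq)
}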

View File

@ -17,21 +17,45 @@ package controller
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
)
type PushDatabase interface {
DelFcmToken(ctx context.Context, userID string, platformID int) error
MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error
}
type pushDataBase struct {
cache cache.ThirdCache
producerToOfflinePush *kafka.Producer
}
func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil
}
producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
if err != nil {
return nil
}
return &pushDataBase{
cache: cache,
producerToOfflinePush: producerToOfflinePush,
}
}
func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformID int) error {
return p.cache.DelFcmToken(ctx, userID, platformID)
}
func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error {
_, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
log.ZInfo(ctx, "message is pushed to the offlinePush topic", "key", key, "userIDs", userIDs, "msg", msg2mq.String())
return err
}
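Since NewPushDatabase returns nil instead of an error when producer construction fails, callers should guard against a nil result. An illustrative wrapper (not part of this PR) that surfaces the failure:

package pushinit // illustrative package; not part of this PR

import (
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
	"github.com/openimsdk/tools/errs"
)

// newPushDB converts NewPushDatabase's silent failure mode into an error:
// the constructor returns nil when the Kafka producer cannot be built.
func newPushDB(c cache.ThirdCache, kafkaConf *config.Kafka) (controller.PushDatabase, error) {
	db := controller.NewPushDatabase(c, kafkaConf)
	if db == nil {
		return nil, errs.New("NewPushDatabase returned nil: kafka producer init failed").Wrap()
	}
	return db, nil
}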

View File

@ -16,12 +16,13 @@ package controller
import (
"context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/pagination"
"github.com/openimsdk/tools/db/tx"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/errs"
@ -111,10 +112,14 @@ func (u *userDatabase) InitOnce(ctx context.Context, users []*model.User) error
// FindWithError gets the information of the specified users and returns an error if any userID is not found.
func (u *userDatabase) FindWithError(ctx context.Context, userIDs []string) (users []*model.User, err error) {
userIDs = datautil.Distinct(userIDs)
// TODO: Add logic to identify which user IDs are distinct and which user IDs were not found.
users, err = u.cache.GetUsersInfo(ctx, userIDs)
if err != nil {
return
}
if len(users) != len(userIDs) {
err = errs.ErrRecordNotFound.WrapMsg("userID not found")
}

View File

@ -20,7 +20,9 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]
type LRU[K comparable, V any] interface {
Get(key K, fetch func() (V, error)) (V, error)
Set(key K, value V)
SetHas(key K, value V) bool
GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error)
Del(key K) bool
Stop()
}
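Set writes unconditionally, and GetBatch is expected to invoke fetch only for the keys the cache misses, merging cached and fetched values in its result. A usage sketch against the LayLRU implementation shown later in this diff; sizes and TTLs are arbitrary:

package main

import (
	"fmt"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/localcache"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
)

func main() {
	c := lru.NewLayLRU[string, int](128, time.Minute, 3*time.Second, localcache.EmptyTarget{}, func(string, int) {})
	c.Set("a", 1) // "a" is now cached, so the fetch below should only see "b"
	vals, err := c.GetBatch([]string{"a", "b"}, func(missing []string) (map[string]int, error) {
		out := make(map[string]int, len(missing))
		for _, k := range missing {
			out[k] = len(k)
		}
		return out, nil
	})
	fmt.Println(vals, err) // expected: map[a:1 b:1] <nil>
}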

View File

@ -51,6 +51,11 @@ type ExpirationLRU[K comparable, V any] struct {
target Target
}
func (x *ExpirationLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
//TODO implement me
panic("implement me")
}
func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
x.lock.Lock()
v, ok := x.core.Get(key)
@ -99,5 +104,11 @@ func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
return false
}
func (x *ExpirationLRU[K, V]) Set(key K, value V) {
x.lock.Lock()
defer x.lock.Unlock()
x.core.Add(key, &expirationLruItem[V]{value: value})
}
func (x *ExpirationLRU[K, V]) Stop() {
}

View File

@ -88,18 +88,75 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return v.value, v.err
}
func (x *LayLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
var (
err error
once sync.Once
)
res := make(map[K]V)
queries := make([]K, 0)
setVs := make(map[K]*layLruItem[V])
for _, key := range keys {
x.lock.Lock()
v, ok := x.core.Get(key)
x.lock.Unlock()
if ok {
v.lock.Lock()
expires, value, err1 := v.expires, v.value, v.err
v.lock.Unlock()
if expires != 0 && expires > time.Now().UnixMilli() {
x.target.IncrGetHit()
res[key] = value
if err1 != nil {
once.Do(func() {
err = err1
})
}
continue
}
}
queries = append(queries, key)
}
values, err1 := fetch(queries)
if err1 != nil {
once.Do(func() {
err = err1
})
}
for key, val := range values {
v := &layLruItem[V]{}
v.value = val
if err == nil {
v.expires = time.Now().Add(x.successTTL).UnixMilli()
x.target.IncrGetSuccess()
} else {
v.expires = time.Now().Add(x.failedTTL).UnixMilli()
x.target.IncrGetFailed()
}
setVs[key] = v
x.lock.Lock()
x.core.Add(key, v)
x.lock.Unlock()
res[key] = val
}
return res, err
}
//func (x *LayLRU[K, V]) Has(key K) bool {
// x.lock.Lock()
// defer x.lock.Unlock()
// return x.core.Contains(key)
//}
func (x *LayLRU[K, V]) Set(key K, value V) {
x.lock.Lock()
defer x.lock.Unlock()
x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
}
func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock()
defer x.lock.Unlock()

View File

@ -32,6 +32,29 @@ type slotLRU[K comparable, V any] struct {
hash func(k K) uint64
}
func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
var (
slotKeys = make(map[uint64][]K)
vs = make(map[K]V)
)
for _, k := range keys {
index := x.getIndex(k)
slotKeys[index] = append(slotKeys[index], k)
}
for k, v := range slotKeys {
batches, err := x.slots[k].GetBatch(v, fetch)
if err != nil {
return nil, err
}
for key, value := range batches {
vs[key] = value
}
}
return vs, nil
}
func (x *slotLRU[K, V]) getIndex(k K) uint64 {
return x.hash(k) % x.n
}
@ -40,6 +63,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return x.slots[x.getIndex(key)].Get(key, fetch)
}
func (x *slotLRU[K, V]) Set(key K, value V) {
x.slots[x.getIndex(key)].Set(key, value)
}
func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
return x.slots[x.getIndex(key)].SetHas(key, value)
}
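GetBatch on the slotted LRU first groups keys by slot, so each inner cache fetches only its own keys and a given key always routes to the same slot. A stand-alone mirror of the routing (FNV here is an assumption for what localcache.LRUStringHash does):

package main

import (
	"fmt"
	"hash/fnv"
)

// slotIndex mirrors slotLRU.getIndex: hash the key and take it modulo the
// slot count, so the same key always lands in the same inner LRU.
func slotIndex(key string, n uint64) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64() % n
}

func main() {
	slots := make(map[uint64][]string)
	for _, k := range []string{"u1", "u2", "u3", "u4", "u5"} {
		i := slotIndex(k, 4)
		slots[i] = append(slots[i], k) // the per-slot batches GetBatch would fetch
	}
	fmt.Println(slots)
}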

View File

@ -86,7 +86,7 @@ func (c *ConversationLocalCache) GetConversation(ctx context.Context, userID, co
if err == nil {
log.ZDebug(ctx, "ConversationLocalCache GetConversation return", "userID", userID, "conversationID", conversationID, "value", val)
} else {
log.ZWarn(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID)
}
}()
var cache cacheProto[pbconversation.Conversation]

View File

@ -2,60 +2,197 @@ package rpccache
import (
"context"
"fmt"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/db/cacheutil"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/redis/go-redis/v9"
)
func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) {
l := &sync.Mutex{}
x := &OnlineCache{
user: user,
group: group,
fullUserCache: fullUserCache,
Lock: l,
Cond: sync.NewCond(l),
}
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
switch x.fullUserCache {
case true:
log.ZDebug(ctx, "fullUserCache is true")
x.mapCache = cacheutil.NewCache[string, []int32]()
go func() {
if err := x.initUsersOnlineStatus(ctx); err != nil {
log.ZError(ctx, "initUsersOnlineStatus failed", err)
}
}()
case false:
log.ZDebug(ctx, "fullUserCache is false")
x.lruCache = lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
})
x.CurrentPhase.Store(DoSubscribeOver)
x.Cond.Broadcast()
}
go func() {
x.doSubscribe(ctx, rdb, fn)
}()
return x, nil
}
const (
Begin uint32 = iota
DoOnlineStatusOver
DoSubscribeOver
)
type OnlineCache struct {
user rpcclient.UserRpcClient
group *GroupLocalCache
// fullUserCache, when enabled, caches every user's online status in mapCache;
// otherwise only a subset of users' statuses (online or not) is kept in lruCache.
fullUserCache bool
lruCache lru.LRU[string, []int32]
mapCache *cacheutil.Cache[string, []int32]
Lock *sync.Mutex
Cond *sync.Cond
CurrentPhase atomic.Uint32
}
func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) {
log.ZDebug(ctx, "init users online status begin")
var (
totalSet atomic.Int64
maxTries = 5
retryInterval = time.Second * 5
resp *user.GetAllOnlineUsersResp
)
defer func(t time.Time) {
log.ZInfo(ctx, "init users online status end", "cost", time.Since(t), "totalSet", totalSet.Load())
o.CurrentPhase.Store(DoOnlineStatusOver)
o.Cond.Broadcast()
}(time.Now())
retryOperation := func(operation func() error, operationName string) error {
for i := 0; i < maxTries; i++ {
if err = operation(); err != nil {
log.ZWarn(ctx, fmt.Sprintf("initUsersOnlineStatus: %s failed", operationName), err)
time.Sleep(retryInterval)
} else {
return nil
}
}
return err
}
cursor := uint64(0)
for resp == nil || resp.NextCursor != 0 {
if err = retryOperation(func() error {
resp, err = o.user.GetAllOnlineUsers(ctx, cursor)
if err != nil {
return err
}
for _, u := range resp.StatusList {
if u.Status == constant.Online {
o.setUserOnline(u.UserID, u.PlatformIDs)
}
totalSet.Add(1)
}
cursor = resp.NextCursor
return nil
}, "getAllOnlineUsers"); err != nil {
return err
}
}
return nil
}
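This is cursor pagination wrapped in a bounded per-page retry: each page is retried up to maxTries before the whole init fails, and a cursor of 0 from the server marks the final page. The same pattern in isolation (fetchPage stands in for user.GetAllOnlineUsers; the interval is shortened for the example):

package main

import (
	"fmt"
	"time"
)

type page struct {
	items      []string
	nextCursor uint64 // 0 marks the final page, as in GetAllOnlineUsersResp
}

func fetchAll(fetchPage func(cursor uint64) (*page, error)) ([]string, error) {
	const maxTries = 5
	var all []string
	var cursor uint64
	for {
		var p *page
		var err error
		for i := 0; i < maxTries; i++ { // retry one page before failing the init
			if p, err = fetchPage(cursor); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond) // stand-in for the 5s retryInterval
		}
		if err != nil {
			return nil, err
		}
		all = append(all, p.items...)
		if p.nextCursor == 0 {
			return all, nil
		}
		cursor = p.nextCursor
	}
}

func main() {
	pages := map[uint64]*page{0: {[]string{"u1"}, 7}, 7: {[]string{"u2"}, 0}}
	got, err := fetchAll(func(c uint64) (*page, error) { return pages[c], nil })
	fmt.Println(got, err)
}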
func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) {
o.Lock.Lock()
ch := rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel()
for o.CurrentPhase.Load() < DoOnlineStatusOver {
o.Cond.Wait()
}
o.Lock.Unlock()
log.ZInfo(ctx, "begin doSubscribe")
doMessage := func(message *redis.Message) {
userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
if err != nil {
log.ZError(ctx, "OnlineCache setHasUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
return
}
log.ZDebug(ctx, fmt.Sprintf("get subscribe %s message", cachekey.OnlineChannel), "userID", userID, "platformIDs", platformIDs)
switch o.fullUserCache {
case true:
if len(platformIDs) == 0 {
// offline
o.mapCache.Delete(userID)
} else {
o.mapCache.Store(userID, platformIDs)
}
case false:
storageCache := o.setHasUserOnline(userID, platformIDs)
log.ZDebug(ctx, "OnlineCache setHasUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
if fn != nil {
fn(ctx, userID, platformIDs)
}
}
}
if o.CurrentPhase.Load() == DoOnlineStatusOver {
for done := false; !done; {
select {
case message := <-ch:
doMessage(message)
default:
o.CurrentPhase.Store(DoSubscribeOver)
o.Cond.Broadcast()
done = true
}
}
}
for message := range ch {
doMessage(message)
}
}
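The phase handshake is the subtle part: the Redis subscription is opened before the full snapshot finishes loading, so updates published during the load are buffered in the channel and replayed by the drain loop before the goroutine settles into the steady-state range. A toy reduction of that handshake; note that it broadcasts while holding the lock, which sidesteps the missed-wakeup window:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	begin uint32 = iota
	snapshotDone
	drainDone
)

func main() {
	var (
		mu    sync.Mutex
		cond  = sync.NewCond(&mu)
		phase atomic.Uint32
		ch    = make(chan string, 16) // "subscription" opened before the snapshot
	)
	ch <- "update-during-snapshot" // published while the snapshot is loading
	go func() {                    // snapshot loader, initUsersOnlineStatus in the PR
		// ... load the full online map here ...
		mu.Lock()
		phase.Store(snapshotDone)
		cond.Broadcast() // broadcast under the lock to avoid a missed wakeup
		mu.Unlock()
	}()
	mu.Lock()
	for phase.Load() < snapshotDone { // subscriber waits, as doSubscribe does
		cond.Wait()
	}
	mu.Unlock()
drain:
	for { // replay anything buffered during the load, then flip the phase
		select {
		case m := <-ch:
			fmt.Println("replay:", m)
		default:
			phase.Store(drainDone)
			break drain
		}
	}
	fmt.Println("steady state, phase =", phase.Load())
}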
func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) {
return o.user.GetUserOnlinePlatform(ctx, userID)
})
if err != nil {
log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID)
return nil, err
}
//log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs)
return platformIDs, nil
}
@ -69,6 +206,16 @@ func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string)
return platformIDs, nil
}
// func (o *OnlineCache) GetUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string]int32, error) {
// platformIDs, err := o.getUserOnlinePlatform(ctx, userIDs)
// if err != nil {
// return nil, err
// }
// tmp := make([]int32, len(platformIDs))
// copy(tmp, platformIDs)
// return platformIDs, nil
// }
func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
platformIDs, err := o.getUserOnlinePlatform(ctx, userID)
if err != nil {
@ -77,6 +224,64 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
return len(platformIDs) > 0, nil
}
func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) {
platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) {
platformIDsMap := make(map[string][]int32)
usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers)
if err != nil {
return nil, err
}
for _, u := range usersStatus {
platformIDsMap[u.UserID] = u.PlatformIDs
}
return platformIDsMap, nil
})
if err != nil {
log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userIDs)
return nil, err
}
return platformIDsMap, nil
}
func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, []string, error) {
t := time.Now()
var (
onlineUserIDs = make([]string, 0, len(userIDs))
offlineUserIDs = make([]string, 0, len(userIDs))
)
switch o.fullUserCache {
case true:
for _, userID := range userIDs {
if _, ok := o.mapCache.Load(userID); ok {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
}
case false:
userOnlineMap, err := o.getUserOnlinePlatformBatch(ctx, userIDs)
if err != nil {
return nil, nil, err
}
for key, value := range userOnlineMap {
if len(value) > 0 {
onlineUserIDs = append(onlineUserIDs, key)
} else {
offlineUserIDs = append(offlineUserIDs, key)
}
}
}
log.ZInfo(ctx, "get users online", "online users length", len(onlineUserIDs), "offline users length", len(offlineUserIDs), "cost", time.Since(t))
return onlineUserIDs, offlineUserIDs, nil
}
//func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
// onlineUserIDs := make([]string, 0, len(userIDs))
// for _, userID := range userIDs {
@ -111,6 +316,15 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
// return onlineUserIDs, nil
//}
func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) {
switch o.fullUserCache {
case true:
o.mapCache.Store(userID, platformIDs)
case false:
o.lruCache.Set(userID, platformIDs)
}
}
func (o *OnlineCache) setHasUserOnline(userID string, platformIDs []int32) bool {
return o.lruCache.SetHas(userID, platformIDs)
}
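From the caller's side the two cache modes are invisible; consumers just ask for the online/offline split. A hedged sketch of a push-side caller (the package placement and callback names are assumed, not from this PR):

package pushfanout // illustrative; real call sites in the PR may differ

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
)

// fanOut shows the intended use of GetUsersOnline: deliver to online users
// directly and hand the rest to the offline-push path.
func fanOut(ctx context.Context, oc *rpccache.OnlineCache, userIDs []string,
	pushOnline func([]string), queueOffline func([]string)) error {
	online, offline, err := oc.GetUsersOnline(ctx, userIDs)
	if err != nil {
		return err
	}
	pushOnline(online)
	queueOffline(offline)
	return nil
}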

View File

@ -17,6 +17,11 @@ package rpcclient
import (
"context"
"encoding/json"
"time"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msg"
@ -28,9 +33,6 @@ import (
"github.com/openimsdk/tools/utils/idutil"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/timeutil"
)
func newContentTypeConf(conf *config.Notification) map[int32]config.NotificationConfig {
@ -221,6 +223,19 @@ func (m *MessageRpcClient) PullMessageBySeqList(ctx context.Context, req *sdkws.
return resp, nil
}
func (m *MessageRpcClient) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *msg.GetConversationsHasReadAndMaxSeqReq) (*msg.GetConversationsHasReadAndMaxSeqResp, error) {
resp, err := m.Client.GetConversationsHasReadAndMaxSeq(ctx, req)
if err != nil {
// Return the gRPC error as-is; the caller is responsible for adding context.
return nil, err
}
return resp, nil
}
func (m *MessageRpcClient) GetSeqMessage(ctx context.Context, req *msg.GetSeqMessageReq) (*msg.GetSeqMessageResp, error) {
return m.Client.GetSeqMessage(ctx, req)
}
func (m *MessageRpcClient) GetConversationMaxSeq(ctx context.Context, conversationID string) (int64, error) {
resp, err := m.Client.GetConversationMaxSeq(ctx, &msg.GetConversationMaxSeqReq{ConversationID: conversationID})
if err != nil {

View File

@ -169,6 +169,15 @@ func (u *UserRpcClient) Access(ctx context.Context, ownerUserID string) error {
return authverify.CheckAccessV3(ctx, ownerUserID, u.imAdminUserID)
}
// GetAllUserID retrieves one page of user IDs and returns the raw response.
func (u *UserRpcClient) GetAllUserID(ctx context.Context, pageNumber, showNumber int32) (*user.GetAllUserIDResp, error) {
resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
if err != nil {
return nil, err
}
return resp, nil
}
// GetAllUserIDs retrieves all user IDs with pagination options.
func (u *UserRpcClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) {
resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
@ -215,3 +224,7 @@ func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string
}
return resp[0].PlatformIDs, nil
}
func (u *UserRpcClient) GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) {
return u.Client.GetAllOnlineUsers(ctx, &user.GetAllOnlineUsersReq{Cursor: cursor})
}

View File

@ -19,6 +19,14 @@ func GenGroupConversationID(groupID string) string {
return "sg_" + groupID return "sg_" + groupID
} }
func IsGroupConversationID(conversationID string) bool {
return strings.HasPrefix(conversationID, "sg_")
}
func IsNotificationConversationID(conversationID string) bool {
return strings.HasPrefix(conversationID, "n_")
}
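Both helpers are pure prefix checks on the conversation ID. A stand-alone mirror (the "si_" single-chat prefix is inferred from the same naming convention and does not appear in this hunk):

package main

import (
	"fmt"
	"strings"
)

// Mirror of the new prefix helpers: conversation IDs encode their kind in a
// short prefix ("sg_" for group, "n_" for notification; "si_" is assumed).
func isGroupConversationID(id string) bool        { return strings.HasPrefix(id, "sg_") }
func isNotificationConversationID(id string) bool { return strings.HasPrefix(id, "n_") }

func main() {
	for _, id := range []string{"sg_g123", "n_u1_u2", "si_u1_u2"} {
		fmt.Println(id, isGroupConversationID(id), isNotificationConversationID(id))
	}
}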
func GenConversationUniqueKeyForSingle(sendID, recvID string) string {
l := []string{sendID, recvID}
sort.Strings(l)

View File

@ -35,7 +35,7 @@ done
echo "Kafka is ready. Creating topics..." echo "Kafka is ready. Creating topics..."
topics=("toRedis" "toMongo" "toPush") topics=("toRedis" "toMongo" "toPush" "toOfflinePush")
partitions=8 partitions=8
replicationFactor=1 replicationFactor=1

View File

@ -1,92 +0,0 @@
#!/usr/bin/env bash
# Copyright © 2023 OpenIMSDK.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
#
# Store this file as .git/hooks/commit-msg in your repository in order to
# enforce checking for proper commit message format before actual commits.
# You may need to make the scripts executable by 'chmod +x .git/hooks/commit-msg'.
# commit-msg use go-gitlint tool, install go-gitlint via `go get github.com/llorllale/go-gitlint/cmd/go-gitlint`
# go-gitlint --msg-file="$1"
# An example hook scripts to check the commit log message.
# Called by "git commit" with one argument, the name of the file
# that has the commit message. The hook should exit with non-zero
# status after issuing an appropriate message if it wants to stop the
# commit. The hook is allowed to edit the commit message file.
YELLOW="\e[93m"
GREEN="\e[32m"
RED="\e[31m"
ENDCOLOR="\e[0m"
printMessage() {
printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n"
}
printSuccess() {
printf "${GREEN}OpenIM : $1${ENDCOLOR}\n"
}
printError() {
printf "${RED}OpenIM : $1${ENDCOLOR}\n"
}
printMessage "Running the OpenIM commit-msg hook."
# This example catches duplicate Signed-off-by lines.
test "" = "$(grep '^Signed-off-by: ' "$1" |
sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || {
echo >&2 Duplicate Signed-off-by lines.
exit 1
}
# TODO: go-gitlint dir set
OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
GITLINT_DIR="$OPENIM_ROOT/_output/tools/go-gitlint"
$GITLINT_DIR \
--msg-file=$1 \
--subject-regex="^(build|chore|ci|docs|feat|feature|fix|perf|refactor|revert|style|bot|test)(.*)?:\s?.*" \
--subject-maxlen=150 \
--subject-minlen=10 \
--body-regex=".*" \
--max-parents=1
if [ $? -ne 0 ]
then
if ! command -v $GITLINT_DIR &>/dev/null; then
printError "$GITLINT_DIR not found. Please run 'make tools' OR 'make tools.verify.go-gitlint' make verto install it."
fi
printError "Please fix your commit message to match kubecub coding standards"
printError "https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694#file-githook-md"
exit 1
fi
### Add Sign-off-by line to the end of the commit message
# Get local git config
NAME=$(git config user.name)
EMAIL=$(git config user.email)
# Check if the commit message contains a sign-off line
grep -qs "^Signed-off-by: " "$1"
SIGNED_OFF_BY_EXISTS=$?
# Add "Signed-off-by" line if it doesn't exist
if [ $SIGNED_OFF_BY_EXISTS -ne 0 ]; then
echo -e "\nSigned-off-by: $NAME <$EMAIL>" >> "$1"
fi

View File

@ -1,111 +0,0 @@
#!/usr/bin/env bash
# Copyright © 2023 OpenIMSDK.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
# This is a pre-commit hook that ensures attempts to commit files that are
# are larger than $limit to your _local_ repo fail, with a helpful error message.
# You can override the default limit of 2MB by supplying the environment variable:
# GIT_FILE_SIZE_LIMIT=50000000 git commit -m "test: this commit is allowed file sizes up to 50MB"
#
# ==============================================================================
#
LC_ALL=C
local_branch="$(git rev-parse --abbrev-ref HEAD)"
valid_branch_regex="^(main|master|develop|release(-[a-zA-Z0-9._-]+)?)$|(feature|feat|openim|hotfix|test|bug|bot|refactor|revert|ci|cicd|style|)\/[a-z0-9._-]+$|^HEAD$"
YELLOW="\e[93m"
GREEN="\e[32m"
RED="\e[31m"
ENDCOLOR="\e[0m"
printMessage() {
printf "${YELLOW}openim : $1${ENDCOLOR}\n"
}
printSuccess() {
printf "${GREEN}openim : $1${ENDCOLOR}\n"
}
printError() {
printf "${RED}openim : $1${ENDCOLOR}\n"
}
printMessage "Running local openim pre-commit hook."
# flutter format .
# https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694#file-githook-md
# TODO! GIT_FILE_SIZE_LIMIT=50000000 git commit -m "test: this commit is allowed file sizes up to 50MB"
# Maximum file size limit in bytes
limit=${GIT_FILE_SIZE_LIMIT:-2000000} # Default 2MB
limitInMB=$(( $limit / 1000000 ))
function file_too_large(){
filename=$0
filesize=$(( $1 / 2**20 ))
cat <<HEREDOC
File $filename is $filesize MB, which is larger than github's maximum
file size (2 MB). We will not be able to push this file to GitHub.
Commit aborted
HEREDOC
git status
}
# Move to the repo root so git files paths make sense
repo_root=$( git rev-parse --show-toplevel )
cd $repo_root
empty_tree=$( git hash-object -t tree /dev/null )
if git rev-parse --verify HEAD > /dev/null 2>&1
then
against=HEAD
else
against="$empty_tree"
fi
# Set split so that for loop below can handle spaces in file names by splitting on line breaks
IFS='
'
shouldFail=false
for file in $( git diff-index --cached --name-only $against ); do
file_size=$(([ ! -f $file ] && echo 0) || (ls -la $file | awk '{ print $5 }'))
if [ "$file_size" -gt "$limit" ]; then
printError "File $file is $(( $file_size / 10**6 )) MB, which is larger than our configured limit of $limitInMB MB"
shouldFail=true
fi
done
if $shouldFail
then
printMessage "If you really need to commit this file, you can override the size limit by setting the GIT_FILE_SIZE_LIMIT environment variable, e.g. GIT_FILE_SIZE_LIMIT=42000000 for 42MB. Or, commit with the --no-verify switch to skip the check entirely."
printError "Commit aborted"
exit 1;
fi
if [[ ! $local_branch =~ $valid_branch_regex ]]
then
printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex.
Your commit will be rejected. You should rename your branch to a valid name(feat/name OR fix/name) and try again."
printError "For more on this, read on: https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694"
exit 1
fi

View File

@ -1,119 +0,0 @@
#!/usr/bin/env bash
# Copyright © 2023 OpenIMSDK.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
#
YELLOW="\e[93m"
GREEN="\e[32m"
RED="\e[31m"
ENDCOLOR="\e[0m"
local_branch="$(git rev-parse --abbrev-ref HEAD)"
valid_branch_regex="^(main|master|develop|release(-[a-zA-Z0-9._-]+)?)$|(feature|feat|openim|hotfix|test|bug|ci|cicd|style|)\/[a-z0-9._-]+$|^HEAD$"
printMessage() {
printf "${YELLOW}OpenIM : $1${ENDCOLOR}\n"
}
printSuccess() {
printf "${GREEN}OpenIM : $1${ENDCOLOR}\n"
}
printError() {
printf "${RED}OpenIM : $1${ENDCOLOR}\n"
}
printMessage "Running local OpenIM pre-push hook."
if [[ $(git status --porcelain) ]]; then
printError "This scripts needs to run against committed code only. Please commit or stash you changes."
exit 1
fi
COLOR_SUFFIX="\033[0m"
BLACK_PREFIX="\033[30m"
RED_PREFIX="\033[31m"
GREEN_PREFIX="\033[32m"
BACKGROUND_GREEN="\033[33m"
BLUE_PREFIX="\033[34m"
PURPLE_PREFIX="\033[35m"
SKY_BLUE_PREFIX="\033[36m"
WHITE_PREFIX="\033[37m"
BOLD_PREFIX="\033[1m"
UNDERLINE_PREFIX="\033[4m"
ITALIC_PREFIX="\033[3m"
# Function to print colored text
print_color() {
local text=$1
local color=$2
echo -e "${color}${text}${COLOR_SUFFIX}"
}
# Function to print section separator
print_separator() {
print_color "==========================================================" ${PURPLE_PREFIX}
}
# Get current time
time=$(date +"%Y-%m-%d %H:%M:%S")
# Print section separator
print_separator
# Print time of submission
print_color "PTIME: ${time}" "${BOLD_PREFIX}${CYAN_PREFIX}"
echo ""
author=$(git config user.name)
repository=$(basename -s .git $(git config --get remote.origin.url))
# Print additional information if needed
print_color "Repository: ${repository}" "${BLUE_PREFIX}"
echo ""
print_color "Author: ${author}" "${PURPLE_PREFIX}"
# Print section separator
print_separator
file_list=$(git diff --name-status HEAD @{u})
added_files=$(grep -c '^A' <<< "$file_list")
modified_files=$(grep -c '^M' <<< "$file_list")
deleted_files=$(grep -c '^D' <<< "$file_list")
print_color "Added Files: ${added_files}" "${BACKGROUND_GREEN}"
print_color "Modified Files: ${modified_files}" "${BACKGROUND_GREEN}"
print_color "Deleted Files: ${deleted_files}" "${BACKGROUND_GREEN}"
if [[ ! $local_branch =~ $valid_branch_regex ]]
then
printError "There is something wrong with your branch name. Branch names in this project must adhere to this contract: $valid_branch_regex.
Your commit will be rejected. You should rename your branch to a valid name(feat/name OR fix/name) and try again."
printError "For more on this, read on: https://gist.github.com/cubxxw/126b72104ac0b0ca484c9db09c3e5694"
exit 1
fi
#
#printMessage "Running the Flutter analyzer"
#flutter analyze
#
#if [ $? -ne 0 ]; then
# printError "Flutter analyzer error"
# exit 1
#fi
#
#printMessage "Finished running the Flutter analyzer"

View File

@ -3,8 +3,8 @@ serviceBinaries:
openim-crontask: 1
openim-rpc-user: 1
openim-msggateway: 1
openim-push: 8
openim-msgtransfer: 8
openim-rpc-conversation: 1
openim-rpc-auth: 1
openim-rpc-group: 1

View File

@ -53,7 +53,6 @@ type User struct {
// UserRegisterRequest represents a request to register a user.
type UserRegisterRequest struct {
Users []User `json:"users"`
}
@ -109,7 +108,6 @@ func RegisterUser(token, userID, nickname, faceURL string) error {
FaceURL: faceURL,
}
reqBody := UserRegisterRequest{
Users: []User{user},
}
reqBytes, err := json.Marshal(reqBody)
View File

@ -18,6 +18,12 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/cmd"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/db/mongoutil"
@ -27,11 +33,6 @@ import (
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/s3/minio"
"github.com/openimsdk/tools/system/program"
)
const maxRetry = 180
@ -65,7 +66,7 @@ func CheckMinIO(ctx context.Context, config *config.Minio) error {
}
func CheckKafka(ctx context.Context, conf *config.Kafka) error {
return kafka.Check(ctx, conf.Build(), []string{conf.ToMongoTopic, conf.ToRedisTopic, conf.ToPushTopic, conf.ToOfflinePushTopic})
}
func initConfig(configDir string) (*config.Mongo, *config.Redis, *config.Kafka, *config.Minio, *config.Discovery, error) {