From 6619183a487505722cfa9dbb2ecf17ff00152358 Mon Sep 17 00:00:00 2001 From: wangchuxiao Date: Wed, 15 Feb 2023 15:52:32 +0800 Subject: [PATCH] errcode --- cmd/api/main.go | 12 +- cmd/crontask/main.go | 2 +- cmd/msgtransfer/main.go | 6 +- cmd/rpc/auth/main.go | 4 +- cmd/rpc/conversation/main.go | 4 +- cmd/rpc/friend/main.go | 4 +- cmd/rpc/group/main.go | 4 +- cmd/rpc/msg/main.go | 4 +- cmd/rpc/user/main.go | 4 +- config/config.yaml | 7 - config/notification.yaml | 1 - internal/crontask/clear_msg.go | 211 ++---- internal/crontask/clear_msg_test.go | 9 +- internal/crontask/cron_task.go | 63 +- internal/msggateway/init.go | 4 +- internal/msggateway/logic.go | 8 +- internal/msggateway/relay_rpc_server.go | 28 +- internal/msggateway/ws_server.go | 6 +- internal/msgtransfer/db.go | 29 - internal/msgtransfer/init.go | 108 +-- internal/msgtransfer/modify_msg_handler.go | 34 +- .../msgtransfer/online_history_msg_handler.go | 439 ++---------- .../online_msg_to_mongo_handler.go | 36 +- .../msgtransfer/persistent_msg_handler.go | 36 +- internal/push/logic/init.go | 8 +- internal/push/logic/push_rpc_server.go | 10 +- internal/push/logic/push_to_client.go | 10 +- internal/rpc/auth/auth.go | 14 +- internal/rpc/conversation/conversaion.go | 17 +- internal/rpc/friend/friend.go | 10 +- internal/rpc/group/callback.go | 2 +- internal/rpc/group/group.go | 12 +- internal/rpc/msg/pull_message.go | 22 +- internal/rpc/msg/query_msg.go | 12 +- internal/rpc/msg/rpc_chat.go | 40 +- internal/rpc/msg/send_msg.go | 32 +- internal/rpc/user/callback.go | 1 - internal/rpc/user/user.go | 3 +- pkg/common/config/config.go | 5 - pkg/common/db/cache/conversation.go | 6 +- pkg/common/db/cache/redis.go | 207 +++--- pkg/common/db/cache/token.go | 4 +- pkg/common/db/controller/extend_msg.go | 99 +++ pkg/common/db/controller/group.go | 18 +- pkg/common/db/controller/msg.go | 572 +++++++++++++-- pkg/common/db/controller/user.go | 13 + pkg/common/db/relation/user_model_k.go | 7 + 
.../db/table/unrelation/extend_msg_set.go | 36 +- pkg/common/db/table/unrelation/msg.go | 92 ++- pkg/common/db/table/unrelation/super_group.go | 4 - pkg/common/db/unrelation/batch_insert_chat.go | 171 ----- pkg/common/db/unrelation/extend_msg.go | 26 +- pkg/common/db/unrelation/mongo.go | 6 +- pkg/common/db/unrelation/mongo_model.go | 670 ------------------ pkg/common/db/unrelation/msg.go | 580 ++------------- pkg/common/db/unrelation/super_group.go | 21 +- pkg/common/http/http_client.go | 2 +- pkg/common/kafka/producer.go | 4 +- pkg/common/{prometheus => prome}/gather.go | 2 +- pkg/common/{prometheus => prome}/grpc.go | 2 +- .../{prometheus => prome}/prometheus.go | 2 +- pkg/utils/utils.go | 5 + 62 files changed, 1319 insertions(+), 2491 deletions(-) delete mode 100644 internal/msgtransfer/db.go delete mode 100644 internal/rpc/user/callback.go create mode 100644 pkg/common/db/controller/extend_msg.go delete mode 100644 pkg/common/db/unrelation/batch_insert_chat.go delete mode 100644 pkg/common/db/unrelation/mongo_model.go rename pkg/common/{prometheus => prome}/gather.go (99%) rename pkg/common/{prometheus => prome}/grpc.go (98%) rename pkg/common/{prometheus => prome}/prometheus.go (98%) diff --git a/cmd/api/main.go b/cmd/api/main.go index aaff71bf8..a9430f3c0 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -28,7 +28,7 @@ import ( "github.com/gin-gonic/gin" //"syscall" "Open_IM/pkg/common/constant" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" ) // @title open-IM-Server API @@ -50,11 +50,11 @@ func main() { log.Info("load config: ", config.Config) r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) if config.Config.Prometheus.Enable { - promePkg.NewApiRequestCounter() - promePkg.NewApiRequestFailedCounter() - promePkg.NewApiRequestSuccessCounter() - r.Use(promePkg.PromeTheusMiddleware) - r.GET("/metrics", promePkg.PrometheusHandler()) + prome.NewApiRequestCounter() + prome.NewApiRequestFailedCounter() + 
prome.NewApiRequestSuccessCounter() + r.Use(prome.PromeTheusMiddleware) + r.GET("/metrics", prome.PrometheusHandler()) } // user routing group, which handles user registration and login services userRouterGroup := r.Group("/user") diff --git a/cmd/crontask/main.go b/cmd/crontask/main.go index 8428f0b56..6dcc33ddc 100644 --- a/cmd/crontask/main.go +++ b/cmd/crontask/main.go @@ -1,7 +1,7 @@ package main import ( - "Open_IM/internal/cron_task" + "Open_IM/internal/crontask" "flag" "fmt" "time" diff --git a/cmd/msgtransfer/main.go b/cmd/msgtransfer/main.go index 969b76119..9d5923aea 100644 --- a/cmd/msgtransfer/main.go +++ b/cmd/msgtransfer/main.go @@ -1,7 +1,7 @@ package main import ( - "Open_IM/internal/msg_transfer/logic" + "Open_IM/internal/msgtransfer" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" "Open_IM/pkg/common/log" @@ -16,8 +16,8 @@ func main() { prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.MessageTransferPrometheusPort[0], "MessageTransferPrometheusPort default listen port") flag.Parse() log.NewPrivateLog(constant.LogFileName) - logic.Init() + msgTransfer := msgtransfer.NewMsgTransfer() fmt.Println("start msg_transfer server ", ", OpenIM version: ", constant.CurrentVersion, "\n") - logic.Run(*prometheusPort) + msgTransfer.Run(*prometheusPort) wg.Wait() } diff --git a/cmd/rpc/auth/main.go b/cmd/rpc/auth/main.go index b9485454d..4881e1789 100644 --- a/cmd/rpc/auth/main.go +++ b/cmd/rpc/auth/main.go @@ -4,7 +4,7 @@ import ( rpcAuth "Open_IM/internal/rpc/auth" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - promePkg "Open_IM/pkg/common/prometheus" + "Open_IM/pkg/common/prome" "flag" "fmt" ) @@ -17,7 +17,7 @@ func main() { fmt.Println("start auth rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort) go func() { - err := promePkg.StartPromeSrv(*prometheusPort) + err := prome.StartPromeSrv(*prometheusPort) if err != nil { panic(err) } 
diff --git a/cmd/rpc/conversation/main.go b/cmd/rpc/conversation/main.go index f3aef03fc..e9ef7d201 100644 --- a/cmd/rpc/conversation/main.go +++ b/cmd/rpc/conversation/main.go @@ -4,7 +4,7 @@ import ( rpcConversation "Open_IM/internal/rpc/conversation" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - promePkg "Open_IM/pkg/common/prometheus" + "Open_IM/pkg/common/prome" "flag" "fmt" ) @@ -17,7 +17,7 @@ func main() { fmt.Println("start conversation rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") rpcServer := rpcConversation.NewRpcConversationServer(*rpcPort) go func() { - err := promePkg.StartPromeSrv(*prometheusPort) + err := prome.StartPromeSrv(*prometheusPort) if err != nil { panic(err) } diff --git a/cmd/rpc/friend/main.go b/cmd/rpc/friend/main.go index 3b88d6964..101ffa6ff 100644 --- a/cmd/rpc/friend/main.go +++ b/cmd/rpc/friend/main.go @@ -4,7 +4,7 @@ import ( "Open_IM/internal/rpc/friend" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - promePkg "Open_IM/pkg/common/prometheus" + "Open_IM/pkg/common/prome" "flag" "fmt" ) @@ -17,7 +17,7 @@ func main() { fmt.Println("start friend rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") rpcServer := friend.NewFriendServer(*rpcPort) go func() { - err := promePkg.StartPromeSrv(*prometheusPort) + err := prome.StartPromeSrv(*prometheusPort) if err != nil { panic(err) } diff --git a/cmd/rpc/group/main.go b/cmd/rpc/group/main.go index 8a5f26cfd..ad431fa16 100644 --- a/cmd/rpc/group/main.go +++ b/cmd/rpc/group/main.go @@ -4,7 +4,7 @@ import ( "Open_IM/internal/rpc/group" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - promePkg "Open_IM/pkg/common/prometheus" + "Open_IM/pkg/common/prome" "flag" "fmt" ) @@ -17,7 +17,7 @@ func main() { fmt.Println("start group rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") rpcServer := group.NewGroupServer(*rpcPort) go func()
{ - err := promePkg.StartPromeSrv(*prometheusPort) + err := prome.StartPromeSrv(*prometheusPort) if err != nil { panic(err) } diff --git a/cmd/rpc/msg/main.go b/cmd/rpc/msg/main.go index c9bcecf9c..52c4c0b2c 100644 --- a/cmd/rpc/msg/main.go +++ b/cmd/rpc/msg/main.go @@ -4,7 +4,7 @@ import ( "Open_IM/internal/rpc/msg" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - promePkg "Open_IM/pkg/common/prometheus" + "Open_IM/pkg/common/prome" "flag" "fmt" ) @@ -17,7 +17,7 @@ func main() { fmt.Println("start msg rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") rpcServer := msg.NewRpcChatServer(*rpcPort) go func() { - err := promePkg.StartPromeSrv(*prometheusPort) + err := prome.StartPromeSrv(*prometheusPort) if err != nil { panic(err) } diff --git a/cmd/rpc/user/main.go b/cmd/rpc/user/main.go index f6191280f..952b2fbb2 100644 --- a/cmd/rpc/user/main.go +++ b/cmd/rpc/user/main.go @@ -4,7 +4,7 @@ import ( "Open_IM/internal/rpc/user" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - promePkg "Open_IM/pkg/common/prometheus" + "Open_IM/pkg/common/prome" "flag" "fmt" ) @@ -17,7 +17,7 @@ func main() { fmt.Println("start user rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") rpcServer := user.NewUserServer(*rpcPort) go func() { - err := promePkg.StartPromeSrv(*prometheusPort) + err := prome.StartPromeSrv(*prometheusPort) if err != nil { panic(err) } diff --git a/config/config.yaml b/config/config.yaml index 871cc4e49..a6ddea70b 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -51,9 +51,6 @@ kafka: ws2mschat: addr: [ 127.0.0.1:9092 ] #kafka配置,默认即可 topic: "ws2ms_chat" #用于mongo和mysql保存消息 -# ws2mschatoffline: -# addr: [ 127.0.0.1:9092 ] #kafka配置,默认即可 -# topic: "ws2ms_chat_offline" #原为离线用户消息处理,目前暂时弃用 msgtomongo: addr: [ 127.0.0.1:9092 ] #kafka配置,默认即可 topic: "msg_to_mongo" @@ -223,8 +220,6 @@ push: appSecret: enable: false - - manager: #app管理员userID和对应的secret 建议修改。
用于管理后台登录,也可以用户管理后台对应的api appManagerUid: [ "openIM123456","openIM654321", "openIM333", "openIMAdmin"] @@ -238,8 +233,6 @@ multiloginpolicy: 1 #msg log insert to db chatpersistencemysql: true -#可靠性存储 -reliablestorage: false #消息缓存时间 msgCacheTimeout: 86400 #群聊已读开启 diff --git a/config/notification.yaml b/config/notification.yaml index fdbdff06e..d855cf723 100644 --- a/config/notification.yaml +++ b/config/notification.yaml @@ -1,4 +1,3 @@ - notification: groupCreated: conversation: diff --git a/internal/crontask/clear_msg.go b/internal/crontask/clear_msg.go index 72dc74abf..0431e4d73 100644 --- a/internal/crontask/clear_msg.go +++ b/internal/crontask/clear_msg.go @@ -4,172 +4,97 @@ import ( "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" "Open_IM/pkg/common/db" + "Open_IM/pkg/common/db/cache" + "Open_IM/pkg/common/db/controller" "Open_IM/pkg/common/db/mongo" "Open_IM/pkg/common/log" + "Open_IM/pkg/common/tracelog" sdkws "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/utils" + "context" "math" "strconv" "strings" - goRedis "github.com/go-redis/redis/v8" + goRedis "github.com/go-redis/redis/v8" "github.com/golang/protobuf/proto" ) -const oldestList = 0 -const newestList = -1 +type SeqCheckInterface interface { + ClearAll() error +} -func ResetUserGroupMinSeq(operationID, groupID string, userIDList []string) error { - var delStruct delMsgRecursionStruct - minSeq, err := deleteMongoMsg(operationID, groupID, oldestList, &delStruct) - if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), groupID, "deleteMongoMsg failed") +type ClearMsgCronTask struct { + msgModel controller.MsgInterface + userModel controller.UserInterface + groupModel controller.GroupInterface + cache cache.Cache +} + +func (c *ClearMsgCronTask) getCronTaskOperationID() string { + return cronTaskOperationID + utils.OperationIDGenerator() +} + +func (c *ClearMsgCronTask) ClearAll() { + operationID := c.getCronTaskOperationID() + ctx := context.Background() + tracelog.SetOperationID(ctx, operationID) +
log.NewInfo(operationID, "========================= start del cron task =========================") + var err error + userIDList, err := c.userModel.GetAllUserID(ctx) + if err == nil { + c.StartClearMsg(operationID, userIDList) + } else { + log.NewError(operationID, utils.GetSelfFuncName(), err.Error()) } - if minSeq == 0 { - return nil + // working group msg clear + workingGroupIDList, err := im_mysql_model.GetGroupIDListByGroupType(constant.WorkingGroup) + if err == nil { + c.StartClearWorkingGroupMsg(operationID, workingGroupIDList) + } else { + log.NewError(operationID, utils.GetSelfFuncName(), err.Error()) } - log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDList:", delStruct, "minSeq", minSeq) + + log.NewInfo(operationID, "========================= start del cron finished =========================") +} + +func (c *ClearMsgCronTask) StartClearMsg(operationID string, userIDList []string) { + log.NewDebug(operationID, utils.GetSelfFuncName(), "userIDList: ", userIDList) for _, userID := range userIDList { - userMinSeq, err := db.DB.GetGroupUserMinSeq(groupID, userID) - if err != nil && err != goRedis.Nil { - log.NewError(operationID, utils.GetSelfFuncName(), "GetGroupUserMinSeq failed", groupID, userID, err.Error()) + if err := DeleteUserMsgsAndSetMinSeq(operationID, userID); err != nil { + log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), userID) + } + if err := checkMaxSeqWithMongo(operationID, userID, constant.WriteDiffusion); err != nil { + log.NewError(operationID, utils.GetSelfFuncName(), userID, err) + } + } +} + +func (c *ClearMsgCronTask) StartClearWorkingGroupMsg(operationID string, workingGroupIDList []string) { + log.NewDebug(operationID, utils.GetSelfFuncName(), "workingGroupIDList: ", workingGroupIDList) + for _, groupID := range workingGroupIDList { + userIDList, err := rocksCache.GetGroupMemberIDListFromCache(groupID) + if err != nil { + log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID) continue } - 
if userMinSeq > uint64(minSeq) { - err = db.DB.SetGroupUserMinSeq(groupID, userID, userMinSeq) - } else { - err = db.DB.SetGroupUserMinSeq(groupID, userID, uint64(minSeq)) + log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "workingGroupIDList:", userIDList) + if err := DeleteUserSuperGroupMsgsAndSetMinSeq(operationID, groupID, userIDList); err != nil { + log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userIDList) } - if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userID, userMinSeq, minSeq) + if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil { + log.NewError(operationID, utils.GetSelfFuncName(), groupID, err) } } - return nil } -func DeleteMongoMsgAndResetRedisSeq(operationID, userID string) error { - var delStruct delMsgRecursionStruct - minSeq, err := deleteMongoMsg(operationID, userID, oldestList, &delStruct) - if err != nil { - return utils.Wrap(err, "") - } - if minSeq == 0 { - return nil - } - log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDStruct: ", delStruct, "minSeq", minSeq) - err = db.DB.SetUserMinSeq(userID, minSeq) - return utils.Wrap(err, "") -} - -// del list -func delMongoMsgsPhysical(uidList []string) error { - if len(uidList) > 0 { - err := db.DB.DelMongoMsgs(uidList) - if err != nil { - return utils.Wrap(err, "DelMongoMsgs failed") - } - } - return nil -} - -type delMsgRecursionStruct struct { - minSeq uint32 - delUidList []string -} - -func (d *delMsgRecursionStruct) getSetMinSeq() uint32 { - return d.minSeq -} - -// index 0....19(del) 20...69 -// seq 70 -// set minSeq 21 -// recursion 删除list并且返回设置的最小seq -func deleteMongoMsg(operationID string, ID string, index int64, delStruct *delMsgRecursionStruct) (uint32, error) { - // find from oldest list - msgs, err := db.DB.GetUserMsgListByIndex(ID, index) - if err != nil || msgs.UID == "" { - if err != nil { - if err == mongoDB.ErrMsgListNotExist { - 
log.NewInfo(operationID, utils.GetSelfFuncName(), "ID:", ID, "index:", index, err.Error()) - } else { - log.NewError(operationID, utils.GetSelfFuncName(), "GetUserMsgListByIndex failed", err.Error(), index, ID) - } - } - // 获取报错,或者获取不到了,物理删除并且返回seq - err = delMongoMsgsPhysical(delStruct.delUidList) - if err != nil { - return 0, err - } - return delStruct.getSetMinSeq() + 1, nil - } - log.NewDebug(operationID, "ID:", ID, "index:", index, "uid:", msgs.UID, "len:", len(msgs.Msg)) - if len(msgs.Msg) > mongoDB.GetSingleGocMsgNum() { - log.NewWarn(operationID, utils.GetSelfFuncName(), "msgs too large", len(msgs.Msg), msgs.UID) - } - if msgs.Msg[len(msgs.Msg)-1].SendTime+(int64(config.Config.Mongo.DBRetainChatRecords)*24*60*60*1000) > utils.GetCurrentTimestampByMill() && msgListIsFull(msgs) { - delStruct.delUidList = append(delStruct.delUidList, msgs.UID) - lastMsgPb := &sdkws.MsgData{} - err = proto.Unmarshal(msgs.Msg[len(msgs.Msg)-1].Msg, lastMsgPb) - if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID) - return 0, utils.Wrap(err, "proto.Unmarshal failed") - } - delStruct.minSeq = lastMsgPb.Seq - } else { - var hasMarkDelFlag bool - for _, msg := range msgs.Msg { - msgPb := &sdkws.MsgData{} - err = proto.Unmarshal(msg.Msg, msgPb) - if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID) - return 0, utils.Wrap(err, "proto.Unmarshal failed") - } - if utils.GetCurrentTimestampByMill() > msg.SendTime+(int64(config.Config.Mongo.DBRetainChatRecords)*24*60*60*1000) { - msgPb.Status = constant.MsgDeleted - bytes, _ := proto.Marshal(msgPb) - msg.Msg = bytes - msg.SendTime = 0 - hasMarkDelFlag = true - } else { - if err := delMongoMsgsPhysical(delStruct.delUidList); err != nil { - return 0, err - } - if hasMarkDelFlag { - if err := db.DB.UpdateOneMsgList(msgs); err != nil { - return delStruct.getSetMinSeq(), utils.Wrap(err, "") - } - } - return msgPb.Seq + 1, nil - } - } - 
} - log.NewDebug(operationID, ID, "continue to", delStruct) - // 继续递归 index+1 - seq, err := deleteMongoMsg(operationID, ID, index+1, delStruct) - return seq, utils.Wrap(err, "deleteMongoMsg failed") -} - -func msgListIsFull(chat *mongoDB.UserChat) bool { - index, _ := strconv.Atoi(strings.Split(chat.UID, ":")[1]) - if index == 0 { - if len(chat.Msg) >= 4999 { - return true - } - } - if len(chat.Msg) >= 5000 { - return true - } - return false -} - -func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error { +func checkMaxSeqWithMongo(operationID, sourceID string, diffusionType int) error { var seqRedis uint64 var err error if diffusionType == constant.WriteDiffusion { - seqRedis, err = db.DB.GetUserMaxSeq(ID) + seqRedis, err = db.DB.GetUserMaxSeq(sourceID) } else { - seqRedis, err = db.DB.GetGroupMaxSeq(ID) + seqRedis, err = db.DB.GetGroupMaxSeq(sourceID) } if err != nil { if err == goRedis.Nil { @@ -177,7 +102,7 @@ func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error { } return utils.Wrap(err, "GetUserMaxSeq failed") } - msg, err := db.DB.GetNewestMsg(ID) + msg, err := db.DB.GetNewestMsg(sourceID) if err != nil { return utils.Wrap(err, "GetNewestMsg failed") } @@ -185,9 +110,9 @@ func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error { return nil } if math.Abs(float64(msg.Seq-uint32(seqRedis))) > 10 { - log.NewWarn(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, ID, "redis maxSeq is different with msg.Seq > 10", "status: ", msg.Status, msg.SendTime) + log.NewWarn(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, sourceID, "redis maxSeq is different with msg.Seq > 10", "status: ", msg.Status, msg.SendTime) } else { - log.NewInfo(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, ID, "seq and msg OK", "status:", msg.Status, msg.SendTime) + log.NewInfo(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, 
seqRedis, sourceID, "seq and msg OK", "status:", msg.Status, msg.SendTime) } return nil } diff --git a/internal/crontask/clear_msg_test.go b/internal/crontask/clear_msg_test.go index 0079c01a2..be0be8197 100644 --- a/internal/crontask/clear_msg_test.go +++ b/internal/crontask/clear_msg_test.go @@ -2,8 +2,7 @@ package cronTask import ( "Open_IM/pkg/common/constant" - mongo2 "Open_IM/pkg/common/db/mongo" - sdkws "Open_IM/pkg/proto/sdkws" + "Open_IM/pkg/proto/sdkws" "context" "fmt" "strconv" @@ -59,7 +58,7 @@ func CreateChat(userChat *mongo2.UserChat) error { return err } -func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) { +func TestDeleteUserMsgsAndSetMinSeq(t *testing.T) { operationID := getCronTaskOperationID() redisClient = redis.NewClient(&redis.Options{ Addr: "127.0.0.1:16379", @@ -81,7 +80,7 @@ func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) { userChat := GenUserChat(1, 500, 200, 0, testUID1) err = CreateChat(userChat) - if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID1); err != nil { + if err := DeleteUserMsgsAndSetMinSeq(operationID, testUID1); err != nil { t.Error("checkMaxSeqWithMongo failed", testUID1) } if err := checkMaxSeqWithMongo(operationID, testUID1, constant.WriteDiffusion); err != nil { @@ -94,7 +93,7 @@ func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) { // for _, groupID := range testWorkingGroupIDList { // operationID = groupID + "-" + operationID // log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "userIDList:", testUserIDList) - // if err := ResetUserGroupMinSeq(operationID, groupID, testUserIDList); err != nil { + // if err := DeleteUserSuperGroupMsgsAndSetMinSeq(operationID, groupID, testUserIDList); err != nil { // t.Error("checkMaxSeqWithMongo failed", groupID) // } // if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil { diff --git a/internal/crontask/cron_task.go b/internal/crontask/cron_task.go index 2a54b2c26..ece45f399 100644 --- 
a/internal/crontask/cron_task.go +++ b/internal/crontask/cron_task.go @@ -2,10 +2,6 @@ package cronTask import ( "Open_IM/pkg/common/config" - "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/db/controller" - "Open_IM/pkg/common/db/mysql_model/im_mysql_model" - rocksCache "Open_IM/pkg/common/db/rocks_cache" "Open_IM/pkg/common/log" "Open_IM/pkg/utils" "fmt" @@ -15,9 +11,10 @@ import ( ) const cronTaskOperationID = "cronTaskOperationID-" +const moduleName = "cron" func StartCronTask(userID, workingGroupID string) { - log.NewPrivateLog("cron") + log.NewPrivateLog(moduleName) log.NewInfo(utils.OperationIDGenerator(), "start cron task", "cron config", config.Config.Mongo.ChatRecordsClearTime) fmt.Println("cron task start, config", config.Config.Mongo.ChatRecordsClearTime) if userID != "" { @@ -44,59 +41,3 @@ func StartCronTask(userID, workingGroupID string) { time.Sleep(10 * time.Second) } } - -func getCronTaskOperationID() string { - return cronTaskOperationID + utils.OperationIDGenerator() -} - -func ClearAll() { - operationID := getCronTaskOperationID() - log.NewInfo(operationID, "========================= start del cron task =========================") - var err error - userIDList, err := im_mysql_model.SelectAllUserID() - if err == nil { - StartClearMsg(operationID, userIDList) - } else { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error()) - } - - // working group msg clear - workingGroupIDList, err := im_mysql_model.GetGroupIDListByGroupType(constant.WorkingGroup) - if err == nil { - StartClearWorkingGroupMsg(operationID, workingGroupIDList) - } else { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error()) - } - - log.NewInfo(operationID, "========================= start del cron finished =========================") -} - -func StartClearMsg(operationID string, userIDList []string) { - log.NewDebug(operationID, utils.GetSelfFuncName(), "userIDList: ", userIDList) - for _, userID := range userIDList { - if err := 
DeleteMongoMsgAndResetRedisSeq(operationID, userID); err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), userID) - } - if err := checkMaxSeqWithMongo(operationID, userID, constant.WriteDiffusion); err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), userID, err) - } - } -} - -func StartClearWorkingGroupMsg(operationID string, workingGroupIDList []string) { - log.NewDebug(operationID, utils.GetSelfFuncName(), "workingGroupIDList: ", workingGroupIDList) - for _, groupID := range workingGroupIDList { - userIDList, err := rocksCache.GetGroupMemberIDListFromCache(groupID) - if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID) - continue - } - log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "workingGroupIDList:", userIDList) - if err := ResetUserGroupMinSeq(operationID, groupID, userIDList); err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userIDList) - } - if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), groupID, err) - } - } -} diff --git a/internal/msggateway/init.go b/internal/msggateway/init.go index d68a359f3..5f2765fe8 100644 --- a/internal/msggateway/init.go +++ b/internal/msggateway/init.go @@ -8,7 +8,7 @@ import ( "fmt" "sync" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "github.com/go-playground/validator/v10" ) @@ -40,7 +40,7 @@ func Run(promethuesPort int) { go ws.run() go rpcSvr.run() go func() { - err := promePkg.StartPromeSrv(promethuesPort) + err := prome.StartPromeSrv(promethuesPort) if err != nil { panic(err) } diff --git a/internal/msggateway/logic.go b/internal/msggateway/logic.go index 2109efe3d..5752ce74f 100644 --- a/internal/msggateway/logic.go +++ b/internal/msggateway/logic.go @@ -5,7 +5,7 @@ import ( "Open_IM/pkg/common/constant" "Open_IM/pkg/common/db" 
"Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" + "Open_IM/pkg/common/prome" pbChat "Open_IM/pkg/proto/msg" push "Open_IM/pkg/proto/push" pbRtc "Open_IM/pkg/proto/rtc" @@ -51,18 +51,18 @@ func (ws *WServer) msgParse(conn *UserConn, binaryMsg []byte) { case constant.WSGetNewestSeq: log.NewInfo(m.OperationID, "getSeqReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) ws.getSeqReq(conn, &m) - promePkg.PromeInc(promePkg.GetNewestSeqTotalCounter) + prome.PromeInc(prome.GetNewestSeqTotalCounter) case constant.WSSendMsg: log.NewInfo(m.OperationID, "sendMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) ws.sendMsgReq(conn, &m) - promePkg.PromeInc(promePkg.MsgRecvTotalCounter) + prome.PromeInc(prome.MsgRecvTotalCounter) case constant.WSSendSignalMsg: log.NewInfo(m.OperationID, "sendSignalMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) ws.sendSignalMsgReq(conn, &m) case constant.WSPullMsgBySeqList: log.NewInfo(m.OperationID, "pullMsgBySeqListReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) ws.pullMsgBySeqListReq(conn, &m) - promePkg.PromeInc(promePkg.PullMsgBySeqListTotalCounter) + prome.PromeInc(prome.PullMsgBySeqListTotalCounter) case constant.WsLogoutMsg: log.NewInfo(m.OperationID, "conn.Close()", m.SendID, m.MsgIncr, m.ReqIdentifier) ws.userLogoutReq(conn, &m) diff --git a/internal/msggateway/relay_rpc_server.go b/internal/msggateway/relay_rpc_server.go index 04da8ff77..8a95c1292 100644 --- a/internal/msggateway/relay_rpc_server.go +++ b/internal/msggateway/relay_rpc_server.go @@ -4,7 +4,7 @@ import ( "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prome" "Open_IM/pkg/common/tokenverify" pbRelay "Open_IM/pkg/proto/relay" sdkws "Open_IM/pkg/proto/sdkws" @@ -34,14 +34,14 @@ type RPCServer struct { } func initPrometheus() { - promePkg.NewMsgRecvTotalCounter() - promePkg.NewGetNewestSeqTotalCounter() - promePkg.NewPullMsgBySeqListTotalCounter() - 
promePkg.NewMsgOnlinePushSuccessCounter() - promePkg.NewOnlineUserGauges() - //promePkg.NewSingleChatMsgRecvSuccessCounter() - //promePkg.NewGroupChatMsgRecvSuccessCounter() - //promePkg.NewWorkSuperGroupChatMsgRecvSuccessCounter() + prome.NewMsgRecvTotalCounter() + prome.NewGetNewestSeqTotalCounter() + prome.NewPullMsgBySeqListTotalCounter() + prome.NewMsgOnlinePushSuccessCounter() + prome.NewOnlineUserGauges() + //prome.NewSingleChatMsgRecvSuccessCounter() + //prome.NewGroupChatMsgRecvSuccessCounter() + //prome.NewWorkSuperGroupChatMsgRecvSuccessCounter() } func (r *RPCServer) onInit(rpcPort int) { @@ -67,11 +67,11 @@ func (r *RPCServer) run() { defer listener.Close() var grpcOpts []grpc.ServerOption if config.Config.Prometheus.Enable { - promePkg.NewGrpcRequestCounter() - promePkg.NewGrpcRequestFailedCounter() - promePkg.NewGrpcRequestSuccessCounter() + prome.NewGrpcRequestCounter() + prome.NewGrpcRequestFailedCounter() + prome.NewGrpcRequestSuccessCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) 
@@ -205,7 +205,7 @@ func (r *RPCServer) SuperGroupOnlineBatchPushOneMsg(_ context.Context, req *pbRe resultCode := sendMsgBatchToUser(userConn, replyBytes.Bytes(), req, platform, v) if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) { tempT.OnlinePush = true - promePkg.PromeInc(promePkg.MsgOnlinePushSuccessCounter) + prome.PromeInc(prome.MsgOnlinePushSuccessCounter) log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recvPlatForm", constant.PlatformIDToName(platform), "recvID", v) temp.ResultCode = resultCode resp = append(resp, temp) diff --git a/internal/msggateway/ws_server.go b/internal/msggateway/ws_server.go index 99bc8fa28..6e4630a5f 100644 --- a/internal/msggateway/ws_server.go +++ b/internal/msggateway/ws_server.go @@ -5,7 +5,7 @@ import ( "Open_IM/pkg/common/constant" "Open_IM/pkg/common/db" "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "Open_IM/pkg/common/tokenverify" pbRelay "Open_IM/pkg/proto/relay" "Open_IM/pkg/utils" @@ -352,7 +352,7 @@ func (ws *WServer) addUserConn(uid string, platformID int, conn *UserConn, token for _, v := range ws.wsUserToConn { count = count + len(v) } - promePkg.PromeGaugeInc(promePkg.OnlineUserGauge) + prome.PromeGaugeInc(prome.OnlineUserGauge) log.Debug(operationID, "WS Add operation", "", "wsUser added", ws.wsUserToConn, "connection_uid", uid, "connection_platform", constant.PlatformIDToName(platformID), "online_user_num", len(ws.wsUserToConn), "online_conn_num", count) } @@ -394,7 +394,7 @@ func (ws *WServer) delUserConn(conn *UserConn) { if callbackResp.ErrCode != 0 { log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp) } - promePkg.PromeGaugeDec(promePkg.OnlineUserGauge) + prome.PromeGaugeDec(prome.OnlineUserGauge) } diff --git a/internal/msgtransfer/db.go b/internal/msgtransfer/db.go deleted file mode 100644 index d403ecb47..000000000 --- 
a/internal/msgtransfer/db.go +++ /dev/null @@ -1,29 +0,0 @@ -package msgtransfer - -import ( - "Open_IM/pkg/common/db" - "Open_IM/pkg/common/log" - pbMsg "Open_IM/pkg/proto/msg" - "Open_IM/pkg/utils" -) - -func saveUserChat(uid string, msg *pbMsg.MsgDataToMQ) error { - time := utils.GetCurrentTimestampByMill() - seq, err := db.DB.IncrUserSeq(uid) - if err != nil { - log.NewError(msg.OperationID, "data insert to redis err", err.Error(), msg.String()) - return err - } - msg.MsgData.Seq = uint32(seq) - pbSaveData := pbMsg.MsgDataToDB{} - pbSaveData.MsgData = msg.MsgData - log.NewInfo(msg.OperationID, "IncrUserSeq cost time", utils.GetCurrentTimestampByMill()-time) - return db.DB.SaveUserChatMongo2(uid, pbSaveData.MsgData.SendTime, &pbSaveData) - // return db.DB.SaveUserChatMongo2(uid, pbSaveData.MsgData.SendTime, &pbSaveData) -} - -func saveUserChatList(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) (error, uint64) { - log.Info(operationID, utils.GetSelfFuncName(), "args ", userID, len(msgList)) - //return db.DB.BatchInsertChat(userID, msgList, operationID) - return db.DB.BatchInsertChat2Cache(userID, msgList, operationID) -} diff --git a/internal/msgtransfer/init.go b/internal/msgtransfer/init.go index 86a576c65..a94494f9b 100644 --- a/internal/msgtransfer/init.go +++ b/internal/msgtransfer/init.go @@ -2,83 +2,53 @@ package msgtransfer import ( "Open_IM/pkg/common/config" - "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/kafka" - promePkg "Open_IM/pkg/common/prometheus" - "Open_IM/pkg/statistics" + "Open_IM/pkg/common/prome" "fmt" - "sync" ) -const OnlineTopicBusy = 1 -const OnlineTopicVacancy = 0 -const Msg = 2 -const ConsumerMsgs = 3 -const AggregationMessages = 4 -const MongoMessages = 5 -const ChannelNum = 100 - -var ( - persistentCH PersistentConsumerHandler - historyCH OnlineHistoryRedisConsumerHandler - historyMongoCH OnlineHistoryMongoConsumerHandler - modifyCH ModifyMsgConsumerHandler - producer *kafka.Producer - producerToModify 
*kafka.Producer - producerToMongo *kafka.Producer - cmdCh chan Cmd2Value - onlineTopicStatus int - w *sync.Mutex - singleMsgSuccessCount uint64 - groupMsgCount uint64 - singleMsgFailedCount uint64 - - singleMsgSuccessCountMutex sync.Mutex -) - -func Init() { - cmdCh = make(chan Cmd2Value, 10000) - w = new(sync.Mutex) - if config.Config.Prometheus.Enable { - initPrometheus() - } - persistentCH.Init() // ws2mschat save mysql - historyCH.Init(cmdCh) // - historyMongoCH.Init() - modifyCH.Init() - onlineTopicStatus = OnlineTopicVacancy - //offlineHistoryCH.Init(cmdCh) - statistics.NewStatistics(&singleMsgSuccessCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second singleMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval) - statistics.NewStatistics(&groupMsgCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second groupMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval) - producer = kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic) - producerToModify = kafka.NewKafkaProducer(config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.MsgToModify.Topic) - producerToMongo = kafka.NewKafkaProducer(config.Config.Kafka.MsgToMongo.Addr, config.Config.Kafka.MsgToMongo.Topic) +type MsgTransfer struct { + persistentCH PersistentConsumerHandler // 聊天记录持久化到mysql的消费者 订阅的topic: ws2ms_chat + historyCH OnlineHistoryRedisConsumerHandler // 这个消费者聚合消息, 订阅的topic:ws2ms_chat, 修改通知发往msg_to_modify topic, 消息存入redis后Incr Redis, 再发消息到ms2pschat topic推送, 发消息到msg_to_mongo topic持久化 + historyMongoCH OnlineHistoryMongoConsumerHandler // mongoDB批量插入, 成功后删除redis中消息,以及处理删除通知消息删除的 订阅的topic: msg_to_mongo + modifyCH ModifyMsgConsumerHandler // 负责消费修改消息通知的consumer, 订阅的topic: msg_to_modify } -func Run(promethuesPort int) { - //register mysqlConsumerHandler to - if config.Config.ChatPersistenceMysql { - go 
persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(&persistentCH) - } else { - fmt.Println("not start mysql consumer") + +func NewMsgTransfer() *MsgTransfer { + msgTransfer := &MsgTransfer{} + msgTransfer.persistentCH.Init() + msgTransfer.historyCH.Init() + msgTransfer.historyMongoCH.Init() + msgTransfer.modifyCH.Init() + if config.Config.Prometheus.Enable { + msgTransfer.initPrometheus() } - go historyCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyCH) - go historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyMongoCH) - go modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(&modifyCH) - //go offlineHistoryCH.historyConsumerGroup.RegisterHandleAndConsumer(&offlineHistoryCH) + return msgTransfer +} + +func (m *MsgTransfer) initPrometheus() { + prome.NewSeqGetSuccessCounter() + prome.NewSeqGetFailedCounter() + prome.NewSeqSetSuccessCounter() + prome.NewSeqSetFailedCounter() + prome.NewMsgInsertRedisSuccessCounter() + prome.NewMsgInsertRedisFailedCounter() + prome.NewMsgInsertMongoSuccessCounter() + prome.NewMsgInsertMongoFailedCounter() +} + +func (m *MsgTransfer) Run(promePort int) { + if config.Config.ChatPersistenceMysql { + go m.persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(&m.persistentCH) + } else { + fmt.Println("msg transfer not start mysql consumer") + } + go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(&m.historyCH) + go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(&m.historyMongoCH) + go m.modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(&m.modifyCH) go func() { - err := promePkg.StartPromeSrv(promethuesPort) + err := prome.StartPromeSrv(promePort) if err != nil { panic(err) } }() } -func SetOnlineTopicStatus(status int) { - w.Lock() - defer w.Unlock() - onlineTopicStatus = status -} -func GetOnlineTopicStatus() int { - w.Lock() - defer w.Unlock() - return onlineTopicStatus -} diff --git a/internal/msgtransfer/modify_msg_handler.go 
b/internal/msgtransfer/modify_msg_handler.go index 796617175..31b013115 100644 --- a/internal/msgtransfer/modify_msg_handler.go +++ b/internal/msgtransfer/modify_msg_handler.go @@ -1,14 +1,19 @@ package msgtransfer import ( + "Open_IM/pkg/apistruct" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/db" + "Open_IM/pkg/common/db/cache" + "Open_IM/pkg/common/db/controller" + unRelationTb "Open_IM/pkg/common/db/table/unrelation" kfk "Open_IM/pkg/common/kafka" "Open_IM/pkg/common/log" + "Open_IM/pkg/common/tracelog" pbMsg "Open_IM/pkg/proto/msg" sdkws "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/utils" + "context" "encoding/json" "github.com/Shopify/sarama" @@ -16,13 +21,13 @@ import ( ) type ModifyMsgConsumerHandler struct { - msgHandle map[string]fcb modifyMsgConsumerGroup *kfk.MConsumerGroup + + extendMsgInterface controller.ExtendMsgInterface + cache cache.Cache } func (mmc *ModifyMsgConsumerHandler) Init() { - mmc.msgHandle = make(map[string]fcb) - mmc.msgHandle[config.Config.Kafka.MsgToModify.Topic] = mmc.ModifyMsg mmc.modifyMsgConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.MsgToModify.Topic}, config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.ConsumerGroupID.MsgToModify) @@ -35,7 +40,7 @@ func (mmc *ModifyMsgConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSessi for msg := range claim.Messages() { log.NewDebug("", "kafka get info to mysql", "ModifyMsgConsumerHandler", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key)) if len(msg.Value) != 0 { - mmc.msgHandle[msg.Topic](msg, string(msg.Key), sess) + mmc.ModifyMsg(msg, string(msg.Key), sess) } else { log.Error("", "msg get from kafka but is nil", msg.Key) } @@ -58,6 +63,8 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg if !isReactionFromCache { continue } + ctx := 
context.Background() + tracelog.SetOperationID(ctx, msgDataToMQ.OperationID) if msgDataToMQ.MsgData.ContentType == constant.ReactionMessageModifier { notification := &apistruct.ReactionMessageModifierNotification{} if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil { @@ -69,21 +76,21 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg } if !notification.IsReact { // first time to modify - var reactionExtensionList = make(map[string]mongoDB.KeyValue) - extendMsg := mongoDB.ExtendMsg{ + var reactionExtensionList = make(map[string]unRelationTb.KeyValueModel) + extendMsg := unRelationTb.ExtendMsgModel{ ReactionExtensionList: reactionExtensionList, ClientMsgID: notification.ClientMsgID, MsgFirstModifyTime: notification.MsgFirstModifyTime, } for _, v := range notification.SuccessReactionExtensionList { - reactionExtensionList[v.TypeKey] = mongoDB.KeyValue{ + reactionExtensionList[v.TypeKey] = unRelationTb.KeyValueModel{ TypeKey: v.TypeKey, Value: v.Value, LatestUpdateTime: v.LatestUpdateTime, } } - if err := db.DB.InsertExtendMsg(notification.SourceID, notification.SessionType, &extendMsg); err != nil { + if err := mmc.extendMsgInterface.InsertExtendMsg(ctx, notification.SourceID, notification.SessionType, &extendMsg); err != nil { log.NewError(msgDataToMQ.OperationID, "MsgFirstModify InsertExtendMsg failed", notification.SourceID, notification.SessionType, extendMsg, err.Error()) continue } @@ -97,7 +104,7 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg } } // is already modify - if err := db.DB.InsertOrUpdateReactionExtendMsgSet(notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, reactionExtensionList); err != nil { + if err := mmc.extendMsgInterface.InsertOrUpdateReactionExtendMsgSet(ctx, notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, reactionExtensionList); 
err != nil { log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed") } } @@ -106,15 +113,10 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil { continue } - if err := db.DB.DeleteReactionExtendMsgSet(notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, notification.SuccessReactionExtensionList); err != nil { + if err := mmc.extendMsgInterface.DeleteReactionExtendMsgSet(ctx, notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, notification.SuccessReactionExtensionList); err != nil { log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed") } } } } - -func UnMarshallSetReactionMsgContent(content []byte) (notification *apistruct.ReactionMessageModifierNotification, err error) { - - return notification, nil -} diff --git a/internal/msgtransfer/online_history_msg_handler.go b/internal/msgtransfer/online_history_msg_handler.go index 8536e45dc..f4c4fd09d 100644 --- a/internal/msgtransfer/online_history_msg_handler.go +++ b/internal/msgtransfer/online_history_msg_handler.go @@ -3,62 +3,80 @@ package msgtransfer import ( "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/db" - kfk "Open_IM/pkg/common/kafka" + "Open_IM/pkg/common/db/cache" + "Open_IM/pkg/common/db/controller" + "Open_IM/pkg/common/kafka" "Open_IM/pkg/common/log" + "Open_IM/pkg/common/tracelog" pbMsg "Open_IM/pkg/proto/msg" pbPush "Open_IM/pkg/proto/push" + "Open_IM/pkg/statistics" "Open_IM/pkg/utils" "context" + "fmt" "github.com/Shopify/sarama" "github.com/golang/protobuf/proto" - "hash/crc32" - "strings" "sync" "time" ) +const ConsumerMsgs = 3 +const AggregationMessages = 4 +const MongoMessages = 5 +const ChannelNum = 100 + type MsgChannelValue struct { aggregationID string //maybe userID or super groupID 
triggerID string msgList []*pbMsg.MsgDataToMQ lastSeq uint64 } + type TriggerChannelValue struct { triggerID string - cmsgList []*sarama.ConsumerMessage + cMsgList []*sarama.ConsumerMessage } -type fcb func(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) + type Cmd2Value struct { Cmd int Value interface{} } + type OnlineHistoryRedisConsumerHandler struct { - msgHandle map[string]fcb - historyConsumerGroup *kfk.MConsumerGroup + historyConsumerGroup *kafka.MConsumerGroup chArrays [ChannelNum]chan Cmd2Value msgDistributionCh chan Cmd2Value + + singleMsgSuccessCount uint64 + singleMsgFailedCount uint64 + singleMsgSuccessCountMutex sync.Mutex + singleMsgFailedCountMutex sync.Mutex + + producerToPush *kafka.Producer + producerToModify *kafka.Producer + producerToMongo *kafka.Producer + + msgInterface controller.MsgInterface + cache cache.Cache } -func (och *OnlineHistoryRedisConsumerHandler) Init(cmdCh chan Cmd2Value) { - och.msgHandle = make(map[string]fcb) +func (och *OnlineHistoryRedisConsumerHandler) Init() { och.msgDistributionCh = make(chan Cmd2Value) //no buffer channel go och.MessagesDistributionHandle() for i := 0; i < ChannelNum; i++ { och.chArrays[i] = make(chan Cmd2Value, 50) go och.Run(i) } - if config.Config.ReliableStorage { - och.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = och.handleChatWs2Mongo - } else { - och.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = och.handleChatWs2MongoLowReliability - - } - och.historyConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, + och.producerToPush = kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic) + och.producerToModify = kafka.NewKafkaProducer(config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.MsgToModify.Topic) + och.producerToMongo = kafka.NewKafkaProducer(config.Config.Kafka.MsgToMongo.Addr, config.Config.Kafka.MsgToMongo.Topic) + och.historyConsumerGroup = 
kafka.NewMConsumerGroup(&kafka.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic}, config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToRedis) + statistics.NewStatistics(&och.singleMsgSuccessCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second singleMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval) } + func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) { for { select { @@ -72,6 +90,8 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) { notStoragePushMsgList := make([]*pbMsg.MsgDataToMQ, 0, 80) log.Debug(triggerID, "msg arrived channel", "channel id", channelID, msgList, msgChannelValue.aggregationID, len(msgList)) var modifyMsgList []*pbMsg.MsgDataToMQ + ctx := context.Background() + tracelog.SetOperationID(ctx, triggerID) for _, v := range msgList { log.Debug(triggerID, "msg come to storage center", v.String()) isHistory := utils.GetSwitchFromOptions(v.MsgData.Options, constant.IsHistory) @@ -84,45 +104,36 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) { notStoragePushMsgList = append(notStoragePushMsgList, v) } } - if v.MsgData.ContentType == constant.ReactionMessageModifier || v.MsgData.ContentType == constant.ReactionMessageDeleter { modifyMsgList = append(modifyMsgList, v) } } if len(modifyMsgList) > 0 { - sendMessageToModifyMQ(msgChannelValue.aggregationID, triggerID, modifyMsgList) + och.sendMessageToModifyMQ(ctx, msgChannelValue.aggregationID, triggerID, modifyMsgList) } - //switch msgChannelValue.msg.MsgData.SessionType { - //case constant.SingleChatType: - //case constant.GroupChatType: - //case constant.NotificationChatType: - //default: - // log.NewError(msgFromMQ.OperationID, "SessionType error", msgFromMQ.String()) - // return - //} log.Debug(triggerID, "msg storage length", len(storageMsgList), "push length", 
len(notStoragePushMsgList)) if len(storageMsgList) > 0 { - err, lastSeq := saveUserChatList(msgChannelValue.aggregationID, storageMsgList, triggerID) + lastSeq, err := och.msgInterface.BatchInsertChat2Cache(ctx, msgChannelValue.aggregationID, storageMsgList) if err != nil { - singleMsgFailedCount += uint64(len(storageMsgList)) + och.singleMsgFailedCountMutex.Lock() + och.singleMsgFailedCount += uint64(len(storageMsgList)) + och.singleMsgFailedCountMutex.Unlock() log.NewError(triggerID, "single data insert to redis err", err.Error(), storageMsgList) } else { - singleMsgSuccessCountMutex.Lock() - singleMsgSuccessCount += uint64(len(storageMsgList)) - singleMsgSuccessCountMutex.Unlock() - och.SendMessageToMongoCH(msgChannelValue.aggregationID, triggerID, storageMsgList, lastSeq) - + och.singleMsgSuccessCountMutex.Lock() + och.singleMsgSuccessCount += uint64(len(storageMsgList)) + och.singleMsgSuccessCountMutex.Unlock() + och.SendMessageToMongoCH(ctx, msgChannelValue.aggregationID, triggerID, storageMsgList, lastSeq) for _, v := range storageMsgList { - sendMessageToPushMQ(v, msgChannelValue.aggregationID) + och.sendMessageToPushMQ(ctx, v, msgChannelValue.aggregationID) } for _, x := range notStoragePushMsgList { - sendMessageToPushMQ(x, msgChannelValue.aggregationID) + och.sendMessageToPushMQ(ctx, x, msgChannelValue.aggregationID) } } - } else { - for _, x := range notStoragePushMsgList { - sendMessageToPushMQ(x, msgChannelValue.aggregationID) + for _, v := range notStoragePushMsgList { + och.sendMessageToPushMQ(ctx, v, msgChannelValue.aggregationID) } } } @@ -130,62 +141,6 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) { } } -func (och *OnlineHistoryRedisConsumerHandler) SendMessageToMongoCH(aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ, lastSeq uint64) { - if len(messages) > 0 { - pid, offset, err := producerToMongo.SendMessage(&pbMsg.MsgDataToMongoByMQ{LastSeq: lastSeq, AggregationID: aggregationID, MessageList: 
messages, TriggerID: triggerID}, aggregationID, triggerID) - if err != nil { - log.Error(triggerID, "kafka send failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID) - } else { - // log.NewWarn(m.OperationID, "sendMsgToKafka client msgID ", m.MsgData.ClientMsgID) - } - } - //hashCode := getHashCode(aggregationID) - //channelID := hashCode % ChannelNum - //log.Debug(triggerID, "generate channelID", hashCode, channelID, aggregationID) - ////go func(cID uint32, userID string, messages []*pbMsg.MsgDataToMQ) { - //och.chMongoArrays[channelID] <- Cmd2Value{Cmd: MongoMessages, Value: MsgChannelValue{aggregationID: aggregationID, msgList: messages, triggerID: triggerID, lastSeq: lastSeq}} -} - -//func (och *OnlineHistoryRedisConsumerHandler) MongoMessageRun(channelID int) { -// for { -// select { -// case cmd := <-och.chMongoArrays[channelID]: -// switch cmd.Cmd { -// case MongoMessages: -// msgChannelValue := cmd.Value.(MsgChannelValue) -// msgList := msgChannelValue.msgList -// triggerID := msgChannelValue.triggerID -// aggregationID := msgChannelValue.aggregationID -// lastSeq := msgChannelValue.lastSeq -// err := db.DB.BatchInsertChat2DB(aggregationID, msgList, triggerID, lastSeq) -// if err != nil { -// log.NewError(triggerID, "single data insert to mongo err", err.Error(), msgList) -// } -// for _, v := range msgList { -// if v.MsgData.ContentType == constant.DeleteMessageNotification { -// tips := sdkws.TipsComm{} -// DeleteMessageTips := sdkws.DeleteMessageTips{} -// err := proto.Unmarshal(v.MsgData.Content, &tips) -// if err != nil { -// log.NewError(triggerID, "tips unmarshal err:", err.Error(), v.String()) -// continue -// } -// err = proto.Unmarshal(tips.Detail, &DeleteMessageTips) -// if err != nil { -// log.NewError(triggerID, "deleteMessageTips unmarshal err:", err.Error(), v.String()) -// continue -// } -// if unexistSeqList, err := db.DB.DelMsgBySeqList(DeleteMessageTips.UserID, 
DeleteMessageTips.SeqList, v.OperationID); err != nil { -// log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID, err.Error(), unexistSeqList) -// } -// -// } -// } -// } -// } -// } -//} - func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() { for { aggregationMsgs := make(map[string][]*pbMsg.MsgDataToMQ, ChannelNum) @@ -195,7 +150,7 @@ func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() { case ConsumerMsgs: triggerChannelValue := cmd.Value.(TriggerChannelValue) triggerID := triggerChannelValue.triggerID - consumerMessages := triggerChannelValue.cmsgList + consumerMessages := triggerChannelValue.cMsgList //Aggregation map[userid]message list log.Debug(triggerID, "batch messages come to distribution center", len(consumerMessages)) for i := 0; i < len(consumerMessages); i++ { @@ -218,155 +173,21 @@ func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() { log.Debug(triggerID, "generate map list users len", len(aggregationMsgs)) for aggregationID, v := range aggregationMsgs { if len(v) >= 0 { - hashCode := getHashCode(aggregationID) + hashCode := utils.GetHashCode(aggregationID) channelID := hashCode % ChannelNum log.Debug(triggerID, "generate channelID", hashCode, channelID, aggregationID) - //go func(cID uint32, userID string, messages []*pbMsg.MsgDataToMQ) { och.chArrays[channelID] <- Cmd2Value{Cmd: AggregationMessages, Value: MsgChannelValue{aggregationID: aggregationID, msgList: v, triggerID: triggerID}} - //}(channelID, userID, v) } } } } - - } - -} -func (mc *OnlineHistoryRedisConsumerHandler) handleChatWs2Mongo(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) { - msg := cMsg.Value - now := time.Now() - msgFromMQ := pbMsg.MsgDataToMQ{} - err := proto.Unmarshal(msg, &msgFromMQ) - if err != nil { - log.Error("msg_transfer Unmarshal msg err", "", "msg", string(msg), "err", 
err.Error()) - return - } - operationID := msgFromMQ.OperationID - log.NewInfo(operationID, "msg come mongo!!!", "", "msg", string(msg)) - //Control whether to store offline messages (mongo) - isHistory := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsHistory) - //Control whether to store history messages (mysql) - isPersist := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsPersistent) - isSenderSync := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync) - switch msgFromMQ.MsgData.SessionType { - case constant.SingleChatType: - log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = SingleChatType", isHistory, isPersist) - if isHistory { - err := saveUserChat(msgKey, &msgFromMQ) - if err != nil { - singleMsgFailedCount++ - log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String()) - return - } - singleMsgSuccessCountMutex.Lock() - singleMsgSuccessCount++ - singleMsgSuccessCountMutex.Unlock() - log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now)) - } - if !isSenderSync && msgKey == msgFromMQ.MsgData.SendID { - } else { - go sendMessageToPush(&msgFromMQ, msgKey) - } - log.NewDebug(operationID, "saveSingleMsg cost time ", time.Since(now)) - case constant.GroupChatType: - log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = GroupChatType", isHistory, isPersist) - if isHistory { - err := saveUserChat(msgFromMQ.MsgData.RecvID, &msgFromMQ) - if err != nil { - log.NewError(operationID, "group data insert to mongo err", msgFromMQ.String(), msgFromMQ.MsgData.RecvID, err.Error()) - return - } - groupMsgCount++ - } - go sendMessageToPush(&msgFromMQ, msgFromMQ.MsgData.RecvID) - log.NewDebug(operationID, "saveGroupMsg cost time ", time.Since(now)) - - case constant.NotificationChatType: - log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = NotificationChatType", isHistory, isPersist) - if isHistory { - err := saveUserChat(msgKey, 
&msgFromMQ) - if err != nil { - log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String()) - return - } - log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now)) - } - if !isSenderSync && msgKey == msgFromMQ.MsgData.SendID { - } else { - go sendMessageToPush(&msgFromMQ, msgKey) - } - log.NewDebug(operationID, "saveUserChat cost time ", time.Since(now)) - default: - log.NewError(msgFromMQ.OperationID, "SessionType error", msgFromMQ.String()) - return - } - sess.MarkMessage(cMsg, "") - log.NewDebug(msgFromMQ.OperationID, "msg_transfer handle topic data to database success...", msgFromMQ.String()) -} - -func (och *OnlineHistoryRedisConsumerHandler) handleChatWs2MongoLowReliability(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) { - msg := cMsg.Value - msgFromMQ := pbMsg.MsgDataToMQ{} - err := proto.Unmarshal(msg, &msgFromMQ) - if err != nil { - log.Error("msg_transfer Unmarshal msg err", "", "msg", string(msg), "err", err.Error()) - return - } - operationID := msgFromMQ.OperationID - log.NewInfo(operationID, "msg come mongo!!!", "", "msg", string(msg)) - //Control whether to store offline messages (mongo) - isHistory := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsHistory) - isSenderSync := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync) - if isHistory { - seq, err := db.DB.IncrUserSeq(msgKey) - if err != nil { - log.NewError(operationID, "data insert to redis err", err.Error(), string(msg)) - return - } - sess.MarkMessage(cMsg, "") - msgFromMQ.MsgData.Seq = uint32(seq) - log.Debug(operationID, "send ch msg is ", msgFromMQ.String()) - //och.msgCh <- Cmd2Value{Cmd: Msg, Value: MsgChannelValue{msgKey, msgFromMQ}} - //err := saveUserChat(msgKey, &msgFromMQ) - //if err != nil { - // singleMsgFailedCount++ - // log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String()) - // return - //} - 
//singleMsgSuccessCountMutex.Lock() - //singleMsgSuccessCount++ - //singleMsgSuccessCountMutex.Unlock() - //log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now)) - } else { - if !(!isSenderSync && msgKey == msgFromMQ.MsgData.SendID) { - go sendMessageToPush(&msgFromMQ, msgKey) - } } } func (OnlineHistoryRedisConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } func (OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } -//func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, -// claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group -// log.NewDebug("", "online new session msg come", claim.HighWaterMarkOffset(), claim.Topic(), claim.Partition()) -// for msg := range claim.Messages() { -// SetOnlineTopicStatus(OnlineTopicBusy) -// //och.TriggerCmd(OnlineTopicBusy) -// log.NewDebug("", "online kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "online", msg.Offset, claim.HighWaterMarkOffset()) -// och.msgHandle[msg.Topic](msg, string(msg.Key), sess) -// if claim.HighWaterMarkOffset()-msg.Offset <= 1 { -// log.Debug("", "online msg consume end", claim.HighWaterMarkOffset(), msg.Offset) -// SetOnlineTopicStatus(OnlineTopicVacancy) -// och.TriggerCmd(OnlineTopicVacancy) -// } -// } -// return nil -//} - -func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, - claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group - +func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group for { if sess == nil { log.NewWarn("", " sess == nil, waiting ") @@ -383,24 +204,6 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerG go func() { for { select { - //case : - // triggerID = utils.OperationIDGenerator() 
- // - // log.NewDebug(triggerID, "claim.Messages ", msg) - // cMsg = append(cMsg, msg) - // if len(cMsg) >= 1000 { - // ccMsg := make([]*sarama.ConsumerMessage, 0, 1000) - // for _, v := range cMsg { - // ccMsg = append(ccMsg, v) - // } - // log.Debug(triggerID, "length trigger msg consumer start", len(ccMsg)) - // och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{ - // triggerID: triggerID, cmsgList: ccMsg}} - // sess.MarkMessage(msg, "") - // cMsg = make([]*sarama.ConsumerMessage, 0, 1000) - // log.Debug(triggerID, "length trigger msg consumer end", len(cMsg)) - // } - case <-t.C: if len(cMsg) > 0 { rwLock.Lock() @@ -416,163 +219,53 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerG for i := 0; i < len(ccMsg)/split; i++ { //log.Debug() och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{ - triggerID: triggerID, cmsgList: ccMsg[i*split : (i+1)*split]}} + triggerID: triggerID, cMsgList: ccMsg[i*split : (i+1)*split]}} } if (len(ccMsg) % split) > 0 { och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{ - triggerID: triggerID, cmsgList: ccMsg[split*(len(ccMsg)/split):]}} + triggerID: triggerID, cMsgList: ccMsg[split*(len(ccMsg)/split):]}} } - //sess.MarkMessage(ccMsg[len(cMsg)-1], "") - log.Debug(triggerID, "timer trigger msg consumer end", len(cMsg)) } } } - }() for msg := range claim.Messages() { - //msgFromMQ := pbMsg.MsgDataToMQ{} - //err := proto.Unmarshal(msg.Value, &msgFromMQ) - //if err != nil { - // log.Error(triggerID, "msg_transfer Unmarshal msg err", "msg", string(msg.Value), "err", err.Error()) - //} - //userID := string(msg.Key) - //hashCode := getHashCode(userID) - //channelID := hashCode % ChannelNum - //log.Debug(triggerID, "generate channelID", hashCode, channelID, userID) - ////go func(cID uint32, userID string, messages []*pbMsg.MsgDataToMQ) { - //och.chArrays[channelID] <- Cmd2Value{Cmd: UserMessages, Value: 
MsgChannelValue{userID: userID, msgList: []*pbMsg.MsgDataToMQ{&msgFromMQ}, triggerID: msgFromMQ.OperationID}} - //sess.MarkMessage(msg, "") rwLock.Lock() if len(msg.Value) != 0 { cMsg = append(cMsg, msg) } rwLock.Unlock() sess.MarkMessage(msg, "") - //och.TriggerCmd(OnlineTopicBusy) - - //log.NewDebug("", "online kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "online", msg.Offset, claim.HighWaterMarkOffset()) - } - return nil } -//func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, -// claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group -// -// for { -// if sess == nil { -// log.NewWarn("", " sess == nil, waiting ") -// time.Sleep(100 * time.Millisecond) -// } else { -// break -// } -// } -// -// log.NewDebug("", "online new session msg come", claim.HighWaterMarkOffset(), claim.Topic(), claim.Partition()) -// cMsg := make([]*sarama.ConsumerMessage, 0, 1000) -// t := time.NewTicker(time.Duration(100) * time.Millisecond) -// var triggerID string -// for msg := range claim.Messages() { -// cMsg = append(cMsg, msg) -// //och.TriggerCmd(OnlineTopicBusy) -// select { -// //case : -// // triggerID = utils.OperationIDGenerator() -// // -// // log.NewDebug(triggerID, "claim.Messages ", msg) -// // cMsg = append(cMsg, msg) -// // if len(cMsg) >= 1000 { -// // ccMsg := make([]*sarama.ConsumerMessage, 0, 1000) -// // for _, v := range cMsg { -// // ccMsg = append(ccMsg, v) -// // } -// // log.Debug(triggerID, "length trigger msg consumer start", len(ccMsg)) -// // och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{ -// // triggerID: triggerID, cmsgList: ccMsg}} -// // sess.MarkMessage(msg, "") -// // cMsg = make([]*sarama.ConsumerMessage, 0, 1000) -// // log.Debug(triggerID, "length trigger msg consumer end", len(cMsg)) -// // } -// -// case <-t.C: -// if len(cMsg) > 0 { -// ccMsg := make([]*sarama.ConsumerMessage, 0, 1000) -// for _, v := range cMsg 
{ -// ccMsg = append(ccMsg, v) -// } -// triggerID = utils.OperationIDGenerator() -// log.Debug(triggerID, "timer trigger msg consumer start", len(ccMsg)) -// och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{ -// triggerID: triggerID, cmsgList: ccMsg}} -// sess.MarkMessage(cMsg[len(cMsg)-1], "") -// cMsg = make([]*sarama.ConsumerMessage, 0, 1000) -// log.Debug(triggerID, "timer trigger msg consumer end", len(cMsg)) -// } -// default: -// -// } -// //log.NewDebug("", "online kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "online", msg.Offset, claim.HighWaterMarkOffset()) -// -// } -// return nil -//} - -func sendMessageToPush(message *pbMsg.MsgDataToMQ, pushToUserID string) { - log.Info(message.OperationID, "msg_transfer send message to push", "message", message.String()) - rpcPushMsg := pbPush.PushMsgReq{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID} - mqPushMsg := pbMsg.PushMsgDataToMQ{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID} - grpcConn := rpc.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImPushName, message.OperationID) - if grpcConn != nil { - log.Error(rpcPushMsg.OperationID, "rpc dial failed", "push data", rpcPushMsg.String()) - pid, offset, err := producer.SendMessage(&mqPushMsg, mqPushMsg.PushToUserID, rpcPushMsg.OperationID) - if err != nil { - log.Error(mqPushMsg.OperationID, "kafka send failed", "send data", message.String(), "pid", pid, "offset", offset, "err", err.Error()) - } - return - } - msgClient := pbPush.NewPushMsgServiceClient(grpcConn) - _, err := msgClient.PushMsg(context.Background(), &rpcPushMsg) - if err != nil { - log.Error(rpcPushMsg.OperationID, "rpc send failed", rpcPushMsg.OperationID, "push data", rpcPushMsg.String(), "err", err.Error()) - pid, offset, err := producer.SendMessage(&mqPushMsg, 
mqPushMsg.PushToUserID, rpcPushMsg.OperationID) - if err != nil { - log.Error(message.OperationID, "kafka send failed", mqPushMsg.OperationID, "send data", mqPushMsg.String(), "pid", pid, "offset", offset, "err", err.Error()) - } - } else { - log.Info(message.OperationID, "rpc send success", rpcPushMsg.OperationID, "push data", rpcPushMsg.String()) - - } -} - -func sendMessageToPushMQ(message *pbMsg.MsgDataToMQ, pushToUserID string) { +func (och *OnlineHistoryRedisConsumerHandler) sendMessageToPushMQ(ctx context.Context, message *pbMsg.MsgDataToMQ, pushToUserID string) { log.Info(message.OperationID, utils.GetSelfFuncName(), "msg ", message.String(), pushToUserID) rpcPushMsg := pbPush.PushMsgReq{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID} mqPushMsg := pbMsg.PushMsgDataToMQ{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID} - pid, offset, err := producer.SendMessage(&mqPushMsg, mqPushMsg.PushToUserID, rpcPushMsg.OperationID) + pid, offset, err := och.producerToPush.SendMessage(&mqPushMsg, mqPushMsg.PushToUserID, rpcPushMsg.OperationID) if err != nil { log.Error(mqPushMsg.OperationID, "kafka send failed", "send data", message.String(), "pid", pid, "offset", offset, "err", err.Error()) } return } -func sendMessageToModifyMQ(aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ) { +func (och *OnlineHistoryRedisConsumerHandler) sendMessageToModifyMQ(ctx context.Context, aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ) { if len(messages) > 0 { - pid, offset, err := producerToModify.SendMessage(&pbMsg.MsgDataToModifyByMQ{AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}, aggregationID, triggerID) + pid, offset, err := och.producerToModify.SendMessage(&pbMsg.MsgDataToModifyByMQ{AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}, aggregationID, triggerID) if err != nil { log.Error(triggerID, "kafka send 
failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID) - } else { - // log.NewWarn(m.OperationID, "sendMsgToKafka client msgID ", m.MsgData.ClientMsgID) } } } -// String hashes a string to a unique hashcode. -// -// crc32 returns a uint32, but for our use we need -// and non negative integer. Here we cast to an integer -// and invert it if the result is negative. -func getHashCode(s string) uint32 { - return crc32.ChecksumIEEE([]byte(s)) +func (och *OnlineHistoryRedisConsumerHandler) SendMessageToMongoCH(ctx context.Context, aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ, lastSeq uint64) { + if len(messages) > 0 { + pid, offset, err := och.producerToMongo.SendMessage(&pbMsg.MsgDataToMongoByMQ{LastSeq: lastSeq, AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}, aggregationID, triggerID) + if err != nil { + log.Error(triggerID, "kafka send failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID) + } + } } diff --git a/internal/msgtransfer/online_msg_to_mongo_handler.go b/internal/msgtransfer/online_msg_to_mongo_handler.go index 7c863e0c3..9d1202321 100644 --- a/internal/msgtransfer/online_msg_to_mongo_handler.go +++ b/internal/msgtransfer/online_msg_to_mongo_handler.go @@ -3,28 +3,29 @@ package msgtransfer import ( "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/db" + "Open_IM/pkg/common/db/cache" + "Open_IM/pkg/common/db/controller" kfk "Open_IM/pkg/common/kafka" "Open_IM/pkg/common/log" + "Open_IM/pkg/common/tracelog" pbMsg "Open_IM/pkg/proto/msg" - sdkws "Open_IM/pkg/proto/sdkws" + "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/utils" + "context" "github.com/Shopify/sarama" "github.com/golang/protobuf/proto" ) type OnlineHistoryMongoConsumerHandler struct { - msgHandle map[string]fcb historyConsumerGroup *kfk.MConsumerGroup + msgInterface controller.MsgInterface + cache cache.Cache } func 
(mc *OnlineHistoryMongoConsumerHandler) Init() { - mc.msgHandle = make(map[string]fcb) - mc.msgHandle[config.Config.Kafka.MsgToMongo.Topic] = mc.handleChatWs2Mongo mc.historyConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.MsgToMongo.Topic}, config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMongo) - } func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) { msg := cMsg.Value @@ -35,14 +36,17 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.Con return } log.Info(msgFromMQ.TriggerID, "BatchInsertChat2DB userID: ", msgFromMQ.AggregationID, "msgFromMQ.LastSeq: ", msgFromMQ.LastSeq) - err = db.DB.BatchInsertChat2DB(msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.TriggerID, msgFromMQ.LastSeq) + ctx := context.Background() + tracelog.SetOperationID(ctx, msgFromMQ.TriggerID) + //err = db.DB.BatchInsertChat2DB(msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.TriggerID, msgFromMQ.LastSeq) + err = mc.msgInterface.BatchInsertChat2DB(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.LastSeq) if err != nil { log.NewError(msgFromMQ.TriggerID, "single data insert to mongo err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID) - } else { - err = db.DB.DeleteMessageFromCache(msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.GetTriggerID()) - if err != nil { - log.NewError(msgFromMQ.TriggerID, "remove cache msg from redis err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID) - } + } + //err = db.DB.DeleteMessageFromCache(msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.GetTriggerID()) + err = mc.msgInterface.DeleteMessageFromCache(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList) + if err != nil 
{ + log.NewError(msgFromMQ.TriggerID, "remove cache msg from redis err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID) } for _, v := range msgFromMQ.MessageList { if v.MsgData.ContentType == constant.DeleteMessageNotification { @@ -58,23 +62,23 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.Con log.NewError(msgFromMQ.TriggerID, "deleteMessageTips unmarshal err:", err.Error(), v.String()) continue } - if unexistSeqList, err := db.DB.DelMsgBySeqList(DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID); err != nil { - log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID, err.Error(), unexistSeqList) + if totalUnExistSeqs, err := mc.msgInterface.DelMsgBySeqs(ctx, DeleteMessageTips.UserID, DeleteMessageTips.SeqList); err != nil { + log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqs args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, "error:", err.Error(), "totalUnExistSeqs: ", totalUnExistSeqs) } + } } } func (OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } func (OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } - func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group log.NewDebug("", "online new session msg come", claim.HighWaterMarkOffset(), claim.Topic(), claim.Partition()) for msg := range claim.Messages() { log.NewDebug("", "kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key)) if len(msg.Value) != 0 { - mc.msgHandle[msg.Topic](msg, string(msg.Key), sess) + mc.handleChatWs2Mongo(msg, string(msg.Key), sess) } else { log.Error("", "mongo msg get from kafka but is nil", msg.Key) } diff --git 
a/internal/msgtransfer/persistent_msg_handler.go b/internal/msgtransfer/persistent_msg_handler.go index 946dd6c48..3066f6079 100644 --- a/internal/msgtransfer/persistent_msg_handler.go +++ b/internal/msgtransfer/persistent_msg_handler.go @@ -9,7 +9,7 @@ package msgtransfer import ( "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/db/mysql_model/im_mysql_msg_model" + "Open_IM/pkg/common/db/controller" kfk "Open_IM/pkg/common/kafka" "Open_IM/pkg/common/log" pbMsg "Open_IM/pkg/proto/msg" @@ -17,33 +17,17 @@ import ( "github.com/Shopify/sarama" "github.com/golang/protobuf/proto" - - promePkg "Open_IM/pkg/common/prometheus" ) type PersistentConsumerHandler struct { - msgHandle map[string]fcb persistentConsumerGroup *kfk.MConsumerGroup + chatLogInterface controller.ChatLogInterface } func (pc *PersistentConsumerHandler) Init() { - pc.msgHandle = make(map[string]fcb) - pc.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = pc.handleChatWs2Mysql pc.persistentConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic}, config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMySql) - -} - -func initPrometheus() { - promePkg.NewSeqGetSuccessCounter() - promePkg.NewSeqGetFailedCounter() - promePkg.NewSeqSetSuccessCounter() - promePkg.NewSeqSetFailedCounter() - promePkg.NewMsgInsertRedisSuccessCounter() - promePkg.NewMsgInsertRedisFailedCounter() - promePkg.NewMsgInsertMongoSuccessCounter() - promePkg.NewMsgInsertMongoFailedCounter() } func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) { @@ -75,7 +59,7 @@ func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMes } if tag { log.NewInfo(msgFromMQ.OperationID, "msg_transfer msg persisting", string(msg)) - if err = 
im_mysql_msg_model.InsertMessageToChatLog(msgFromMQ); err != nil { + if err = pc.chatLogInterface.CreateChatLog(msgFromMQ); err != nil { log.NewError(msgFromMQ.OperationID, "Message insert failed", "err", err.Error(), "msg", msgFromMQ.String()) return } @@ -90,7 +74,7 @@ func (pc *PersistentConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSessi for msg := range claim.Messages() { log.NewDebug("", "kafka get info to mysql", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key)) if len(msg.Value) != 0 { - pc.msgHandle[msg.Topic](msg, string(msg.Key), sess) + pc.handleChatWs2Mysql(msg, string(msg.Key), sess) } else { log.Error("", "msg get from kafka but is nil", msg.Key) } @@ -98,15 +82,3 @@ func (pc *PersistentConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSessi } return nil } - -1. 请求1 group Rpc 2. 请求2 发消息 sendMsg rpc -1 更改数据库 - -2. 删除哈希缓存 - 检测到哈希变了, 群成员还没来得及删除,有问题 -3. 删除群成员缓存 - -4. 删除对应群成员加群缓存 - -5. 删除数量缓存 - diff --git a/internal/push/logic/init.go b/internal/push/logic/init.go index 08ed3057a..04334a1fd 100644 --- a/internal/push/logic/init.go +++ b/internal/push/logic/init.go @@ -15,7 +15,7 @@ import ( "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" "Open_IM/pkg/common/kafka" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "Open_IM/pkg/statistics" "fmt" ) @@ -53,15 +53,15 @@ func init() { } func initPrometheus() { - promePkg.NewMsgOfflinePushSuccessCounter() - promePkg.NewMsgOfflinePushFailedCounter() + prome.NewMsgOfflinePushSuccessCounter() + prome.NewMsgOfflinePushFailedCounter() } func Run(promethuesPort int) { go rpcServer.run() go pushCh.pushConsumerGroup.RegisterHandleAndConsumer(&pushCh) go func() { - err := promePkg.StartPromeSrv(promethuesPort) + err := prome.StartPromeSrv(promethuesPort) if err != nil { panic(err) } diff --git a/internal/push/logic/push_rpc_server.go b/internal/push/logic/push_rpc_server.go index 9b8dde757..888dfb255 100644 --- 
a/internal/push/logic/push_rpc_server.go +++ b/internal/push/logic/push_rpc_server.go @@ -5,7 +5,7 @@ import ( "Open_IM/pkg/common/constant" "Open_IM/pkg/common/db" "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "Open_IM/pkg/getcdv3" pbPush "Open_IM/pkg/proto/push" "Open_IM/pkg/utils" @@ -47,11 +47,11 @@ func (r *RPCServer) run() { defer listener.Close() var grpcOpts []grpc.ServerOption if config.Config.Prometheus.Enable { - promePkg.NewGrpcRequestCounter() - promePkg.NewGrpcRequestFailedCounter() - promePkg.NewGrpcRequestSuccessCounter() + prome.NewGrpcRequestCounter() + prome.NewGrpcRequestFailedCounter() + prome.NewGrpcRequestSuccessCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) 
diff --git a/internal/push/logic/push_to_client.go b/internal/push/logic/push_to_client.go index bc1f4b915..f1e42dc02 100644 --- a/internal/push/logic/push_to_client.go +++ b/internal/push/logic/push_to_client.go @@ -20,7 +20,7 @@ import ( "context" "strings" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "github.com/golang/protobuf/proto" ) @@ -144,10 +144,10 @@ func MsgToUser(pushMsg *pbPush.PushMsgReq) { } pushResult, err := offlinePusher.Push(UIDList, title, detailContent, pushMsg.OperationID, opts) if err != nil { - promePkg.PromeInc(promePkg.MsgOfflinePushFailedCounter) + prome.PromeInc(prome.MsgOfflinePushFailedCounter) log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error()) } else { - promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter) + prome.PromeInc(prome.MsgOfflinePushSuccessCounter) log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData) } } @@ -267,10 +267,10 @@ func MsgToSuperGroupUser(pushMsg *pbPush.PushMsgReq) { } pushResult, err := offlinePusher.Push(needOfflinePushUserIDList, title, detailContent, pushMsg.OperationID, opts) if err != nil { - promePkg.PromeInc(promePkg.MsgOfflinePushFailedCounter) + prome.PromeInc(prome.MsgOfflinePushFailedCounter) log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error()) } else { - promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter) + prome.PromeInc(prome.MsgOfflinePushSuccessCounter) log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData) } } diff --git a/internal/rpc/auth/auth.go b/internal/rpc/auth/auth.go index c6df00f05..d55922465 100644 --- a/internal/rpc/auth/auth.go +++ b/internal/rpc/auth/auth.go @@ -8,7 +8,7 @@ import ( "Open_IM/pkg/common/db/cache" "Open_IM/pkg/common/db/controller" "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" 
"Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tracelog" pbAuth "Open_IM/pkg/proto/auth" @@ -42,13 +42,13 @@ func (s *rpcAuth) Run() { log.NewInfo(operationID, "listen network success ", listener, address) var grpcOpts []grpc.ServerOption if config.Config.Prometheus.Enable { - promePkg.NewGrpcRequestCounter() - promePkg.NewGrpcRequestFailedCounter() - promePkg.NewGrpcRequestSuccessCounter() - promePkg.NewUserRegisterCounter() - promePkg.NewUserLoginCounter() + prome.NewGrpcRequestCounter() + prome.NewGrpcRequestFailedCounter() + prome.NewGrpcRequestSuccessCounter() + prome.NewUserRegisterCounter() + prome.NewUserLoginCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) diff --git a/internal/rpc/conversation/conversaion.go b/internal/rpc/conversation/conversaion.go index 54d5a0380..e76b3901f 100644 --- a/internal/rpc/conversation/conversaion.go +++ b/internal/rpc/conversation/conversaion.go @@ -6,14 +6,11 @@ import ( "Open_IM/pkg/common/constant" "Open_IM/pkg/common/db/cache" "Open_IM/pkg/common/db/controller" - "Open_IM/pkg/common/db/relation" - "Open_IM/pkg/common/db/table" - "Open_IM/pkg/common/db/unrelation" + relationTb "Open_IM/pkg/common/db/relation" + unrealationTb "Open_IM/pkg/common/db/unrelation" "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" - "Open_IM/pkg/getcdv3" + "Open_IM/pkg/common/prome" pbConversation "Open_IM/pkg/proto/conversation" - pbUser "Open_IM/pkg/proto/user" "Open_IM/pkg/utils" "context" "github.com/dtm-labs/rockscache" @@ -95,11 +92,11 @@ func (c *conversationServer) Run() { //grpc server var grpcOpts []grpc.ServerOption if config.Config.Prometheus.Enable { - promePkg.NewGrpcRequestCounter() - promePkg.NewGrpcRequestFailedCounter() - 
promePkg.NewGrpcRequestSuccessCounter() + prome.NewGrpcRequestCounter() + prome.NewGrpcRequestFailedCounter() + prome.NewGrpcRequestSuccessCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) diff --git a/internal/rpc/friend/friend.go b/internal/rpc/friend/friend.go index 00704381e..777246485 100644 --- a/internal/rpc/friend/friend.go +++ b/internal/rpc/friend/friend.go @@ -12,7 +12,7 @@ import ( relationTb "Open_IM/pkg/common/db/table/relation" "Open_IM/pkg/common/log" "Open_IM/pkg/common/middleware" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tracelog" pbFriend "Open_IM/pkg/proto/friend" @@ -75,11 +75,11 @@ func (s *friendServer) Run() { var grpcOpts []grpc.ServerOption grpcOpts = append(grpcOpts, grpc.UnaryInterceptor(middleware.RpcServerInterceptor)) if config.Config.Prometheus.Enable { - promePkg.NewGrpcRequestCounter() - promePkg.NewGrpcRequestFailedCounter() - promePkg.NewGrpcRequestSuccessCounter() + prome.NewGrpcRequestCounter() + prome.NewGrpcRequestFailedCounter() + prome.NewGrpcRequestSuccessCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) 
diff --git a/internal/rpc/group/callback.go b/internal/rpc/group/callback.go index feb1685cc..72bf4e423 100644 --- a/internal/rpc/group/callback.go +++ b/internal/rpc/group/callback.go @@ -1,7 +1,7 @@ package group import ( - cbApi "Open_IM/pkg/callback_struct" + "Open_IM/pkg/callbackstruct" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" "Open_IM/pkg/common/db/table/relation" diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index 5c44aa31c..5e2eb7d12 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -12,7 +12,7 @@ import ( "Open_IM/pkg/common/db/unrelation" "Open_IM/pkg/common/log" "Open_IM/pkg/common/middleware" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tracelog" discoveryRegistry "Open_IM/pkg/discoveryregistry" @@ -70,9 +70,7 @@ func NewGroupServer(port int) *groupServer { if err != nil { panic(err.Error()) } - //conns, err := g.registerCenter.GetConns(config.Config.RpcRegisterName.OpenImConversationName) - g.GroupInterface = controller.NewGroupInterface(mysql.GormConn(), redis.GetClient(), mongo.GetClient()) return &g } @@ -98,11 +96,11 @@ func (s *groupServer) Run() { grpc.UnaryInterceptor(middleware.RpcServerInterceptor), } if config.Config.Prometheus.Enable { - promePkg.NewGrpcRequestCounter() - promePkg.NewGrpcRequestFailedCounter() - promePkg.NewGrpcRequestSuccessCounter() + prome.NewGrpcRequestCounter() + prome.NewGrpcRequestFailedCounter() + prome.NewGrpcRequestSuccessCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) 
diff --git a/internal/rpc/msg/pull_message.go b/internal/rpc/msg/pull_message.go index 265c1e264..9de78e2ca 100644 --- a/internal/rpc/msg/pull_message.go +++ b/internal/rpc/msg/pull_message.go @@ -9,7 +9,7 @@ import ( "Open_IM/pkg/common/log" sdkws "Open_IM/pkg/proto/sdkws" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" ) func (rpc *rpcChat) GetMaxAndMinSeq(_ context.Context, in *sdkws.GetMaxAndMinSeqReq) (*sdkws.GetMaxAndMinSeqResp, error) { @@ -53,25 +53,25 @@ func (rpc *rpcChat) PullMessageBySeqList(_ context.Context, in *sdkws.PullMessag redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(in.UserID, in.SeqList, in.OperationID) if err != nil { if err != go_redis.Nil { - promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList)) + prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqList)) log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList) } else { log.Debug(in.OperationID, "get message from redis is nil", failedSeqList) } - msgList, err1 := commonDB.DB.GetMsgBySeqListMongo2(in.UserID, failedSeqList, in.OperationID) + msgList, err1 := commonDB.DB.GetMsgBySeqs(in.UserID, failedSeqList, in.OperationID) if err1 != nil { - promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList)) + prome.PromeAdd(prome.MsgPullFromMongoFailedCounter, len(failedSeqList)) log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err1.Error()) resp.ErrCode = 201 resp.ErrMsg = err1.Error() return resp, nil } else { - promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList)) + prome.PromeAdd(prome.MsgPullFromMongoSuccessCounter, len(msgList)) redisMsgList = append(redisMsgList, msgList...) 
resp.List = redisMsgList } } else { - promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList)) + prome.PromeAdd(prome.MsgPullFromRedisSuccessCounter, len(redisMsgList)) resp.List = redisMsgList } @@ -80,26 +80,26 @@ func (rpc *rpcChat) PullMessageBySeqList(_ context.Context, in *sdkws.PullMessag redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(k, v.SeqList, in.OperationID) if err != nil { if err != go_redis.Nil { - promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList)) + prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqList)) log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList) } else { log.Debug(in.OperationID, "get message from redis is nil", failedSeqList) } - msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqListMongo(k, failedSeqList, in.OperationID) + msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqs(k, failedSeqList, in.OperationID) if err1 != nil { - promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList)) + prome.PromeAdd(prome.MsgPullFromMongoFailedCounter, len(failedSeqList)) log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err1.Error()) resp.ErrCode = 201 resp.ErrMsg = err1.Error() return resp, nil } else { - promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList)) + prome.PromeAdd(prome.MsgPullFromMongoSuccessCounter, len(msgList)) redisMsgList = append(redisMsgList, msgList...) 
x.MsgDataList = redisMsgList m[k] = x } } else { - promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList)) + prome.PromeAdd(prome.MsgPullFromRedisSuccessCounter, len(redisMsgList)) x.MsgDataList = redisMsgList m[k] = x } diff --git a/internal/rpc/msg/query_msg.go b/internal/rpc/msg/query_msg.go index f8a2afc7c..3c8d3ef50 100644 --- a/internal/rpc/msg/query_msg.go +++ b/internal/rpc/msg/query_msg.go @@ -3,7 +3,7 @@ package msg import ( commonDB "Open_IM/pkg/common/db" "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "Open_IM/pkg/proto/msg" "Open_IM/pkg/utils" "context" @@ -16,20 +16,20 @@ func (rpc *rpcChat) GetSuperGroupMsg(context context.Context, req *msg.GetSuperG redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(req.GroupID, []uint32{req.Seq}, req.OperationID) if err != nil { if err != go_redis.Nil { - promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList)) + prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqList)) log.Error(req.OperationID, "get message from redis exception", err.Error(), failedSeqList) } else { log.Debug(req.OperationID, "get message from redis is nil", failedSeqList) } - msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqListMongo(req.GroupID, failedSeqList, req.OperationID) + msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqs(req.GroupID, failedSeqList, req.OperationID) if err1 != nil { - promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList)) + prome.PromeAdd(prome.MsgPullFromMongoFailedCounter, len(failedSeqList)) log.Error(req.OperationID, "GetSuperGroupMsg data error", req.String(), err.Error()) resp.ErrCode = 201 resp.ErrMsg = err.Error() return resp, nil } else { - promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList)) + prome.PromeAdd(prome.MsgPullFromMongoSuccessCounter, len(msgList)) redisMsgList = append(redisMsgList, msgList...) 
for _, m := range msgList { resp.MsgData = m @@ -37,7 +37,7 @@ func (rpc *rpcChat) GetSuperGroupMsg(context context.Context, req *msg.GetSuperG } } else { - promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList)) + prome.PromeAdd(prome.MsgPullFromRedisSuccessCounter, len(redisMsgList)) for _, m := range redisMsgList { resp.MsgData = m } diff --git a/internal/rpc/msg/rpc_chat.go b/internal/rpc/msg/rpc_chat.go index 699f539a5..dec4db5cc 100644 --- a/internal/rpc/msg/rpc_chat.go +++ b/internal/rpc/msg/rpc_chat.go @@ -6,7 +6,7 @@ import ( "Open_IM/pkg/common/db" "Open_IM/pkg/common/kafka" "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" "Open_IM/pkg/proto/msg" "Open_IM/pkg/utils" "github.com/OpenIMSDK/getcdv3" @@ -66,21 +66,21 @@ func (rpc *rpcChat) initPrometheus() { // Name: "send_msg_failed", // Help: "The number of send msg failed", //}) - promePkg.NewMsgPullFromRedisSuccessCounter() - promePkg.NewMsgPullFromRedisFailedCounter() - promePkg.NewMsgPullFromMongoSuccessCounter() - promePkg.NewMsgPullFromMongoFailedCounter() + prome.NewMsgPullFromRedisSuccessCounter() + prome.NewMsgPullFromRedisFailedCounter() + prome.NewMsgPullFromMongoSuccessCounter() + prome.NewMsgPullFromMongoFailedCounter() - promePkg.NewSingleChatMsgRecvSuccessCounter() - promePkg.NewGroupChatMsgRecvSuccessCounter() - promePkg.NewWorkSuperGroupChatMsgRecvSuccessCounter() + prome.NewSingleChatMsgRecvSuccessCounter() + prome.NewGroupChatMsgRecvSuccessCounter() + prome.NewWorkSuperGroupChatMsgRecvSuccessCounter() - promePkg.NewSingleChatMsgProcessSuccessCounter() - promePkg.NewSingleChatMsgProcessFailedCounter() - promePkg.NewGroupChatMsgProcessSuccessCounter() - promePkg.NewGroupChatMsgProcessFailedCounter() - promePkg.NewWorkSuperGroupChatMsgProcessSuccessCounter() - promePkg.NewWorkSuperGroupChatMsgProcessFailedCounter() + prome.NewSingleChatMsgProcessSuccessCounter() + prome.NewSingleChatMsgProcessFailedCounter() + 
prome.NewGroupChatMsgProcessSuccessCounter() + prome.NewGroupChatMsgProcessFailedCounter() + prome.NewWorkSuperGroupChatMsgProcessSuccessCounter() + prome.NewWorkSuperGroupChatMsgProcessFailedCounter() } func (rpc *rpcChat) Run() { @@ -104,11 +104,11 @@ func (rpc *rpcChat) Run() { grpc.MaxSendMsgSize(sendSize), } if config.Config.Prometheus.Enable { - promePkg.NewGrpcRequestCounter() - promePkg.NewGrpcRequestFailedCounter() - promePkg.NewGrpcRequestSuccessCounter() + prome.NewGrpcRequestCounter() + prome.NewGrpcRequestFailedCounter() + prome.NewGrpcRequestSuccessCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) @@ -146,9 +146,9 @@ func (rpc *rpcChat) runCh() { case msg := <-rpc.delMsgCh: log.NewInfo(msg.OperationID, utils.GetSelfFuncName(), "delmsgch recv new: ", msg) db.DB.DelMsgFromCache(msg.UserID, msg.SeqList, msg.OperationID) - unexistSeqList, err := db.DB.DelMsgBySeqList(msg.UserID, msg.SeqList, msg.OperationID) + unexistSeqList, err := db.DB.DelMsgBySeqs(msg.UserID, msg.SeqList, msg.OperationID) if err != nil { - log.NewError(msg.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", msg.UserID, msg.SeqList, msg.OperationID, err.Error()) + log.NewError(msg.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqs args: ", msg.UserID, msg.SeqList, msg.OperationID, err.Error()) continue } if len(unexistSeqList) > 0 { diff --git a/internal/rpc/msg/send_msg.go b/internal/rpc/msg/send_msg.go index 2060e8998..92d556a25 100644 --- a/internal/rpc/msg/send_msg.go +++ b/internal/rpc/msg/send_msg.go @@ -23,7 +23,7 @@ import ( "sync" "time" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" go_redis "github.com/go-redis/redis/v8" "github.com/golang/protobuf/proto" ) @@ 
-369,7 +369,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat } switch pb.MsgData.SessionType { case constant.SingleChatType: - promePkg.PromeInc(promePkg.SingleChatMsgRecvSuccessCounter) + prome.PromeInc(prome.SingleChatMsgRecvSuccessCounter) // callback t1 = time.Now() callbackResp := callbackBeforeSendSingleMsg(pb) @@ -382,7 +382,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat callbackResp.ErrCode = 201 } log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSingleMsg result", "end rpc and return", callbackResp) - promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter) + prome.PromeInc(prome.SingleChatMsgProcessFailedCounter) return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0) } t1 = time.Now() @@ -402,7 +402,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat log.Info(pb.OperationID, "sendMsgToWriter ", " cost time: ", time.Since(t1)) if err1 != nil { log.NewError(msgToMQSingle.OperationID, "kafka send msg err :RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String(), err1.Error()) - promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter) + prome.PromeInc(prome.SingleChatMsgProcessFailedCounter) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) } } @@ -412,7 +412,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat log.Info(pb.OperationID, "sendMsgToWriter ", " cost time: ", time.Since(t1)) if err2 != nil { log.NewError(msgToMQSingle.OperationID, "kafka send msg err:SendID", msgToMQSingle.MsgData.SendID, msgToMQSingle.String()) - promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter) + prome.PromeInc(prome.SingleChatMsgProcessFailedCounter) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) } } @@ -423,11 +423,11 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat if callbackResp.ErrCode 
!= 0 { log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSingleMsg resp: ", callbackResp) } - promePkg.PromeInc(promePkg.SingleChatMsgProcessSuccessCounter) + prome.PromeInc(prome.SingleChatMsgProcessSuccessCounter) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) case constant.GroupChatType: // callback - promePkg.PromeInc(promePkg.GroupChatMsgRecvSuccessCounter) + prome.PromeInc(prome.GroupChatMsgRecvSuccessCounter) callbackResp := callbackBeforeSendGroupMsg(pb) if callbackResp.ErrCode != 0 { log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendGroupMsg resp:", callbackResp) @@ -437,12 +437,12 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat callbackResp.ErrCode = 201 } log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSingleMsg result", "end rpc and return", callbackResp) - promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter) + prome.PromeInc(prome.GroupChatMsgProcessFailedCounter) return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0) } var memberUserIDList []string if flag, errCode, errMsg, memberUserIDList = rpc.messageVerification(ctx, pb); !flag { - promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter) + prome.PromeInc(prome.GroupChatMsgProcessFailedCounter) return returnMsg(&replay, pb, errCode, errMsg, "", 0) } log.Debug(pb.OperationID, "GetGroupAllMember userID list", memberUserIDList, "len: ", len(memberUserIDList)) @@ -506,7 +506,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat } if !sendTag { log.NewWarn(pb.OperationID, "send tag is ", sendTag) - promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter) + prome.PromeInc(prome.GroupChatMsgProcessFailedCounter) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) } else { if pb.MsgData.ContentType == constant.AtText { @@ -571,7 +571,7 @@ func (rpc 
*rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat }() } log.Debug(pb.OperationID, "send msg cost time3 ", time.Since(t1), pb.MsgData.ClientMsgID) - promePkg.PromeInc(promePkg.GroupChatMsgProcessSuccessCounter) + prome.PromeInc(prome.GroupChatMsgProcessSuccessCounter) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) } case constant.NotificationChatType: @@ -595,7 +595,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat log.Debug(pb.OperationID, "send msg cost time ", time.Since(t1), pb.MsgData.ClientMsgID) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) case constant.SuperGroupChatType: - promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgRecvSuccessCounter) + prome.PromeInc(prome.WorkSuperGroupChatMsgRecvSuccessCounter) // callback callbackResp := callbackBeforeSendGroupMsg(pb) if callbackResp.ErrCode != 0 { @@ -605,12 +605,12 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat if callbackResp.ErrCode == 0 { callbackResp.ErrCode = 201 } - promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter) + prome.PromeInc(prome.WorkSuperGroupChatMsgProcessFailedCounter) log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSuperGroupMsg result", "end rpc and return", callbackResp) return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0) } if flag, errCode, errMsg, _ = rpc.messageVerification(ctx, pb); !flag { - promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter) + prome.PromeInc(prome.WorkSuperGroupChatMsgProcessFailedCounter) return returnMsg(&replay, pb, errCode, errMsg, "", 0) } msgToMQSingle.MsgData = pb.MsgData @@ -618,7 +618,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat err1 := rpc.sendMsgToWriter(ctx, &msgToMQSingle, msgToMQSingle.MsgData.GroupID, 
constant.OnlineStatus) if err1 != nil { log.NewError(msgToMQSingle.OperationID, "kafka send msg err:RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String()) - promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter) + prome.PromeInc(prome.WorkSuperGroupChatMsgProcessFailedCounter) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) } // callback @@ -626,7 +626,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat if callbackResp.ErrCode != 0 { log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSuperGroupMsg resp: ", callbackResp) } - promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessSuccessCounter) + prome.PromeInc(prome.WorkSuperGroupChatMsgProcessSuccessCounter) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) default: diff --git a/internal/rpc/user/callback.go b/internal/rpc/user/callback.go deleted file mode 100644 index a00006b65..000000000 --- a/internal/rpc/user/callback.go +++ /dev/null @@ -1 +0,0 @@ -package user diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index a7c50d823..ae76f30bd 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -3,6 +3,7 @@ package user import ( "Open_IM/internal/common/convert" "Open_IM/internal/common/rpc_server" + "Open_IM/internal/common/rpcserver" chat "Open_IM/internal/rpc/msg" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" @@ -65,7 +66,7 @@ func (s *userServer) Run() { prome.NewGrpcRequestFailedCounter() prome.NewGrpcRequestSuccessCounter() grpcOpts = append(grpcOpts, []grpc.ServerOption{ - // grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), + // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), }...) 
diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index 9eda74055..ecf6b7774 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -225,10 +225,6 @@ type config struct { Addr []string `yaml:"addr"` Topic string `yaml:"topic"` } - //Ws2mschatOffline struct { - // Addr []string `yaml:"addr"` - // Topic string `yaml:"topic"` - //} MsgToMongo struct { Addr []string `yaml:"addr"` Topic string `yaml:"topic"` @@ -252,7 +248,6 @@ type config struct { Secret string `yaml:"secret"` MultiLoginPolicy int `yaml:"multiloginpolicy"` ChatPersistenceMysql bool `yaml:"chatpersistencemysql"` - ReliableStorage bool `yaml:"reliablestorage"` MsgCacheTimeout int `yaml:"msgCacheTimeout"` GroupMessageHasReadReceiptEnable bool `yaml:"groupMessageHasReadReceiptEnable"` SingleMessageHasReadReceiptEnable bool `yaml:"singleMessageHasReadReceiptEnable"` diff --git a/pkg/common/db/cache/conversation.go b/pkg/common/db/cache/conversation.go index 844e9b937..b75a0aca4 100644 --- a/pkg/common/db/cache/conversation.go +++ b/pkg/common/db/cache/conversation.go @@ -21,7 +21,7 @@ const ( conversationExpireTime = time.Second * 60 * 60 * 12 ) -// args fn will exec when no data in cache +// arg fn will exec when no data in cache type ConversationCache interface { // get user's conversationIDs from cache GetUserConversationIDs(ctx context.Context, userID string, fn func(ctx context.Context, userID string) ([]string, error)) ([]string, error) @@ -96,7 +96,7 @@ func (c *ConversationRedis) GetUserConversationIDs(ctx context.Context, ownerUse // return nil, utils.Wrap(err, "") //} //return conversationIDs, nil - return GetCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), time.Second*30*60, func(ctx context.Context) ([]string, error) { + return GetCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), conversationExpireTime, func(ctx context.Context) ([]string, error) { return f(ownerUserID) }) } @@ -122,7 +122,7 @@ func (c *ConversationRedis) 
GetUserConversationIDs1(ctx context.Context, ownerUs // return nil, utils.Wrap(err, "") //} //return conversationIDs, nil - return GetCache1[[]string](c.rcClient, c.getConversationIDsKey(ownerUserID), time.Second*30*60, fn) + return GetCache1[[]string](c.rcClient, c.getConversationIDsKey(ownerUserID), conversationExpireTime, fn) } //func GetCache1[T any](rcClient *rockscache.Client, key string, expire time.Duration, fn func() (any, error)) (T, error) { diff --git a/pkg/common/db/cache/redis.go b/pkg/common/db/cache/redis.go index 82523eb0f..2108701f0 100644 --- a/pkg/common/db/cache/redis.go +++ b/pkg/common/db/cache/redis.go @@ -3,10 +3,9 @@ package cache import ( "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" - log2 "Open_IM/pkg/common/log" pbChat "Open_IM/pkg/proto/msg" pbRtc "Open_IM/pkg/proto/rtc" - pbCommon "Open_IM/pkg/proto/sdkws" + "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/utils" "context" "errors" @@ -39,13 +38,52 @@ const ( ) type Cache interface { - IncrUserSeq(uid string) (uint64, error) - GetUserMaxSeq(uid string) (uint64, error) - SetUserMaxSeq(uid string, maxSeq uint64) error - SetUserMinSeq(uid string, minSeq uint32) (err error) - GetUserMinSeq(uid string) (uint64, error) - SetGroupUserMinSeq(groupID, userID string, minSeq uint64) (err error) - GetGroupUserMinSeq(groupID, userID string) (uint64, error) + IncrUserSeq(ctx context.Context, userID string) (uint64, error) + GetUserMaxSeq(ctx context.Context, userID string) (uint64, error) + SetUserMaxSeq(ctx context.Context, userID string, maxSeq uint64) error + SetUserMinSeq(ctx context.Context, userID string, minSeq uint64) (err error) + GetUserMinSeq(ctx context.Context, userID string) (uint64, error) + SetGroupUserMinSeq(ctx context.Context, groupID, userID string, minSeq uint64) (err error) + GetGroupUserMinSeq(ctx context.Context, groupID, userID string) (uint64, error) + GetGroupMaxSeq(ctx context.Context, groupID string) (uint64, error) + IncrGroupMaxSeq(ctx context.Context, groupID 
string) (uint64, error) + SetGroupMaxSeq(ctx context.Context, groupID string, maxSeq uint64) error + SetGroupMinSeq(ctx context.Context, groupID string, minSeq uint32) error + AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error + GetTokenMapByUidPid(ctx context.Context, userID, platformID string) (map[string]int, error) + SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error + DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error + GetMessageListBySeq(ctx context.Context, userID string, seqList []uint32) (seqMsg []*sdkws.MsgData, failedSeqList []uint32, err error) + SetMessageToCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) (int, error) + DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) error + CleanUpOneUserAllMsgFromRedis(ctx context.Context, userID string) error + HandleSignalInfo(ctx context.Context, msg *sdkws.MsgData, pushToUserID string) (isSend bool, err error) + GetSignalInfoFromCacheByClientMsgID(ctx context.Context, clientMsgID string) (invitationInfo *pbRtc.SignalInviteReq, err error) + GetAvailableSignalInvitationInfo(ctx context.Context, userID string) (invitationInfo *pbRtc.SignalInviteReq, err error) + DelUserSignalList(ctx context.Context, userID string) error + DelMsgFromCache(ctx context.Context, userID string, seqList []uint32) error + + SetGetuiToken(ctx context.Context, token string, expireTime int64) error + GetGetuiToken(ctx context.Context) (string, error) + SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error + GetGetuiTaskID(ctx context.Context) (string, error) + + SetSendMsgStatus(ctx context.Context, status int32) error + GetSendMsgStatus(ctx context.Context) (int, error) + SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) + GetFcmToken(ctx context.Context, account 
string, platformID int) (string, error) + DelFcmToken(ctx context.Context, account string, platformID int) error + IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) + SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error + GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) + JudgeMessageReactionEXISTS(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) + GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) + DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error + SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) + GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) + SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error + LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error + UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error } // native redis operate @@ -54,7 +92,7 @@ type RedisClient struct { rdb redis.UniversalClient } -func (r *RedisClient) InitRedis() { +func (r *RedisClient) InitRedis() error { var rdb redis.UniversalClient var err error ctx := context.Background() @@ -67,8 +105,8 @@ func (r *RedisClient) InitRedis() { }) _, err = rdb.Ping(ctx).Result() if err != nil { - fmt.Println("redis cluster failed address ", config.Config.Redis.DBAddress) - panic(err.Error() + " redis cluster " + config.Config.Redis.DBUserName + config.Config.Redis.DBPassWord) + fmt.Println("redis cluster failed address ", config.Config.Redis.DBAddress, config.Config.Redis.DBUserName, config.Config.Redis.DBPassWord) + return err } } else { rdb = redis.NewClient(&redis.Options{ @@ -80,10 +118,12 @@ func (r *RedisClient) 
InitRedis() { }) _, err = rdb.Ping(ctx).Result() if err != nil { - panic(err.Error() + " redis " + config.Config.Redis.DBAddress[0] + config.Config.Redis.DBUserName + config.Config.Redis.DBPassWord) + fmt.Println(" redis " + config.Config.Redis.DBAddress[0] + config.Config.Redis.DBUserName + config.Config.Redis.DBPassWord) + return err } } r.rdb = rdb + return nil } func (r *RedisClient) GetClient() redis.UniversalClient { @@ -95,80 +135,78 @@ func NewRedisClient(rdb redis.UniversalClient) *RedisClient { } // Perform seq auto-increment operation of user messages -func (r *RedisClient) IncrUserSeq(uid string) (uint64, error) { +func (r *RedisClient) IncrUserSeq(ctx context.Context, uid string) (uint64, error) { key := userIncrSeq + uid seq, err := r.rdb.Incr(context.Background(), key).Result() return uint64(seq), err } // Get the largest Seq -func (r *RedisClient) GetUserMaxSeq(uid string) (uint64, error) { +func (r *RedisClient) GetUserMaxSeq(ctx context.Context, uid string) (uint64, error) { key := userIncrSeq + uid seq, err := r.rdb.Get(context.Background(), key).Result() return uint64(utils.StringToInt(seq)), err } // set the largest Seq -func (r *RedisClient) SetUserMaxSeq(uid string, maxSeq uint64) error { +func (r *RedisClient) SetUserMaxSeq(ctx context.Context, uid string, maxSeq uint64) error { key := userIncrSeq + uid return r.rdb.Set(context.Background(), key, maxSeq, 0).Err() } // Set the user's minimum seq -func (r *RedisClient) SetUserMinSeq(uid string, minSeq uint32) (err error) { +func (r *RedisClient) SetUserMinSeq(ctx context.Context, uid string, minSeq uint64) (err error) { key := userMinSeq + uid return r.rdb.Set(context.Background(), key, minSeq, 0).Err() } // Get the smallest Seq -func (r *RedisClient) GetUserMinSeq(uid string) (uint64, error) { +func (r *RedisClient) GetUserMinSeq(ctx context.Context, uid string) (uint64, error) { key := userMinSeq + uid seq, err := r.rdb.Get(context.Background(), key).Result() return 
uint64(utils.StringToInt(seq)), err } -func (r *RedisClient) SetGroupUserMinSeq(groupID, userID string, minSeq uint64) (err error) { +func (r *RedisClient) SetGroupUserMinSeq(ctx context.Context, groupID, userID string, minSeq uint64) (err error) { key := groupUserMinSeq + "g:" + groupID + "u:" + userID return r.rdb.Set(context.Background(), key, minSeq, 0).Err() } -func (r *RedisClient) GetGroupUserMinSeq(groupID, userID string) (uint64, error) { +func (r *RedisClient) GetGroupUserMinSeq(ctx context.Context, groupID, userID string) (uint64, error) { key := groupUserMinSeq + "g:" + groupID + "u:" + userID seq, err := r.rdb.Get(context.Background(), key).Result() return uint64(utils.StringToInt(seq)), err } -func (r *RedisClient) GetGroupMaxSeq(groupID string) (uint64, error) { +func (r *RedisClient) GetGroupMaxSeq(ctx context.Context, groupID string) (uint64, error) { key := groupMaxSeq + groupID seq, err := r.rdb.Get(context.Background(), key).Result() return uint64(utils.StringToInt(seq)), err } -func (r *RedisClient) IncrGroupMaxSeq(groupID string) (uint64, error) { +func (r *RedisClient) IncrGroupMaxSeq(ctx context.Context, groupID string) (uint64, error) { key := groupMaxSeq + groupID seq, err := r.rdb.Incr(context.Background(), key).Result() return uint64(seq), err } -func (r *RedisClient) SetGroupMaxSeq(groupID string, maxSeq uint64) error { +func (r *RedisClient) SetGroupMaxSeq(ctx context.Context, groupID string, maxSeq uint64) error { key := groupMaxSeq + groupID return r.rdb.Set(context.Background(), key, maxSeq, 0).Err() } -func (r *RedisClient) SetGroupMinSeq(groupID string, minSeq uint32) error { +func (r *RedisClient) SetGroupMinSeq(ctx context.Context, groupID string, minSeq uint32) error { key := groupMinSeq + groupID return r.rdb.Set(context.Background(), key, minSeq, 0).Err() } // Store userid and platform class to redis -func (r *RedisClient) AddTokenFlag(userID string, platformID int, token string, flag int) error { +func (r *RedisClient) 
AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error { key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID) - log2.NewDebug("", "add token key is ", key) return r.rdb.HSet(context.Background(), key, token, flag).Err() } -func (r *RedisClient) GetTokenMapByUidPid(userID, platformID string) (map[string]int, error) { +func (r *RedisClient) GetTokenMapByUidPid(ctx context.Context, userID, platformID string) (map[string]int, error) { key := uidPidToken + userID + ":" + platformID - log2.NewDebug("", "get token key is ", key) m, err := r.rdb.HGetAll(context.Background(), key).Result() mm := make(map[string]int) for k, v := range m { @@ -176,7 +214,7 @@ func (r *RedisClient) GetTokenMapByUidPid(userID, platformID string) (map[string } return mm, err } -func (r *RedisClient) SetTokenMapByUidPid(userID string, platformID int, m map[string]int) error { +func (r *RedisClient) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error { key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID) mm := make(map[string]interface{}) for k, v := range m { @@ -185,12 +223,12 @@ func (r *RedisClient) SetTokenMapByUidPid(userID string, platformID int, m map[s return r.rdb.HSet(context.Background(), key, mm).Err() } -func (r *RedisClient) DeleteTokenByUidPid(userID string, platformID int, fields []string) error { +func (r *RedisClient) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error { key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID) return r.rdb.HDel(context.Background(), key, fields...).Err() } -func (r *RedisClient) GetMessageListBySeq(userID string, seqList []uint32, operationID string) (seqMsg []*pbCommon.MsgData, failedSeqList []uint32, errResult error) { +func (r *RedisClient) GetMessageListBySeq(ctx context.Context, userID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, 
failedSeqList []uint32, errResult error) { for _, v := range seqList { //MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1 key := messageCache + userID + "_" + strconv.Itoa(int(v)) @@ -198,16 +236,13 @@ func (r *RedisClient) GetMessageListBySeq(userID string, seqList []uint32, opera if err != nil { errResult = err failedSeqList = append(failedSeqList, v) - log2.Debug(operationID, "redis get message error: ", err.Error(), v) } else { - msg := pbCommon.MsgData{} + msg := sdkws.MsgData{} err = jsonpb.UnmarshalString(result, &msg) if err != nil { errResult = err failedSeqList = append(failedSeqList, v) - log2.NewWarn(operationID, "Unmarshal err ", result, err.Error()) } else { - log2.NewDebug(operationID, "redis get msg is ", msg.String()) seqMsg = append(seqMsg, &msg) } @@ -216,48 +251,40 @@ func (r *RedisClient) GetMessageListBySeq(userID string, seqList []uint32, opera return seqMsg, failedSeqList, errResult } -func (r *RedisClient) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) (error, int) { - ctx := context.Background() +func (r *RedisClient) SetMessageToCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ, uid string) (int, error) { pipe := r.rdb.Pipeline() var failedList []pbChat.MsgDataToMQ for _, msg := range msgList { key := messageCache + uid + "_" + strconv.Itoa(int(msg.MsgData.Seq)) s, err := utils.Pb2String(msg.MsgData) if err != nil { - log2.NewWarn(operationID, utils.GetSelfFuncName(), "Pb2String failed", msg.MsgData.String(), uid, err.Error()) continue } - log2.NewDebug(operationID, "convert string is ", s) err = pipe.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err() //err = r.rdb.HMSet(context.Background(), "12", map[string]interface{}{"1": 2, "343": false}).Err() if err != nil { - log2.NewWarn(operationID, utils.GetSelfFuncName(), "redis failed", "args:", key, *msg, uid, s, err.Error()) failedList = append(failedList, *msg) } } if len(failedList) != 0 { - 
return errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList, operationID)), len(failedList) + return len(failedList), errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList)) } _, err := pipe.Exec(ctx) - return err, 0 + return 0, err } -func (r *RedisClient) DeleteMessageFromCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) error { - ctx := context.Background() +func (r *RedisClient) DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) error { for _, msg := range msgList { - key := messageCache + uid + "_" + strconv.Itoa(int(msg.MsgData.Seq)) + key := messageCache + userID + "_" + strconv.Itoa(int(msg.MsgData.Seq)) err := r.rdb.Del(ctx, key).Err() if err != nil { - log2.NewWarn(operationID, utils.GetSelfFuncName(), "redis failed", "args:", key, uid, err.Error(), msgList) } } return nil } -func (r *RedisClient) CleanUpOneUserAllMsgFromRedis(userID string, operationID string) error { - ctx := context.Background() +func (r *RedisClient) CleanUpOneUserAllMsgFromRedis(ctx context.Context, userID string) error { key := messageCache + userID + "_" + "*" vals, err := r.rdb.Keys(ctx, key).Result() - log2.Debug(operationID, "vals: ", vals) if err == redis.Nil { return nil } @@ -270,7 +297,7 @@ func (r *RedisClient) CleanUpOneUserAllMsgFromRedis(userID string, operationID s return nil } -func (r *RedisClient) HandleSignalInfo(operationID string, msg *pbCommon.MsgData, pushToUserID string) (isSend bool, err error) { +func (r *RedisClient) HandleSignalInfo(ctx context.Context, operationID string, msg *sdkws.MsgData, pushToUserID string) (isSend bool, err error) { req := &pbRtc.SignalReq{} if err := proto.Unmarshal(msg.Content, req); err != nil { return false, err @@ -294,9 +321,7 @@ func (r *RedisClient) HandleSignalInfo(operationID string, msg *pbCommon.MsgData return false, nil } if isInviteSignal { - log2.NewDebug(operationID, utils.GetSelfFuncName(), "invite 
userID list:", inviteeUserIDList) for _, userID := range inviteeUserIDList { - log2.NewInfo(operationID, utils.GetSelfFuncName(), "invite userID:", userID) timeout, err := strconv.Atoi(config.Config.Rtc.SignalTimeout) if err != nil { return false, err @@ -320,7 +345,7 @@ func (r *RedisClient) HandleSignalInfo(operationID string, msg *pbCommon.MsgData return true, nil } -func (r *RedisClient) GetSignalInfoFromCacheByClientMsgID(clientMsgID string) (invitationInfo *pbRtc.SignalInviteReq, err error) { +func (r *RedisClient) GetSignalInfoFromCacheByClientMsgID(ctx context.Context, clientMsgID string) (invitationInfo *pbRtc.SignalInviteReq, err error) { key := signalCache + clientMsgID invitationInfo = &pbRtc.SignalInviteReq{} bytes, err := r.rdb.Get(context.Background(), key).Bytes() @@ -342,7 +367,7 @@ func (r *RedisClient) GetSignalInfoFromCacheByClientMsgID(clientMsgID string) (i return invitationInfo, err } -func (r *RedisClient) GetAvailableSignalInvitationInfo(userID string) (invitationInfo *pbRtc.SignalInviteReq, err error) { +func (r *RedisClient) GetAvailableSignalInvitationInfo(ctx context.Context, userID string) (invitationInfo *pbRtc.SignalInviteReq, err error) { keyList := signalListCache + userID result := r.rdb.LPop(context.Background(), keyList) if err = result.Err(); err != nil { @@ -352,76 +377,70 @@ func (r *RedisClient) GetAvailableSignalInvitationInfo(userID string) (invitatio if err != nil { return nil, utils.Wrap(err, "GetAvailableSignalInvitationInfo failed") } - log2.NewDebug("", utils.GetSelfFuncName(), result, result.String()) - invitationInfo, err = r.GetSignalInfoFromCacheByClientMsgID(key) + invitationInfo, err = r.GetSignalInfoFromCacheByClientMsgID(ctx, key) if err != nil { return nil, utils.Wrap(err, "GetSignalInfoFromCacheByClientMsgID") } - err = r.DelUserSignalList(userID) + err = r.DelUserSignalList(ctx, userID) if err != nil { return nil, utils.Wrap(err, "GetSignalInfoFromCacheByClientMsgID") } return invitationInfo, nil } -func (r 
*RedisClient) DelUserSignalList(userID string) error { +func (r *RedisClient) DelUserSignalList(ctx context.Context, userID string) error { keyList := signalListCache + userID err := r.rdb.Del(context.Background(), keyList).Err() return err } -func (r *RedisClient) DelMsgFromCache(uid string, seqList []uint32, operationID string) { +func (r *RedisClient) DelMsgFromCache(ctx context.Context, uid string, seqList []uint32, operationID string) { for _, seq := range seqList { key := messageCache + uid + "_" + strconv.Itoa(int(seq)) result, err := r.rdb.Get(context.Background(), key).Result() if err != nil { if err == redis.Nil { - log2.NewDebug(operationID, utils.GetSelfFuncName(), err.Error(), "redis nil") } else { - log2.NewError(operationID, utils.GetSelfFuncName(), err.Error(), key) } continue } - var msg pbCommon.MsgData + var msg sdkws.MsgData if err := utils.String2Pb(result, &msg); err != nil { - log2.Error(operationID, utils.GetSelfFuncName(), "String2Pb failed", msg, result, key, err.Error()) continue } msg.Status = constant.MsgDeleted s, err := utils.Pb2String(&msg) if err != nil { - log2.Error(operationID, utils.GetSelfFuncName(), "Pb2String failed", msg, err.Error()) continue } if err := r.rdb.Set(context.Background(), key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil { - log2.Error(operationID, utils.GetSelfFuncName(), "Set failed", err.Error()) } } } -func (r *RedisClient) SetGetuiToken(token string, expireTime int64) error { +func (r *RedisClient) SetGetuiToken(ctx context.Context, token string, expireTime int64) error { return r.rdb.Set(context.Background(), getuiToken, token, time.Duration(expireTime)*time.Second).Err() } -func (r *RedisClient) GetGetuiToken() (string, error) { +func (r *RedisClient) GetGetuiToken(ctx context.Context) (string, error) { result, err := r.rdb.Get(context.Background(), getuiToken).Result() return result, err } -func (r *RedisClient) SetGetuiTaskID(taskID string, expireTime int64) error { 
+func (r *RedisClient) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error { return r.rdb.Set(context.Background(), getuiTaskID, taskID, time.Duration(expireTime)*time.Second).Err() } -func (r *RedisClient) GetGetuiTaskID() (string, error) { +func (r *RedisClient) GetGetuiTaskID(ctx context.Context) (string, error) { result, err := r.rdb.Get(context.Background(), getuiTaskID).Result() return result, err } -func (r *RedisClient) SetSendMsgStatus(status int32, operationID string) error { +func (r *RedisClient) SetSendMsgStatus(ctx context.Context, status int32, operationID string) error { return r.rdb.Set(context.Background(), sendMsgFailedFlag+operationID, status, time.Hour*24).Err() } -func (r *RedisClient) GetSendMsgStatus(operationID string) (int, error) { +func (r *RedisClient) GetSendMsgStatus(ctx context.Context, operationID string) (int, error) { result, err := r.rdb.Get(context.Background(), sendMsgFailedFlag+operationID).Result() if err != nil { return 0, err @@ -430,75 +449,71 @@ func (r *RedisClient) GetSendMsgStatus(operationID string) (int, error) { return status, err } -func (r *RedisClient) SetFcmToken(account string, platformID int, fcmToken string, expireTime int64) (err error) { +func (r *RedisClient) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) { key := FcmToken + account + ":" + strconv.Itoa(platformID) return r.rdb.Set(context.Background(), key, fcmToken, time.Duration(expireTime)*time.Second).Err() } -func (r *RedisClient) GetFcmToken(account string, platformID int) (string, error) { +func (r *RedisClient) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) { key := FcmToken + account + ":" + strconv.Itoa(platformID) return r.rdb.Get(context.Background(), key).Result() } -func (r *RedisClient) DelFcmToken(account string, platformID int) error { +func (r *RedisClient) DelFcmToken(ctx context.Context, account string, platformID 
int) error { key := FcmToken + account + ":" + strconv.Itoa(platformID) return r.rdb.Del(context.Background(), key).Err() } -func (r *RedisClient) IncrUserBadgeUnreadCountSum(uid string) (int, error) { +func (r *RedisClient) IncrUserBadgeUnreadCountSum(ctx context.Context, uid string) (int, error) { key := userBadgeUnreadCountSum + uid seq, err := r.rdb.Incr(context.Background(), key).Result() return int(seq), err } -func (r *RedisClient) SetUserBadgeUnreadCountSum(uid string, value int) error { +func (r *RedisClient) SetUserBadgeUnreadCountSum(ctx context.Context, uid string, value int) error { key := userBadgeUnreadCountSum + uid return r.rdb.Set(context.Background(), key, value, 0).Err() } -func (r *RedisClient) GetUserBadgeUnreadCountSum(uid string) (int, error) { +func (r *RedisClient) GetUserBadgeUnreadCountSum(ctx context.Context, uid string) (int, error) { key := userBadgeUnreadCountSum + uid seq, err := r.rdb.Get(context.Background(), key).Result() return utils.StringToInt(seq), err } -func (r *RedisClient) JudgeMessageReactionEXISTS(clientMsgID string, sessionType int32) (bool, error) { - key := getMessageReactionExPrefix(clientMsgID, sessionType) +func (r *RedisClient) JudgeMessageReactionEXISTS(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) { + key := r.getMessageReactionExPrefix(clientMsgID, sessionType) n, err := r.rdb.Exists(context.Background(), key).Result() - if n > 0 { - return true, err - } else { - return false, err - } + return n > 0, err } -func (r *RedisClient) GetOneMessageAllReactionList(clientMsgID string, sessionType int32) (map[string]string, error) { - key := getMessageReactionExPrefix(clientMsgID, sessionType) +func (r *RedisClient) GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) { + key := r.getMessageReactionExPrefix(clientMsgID, sessionType) return r.rdb.HGetAll(context.Background(), key).Result() } -func (r *RedisClient) 
DeleteOneMessageKey(clientMsgID string, sessionType int32, subKey string) error { - key := getMessageReactionExPrefix(clientMsgID, sessionType) +func (r *RedisClient) DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error { + key := r.getMessageReactionExPrefix(clientMsgID, sessionType) return r.rdb.HDel(context.Background(), key, subKey).Err() } -func (r *RedisClient) SetMessageReactionExpire(clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) { - key := getMessageReactionExPrefix(clientMsgID, sessionType) +func (r *RedisClient) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) { + key := r.getMessageReactionExPrefix(clientMsgID, sessionType) return r.rdb.Expire(context.Background(), key, expiration).Result() } -func (r *RedisClient) GetMessageTypeKeyValue(clientMsgID string, sessionType int32, typeKey string) (string, error) { - key := getMessageReactionExPrefix(clientMsgID, sessionType) +func (r *RedisClient) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) { + key := r.getMessageReactionExPrefix(clientMsgID, sessionType) result, err := r.rdb.HGet(context.Background(), key, typeKey).Result() return result, err } -func (r *RedisClient) SetMessageTypeKeyValue(clientMsgID string, sessionType int32, typeKey, value string) error { - key := getMessageReactionExPrefix(clientMsgID, sessionType) +func (r *RedisClient) SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error { + key := r.getMessageReactionExPrefix(clientMsgID, sessionType) return r.rdb.HSet(context.Background(), key, typeKey, value).Err() } -func (r *RedisClient) LockMessageTypeKey(clientMsgID string, TypeKey string) error { +func (r *RedisClient) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error { key := 
exTypeKeyLocker + clientMsgID + "_" + TypeKey return r.rdb.SetNX(context.Background(), key, 1, time.Minute).Err() } -func (r *RedisClient) UnLockMessageTypeKey(clientMsgID string, TypeKey string) error { +func (r *RedisClient) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error { key := exTypeKeyLocker + clientMsgID + "_" + TypeKey return r.rdb.Del(context.Background(), key).Err() diff --git a/pkg/common/db/cache/token.go b/pkg/common/db/cache/token.go index 2aba43a0e..b54792f2e 100644 --- a/pkg/common/db/cache/token.go +++ b/pkg/common/db/cache/token.go @@ -5,7 +5,7 @@ import ( "Open_IM/pkg/common/tokenverify" "Open_IM/pkg/utils" "context" - go_redis "github.com/go-redis/redis/v8" + "github.com/go-redis/redis/v8" "github.com/golang-jwt/jwt/v4" ) @@ -34,7 +34,7 @@ func NewTokenRedis(redisClient *RedisClient, accessSecret string, accessExpire i func (t *TokenRedis) GetTokensWithoutError(ctx context.Context, userID, platform string) (map[string]int, error) { key := uidPidToken + userID + ":" + platform m, err := t.RedisClient.GetClient().HGetAll(context.Background(), key).Result() - if err != nil && err == go_redis.Nil { + if err != nil && err == redis.Nil { return nil, nil } mm := make(map[string]int) diff --git a/pkg/common/db/controller/extend_msg.go b/pkg/common/db/controller/extend_msg.go new file mode 100644 index 000000000..ba0bf5b25 --- /dev/null +++ b/pkg/common/db/controller/extend_msg.go @@ -0,0 +1,99 @@ +package controller + +import ( + unRelationTb "Open_IM/pkg/common/db/table/unrelation" + "Open_IM/pkg/proto/sdkws" + "context" + "github.com/go-redis/redis/v8" + "go.mongodb.org/mongo-driver/mongo" +) + +type ExtendMsgInterface interface { + CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error + GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) + GetExtendMsgSet(ctx context.Context, sourceID string, 
sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) + InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error + InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error + DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error + GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) +} + +type ExtendMsgController struct { + database ExtendMsgDatabase +} + +func NewExtendMsgController(mgo *mongo.Client, rdb redis.UniversalClient) *ExtendMsgController { + return &ExtendMsgController{} +} + +func (e *ExtendMsgController) CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error { + return e.database.CreateExtendMsgSet(ctx, set) +} + +func (e *ExtendMsgController) GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) { + return e.database.GetAllExtendMsgSet(ctx, ID, opts) +} + +func (e *ExtendMsgController) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) { + return e.database.GetExtendMsgSet(ctx, sourceID, sessionType, maxMsgUpdateTime) +} + +func (e *ExtendMsgController) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error { + return e.database.InsertExtendMsg(ctx, sourceID, sessionType, msg) +} + +func (e *ExtendMsgController) InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, 
reactionExtensionList map[string]*sdkws.KeyValue) error { + return e.database.InsertOrUpdateReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList) +} +func (e *ExtendMsgController) DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error { + return e.database.DeleteReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList) +} + +func (e *ExtendMsgController) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) { + return e.database.GetExtendMsg(ctx, sourceID, sessionType, clientMsgID, maxMsgUpdateTime) +} + +type ExtendMsgDatabaseInterface interface { + CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error + GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) + GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) + InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error + InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error + DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error + GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) +} + +type ExtendMsgDatabase struct { + model unRelationTb.ExtendMsgSetModelInterface +} + 
+func NewExtendMsgDatabase() ExtendMsgDatabaseInterface { + return &ExtendMsgDatabase{} +} + +func (e *ExtendMsgDatabase) CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error { + return e.model.CreateExtendMsgSet(ctx, set) +} + +func (e *ExtendMsgDatabase) GetAllExtendMsgSet(ctx context.Context, sourceID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) { + return e.model.GetAllExtendMsgSet(ctx, sourceID, opts) +} + +func (e *ExtendMsgDatabase) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) { + return e.model.GetExtendMsgSet(ctx, sourceID, sessionType, maxMsgUpdateTime) +} + +func (e *ExtendMsgDatabase) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error { + return e.model.InsertExtendMsg(ctx, sourceID, sessionType, msg) +} + +func (e *ExtendMsgDatabase) InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error { + return e.model.InsertOrUpdateReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList) +} +func (e *ExtendMsgDatabase) DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error { + return e.model.DeleteReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList) +} + +func (e *ExtendMsgDatabase) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) { + return e.model.GetExtendMsg(ctx, sourceID, sessionType, clientMsgID, maxMsgUpdateTime) +} diff --git 
a/pkg/common/db/controller/group.go b/pkg/common/db/controller/group.go index ba39bb511..75f765ad2 100644 --- a/pkg/common/db/controller/group.go +++ b/pkg/common/db/controller/group.go @@ -5,7 +5,7 @@ import ( "Open_IM/pkg/common/db/cache" "Open_IM/pkg/common/db/relation" relationTb "Open_IM/pkg/common/db/table/relation" - unrelationTb "Open_IM/pkg/common/db/table/unrelation" + unRelationTb "Open_IM/pkg/common/db/table/unrelation" "Open_IM/pkg/common/db/unrelation" "Open_IM/pkg/utils" "context" @@ -47,8 +47,8 @@ type GroupInterface interface { TakeGroupRequest(ctx context.Context, groupID string, userID string) (*relationTb.GroupRequestModel, error) PageGroupRequestUser(ctx context.Context, userID string, pageNumber, showNumber int32) (uint32, []*relationTb.GroupRequestModel, error) // SuperGroup - FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) - FindJoinSuperGroup(ctx context.Context, userID string) (superGroup *unrelationTb.UserToSuperGroupModel, err error) + FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unRelationTb.SuperGroupModel, error) + FindJoinSuperGroup(ctx context.Context, userID string) (superGroup *unRelationTb.UserToSuperGroupModel, err error) CreateSuperGroup(ctx context.Context, groupID string, initMemberIDList []string) error DeleteSuperGroup(ctx context.Context, groupID string) error DeleteSuperGroupMember(ctx context.Context, groupID string, userIDs []string) error @@ -153,11 +153,11 @@ func (g *GroupController) PageGroupRequestUser(ctx context.Context, userID strin return g.database.PageGroupRequestUser(ctx, userID, pageNumber, showNumber) } -func (g *GroupController) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) { +func (g *GroupController) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unRelationTb.SuperGroupModel, error) { return g.database.FindSuperGroup(ctx, groupIDs) } -func (g *GroupController) 
FindJoinSuperGroup(ctx context.Context, userID string) (*unrelationTb.UserToSuperGroupModel, error) { +func (g *GroupController) FindJoinSuperGroup(ctx context.Context, userID string) (*unRelationTb.UserToSuperGroupModel, error) { return g.database.FindJoinSuperGroup(ctx, userID) } @@ -203,8 +203,8 @@ type GroupDataBaseInterface interface { TakeGroupRequest(ctx context.Context, groupID string, userID string) (*relationTb.GroupRequestModel, error) PageGroupRequestUser(ctx context.Context, userID string, pageNumber, showNumber int32) (uint32, []*relationTb.GroupRequestModel, error) // SuperGroup - FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) - FindJoinSuperGroup(ctx context.Context, userID string) (*unrelationTb.UserToSuperGroupModel, error) + FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unRelationTb.SuperGroupModel, error) + FindJoinSuperGroup(ctx context.Context, userID string) (*unRelationTb.UserToSuperGroupModel, error) CreateSuperGroup(ctx context.Context, groupID string, initMemberIDList []string) error DeleteSuperGroup(ctx context.Context, groupID string) error DeleteSuperGroupMember(ctx context.Context, groupID string, userIDs []string) error @@ -467,11 +467,11 @@ func (g *GroupDataBase) PageGroupRequestUser(ctx context.Context, userID string, return g.groupRequestDB.Page(ctx, userID, pageNumber, showNumber) } -func (g *GroupDataBase) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) { +func (g *GroupDataBase) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unRelationTb.SuperGroupModel, error) { return g.mongoDB.FindSuperGroup(ctx, groupIDs) } -func (g *GroupDataBase) FindJoinSuperGroup(ctx context.Context, userID string) (*unrelationTb.UserToSuperGroupModel, error) { +func (g *GroupDataBase) FindJoinSuperGroup(ctx context.Context, userID string) (*unRelationTb.UserToSuperGroupModel, error) { return g.mongoDB.GetSuperGroupByUserID(ctx, userID) } diff 
--git a/pkg/common/db/controller/msg.go b/pkg/common/db/controller/msg.go index 6198f3a24..9fc620b87 100644 --- a/pkg/common/db/controller/msg.go +++ b/pkg/common/db/controller/msg.go @@ -1,76 +1,554 @@ package controller import ( + "Open_IM/pkg/common/constant" + "Open_IM/pkg/common/db/cache" + unRelationTb "Open_IM/pkg/common/db/table/unrelation" + "Open_IM/pkg/common/db/unrelation" + "Open_IM/pkg/common/log" + "Open_IM/pkg/common/prome" + "Open_IM/pkg/common/tracelog" + "github.com/gogo/protobuf/sortkeys" + "sync" + + //"Open_IM/pkg/common/log" pbMsg "Open_IM/pkg/proto/msg" "Open_IM/pkg/proto/sdkws" + "Open_IM/pkg/utils" "context" - "encoding/json" + "errors" + "github.com/go-redis/redis/v8" + "go.mongodb.org/mongo-driver/mongo" + + "github.com/golang/protobuf/proto" ) type MsgInterface interface { - BatchInsertChat2DB(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error - BatchInsertChat2Cache(ctx context.Context, insertID string, msgList []*pbMsg.MsgDataToMQ) (error, uint64) - - DelMsgBySeqList(ctx context.Context, userID string, seqList []uint32) (totalUnExistSeqList []uint32, err error) - // logic delete - DelMsgLogic(ctx context.Context, userID string, seqList []uint32) error - DelMsgBySeqListInOneDoc(ctx context.Context, docID string, seqList []uint32) (unExistSeqList []uint32, err error) - ReplaceMsgToBlankByIndex(docID string, index int) (replaceMaxSeq uint32, err error) - ReplaceMsgByIndex(ctx context.Context, suffixUserID string, msg *sdkws.MsgData, seqIndex int) error - // 获取群ID或者UserID最新一条在mongo里面的消息 - GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) - // 获取群ID或者UserID最老一条在mongo里面的消息 - GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) - - GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) - GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) - 
GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, unExistSeqList []uint32, err error) - SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error - - CleanUpUserMsgFromMongo(userID string, operationID string) error + // 批量插入消息到db + BatchInsertChat2DB(ctx context.Context, ID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error + // 刪除redis中消息缓存 + DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ) error + // incrSeq然后批量插入缓存 + BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error) + // 删除消息 返回不存在的seqList + DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error) + // 获取群ID或者UserID最新一条在db里面的消息 + GetNewestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error) + // 获取群ID或者UserID最老一条在db里面的消息 + GetOldestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error) + // 通过seqList获取db中写扩散消息 + GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) + // 通过seqList获取大群在db里面的消息 + GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) + // 删除用户所有消息/cache/db然后重置seq + CleanUpUserMsgFromMongo(ctx context.Context, userID string) error + // 删除大群消息重置群成员最小群seq, remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除 redis cache) + DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userID string, remainTime int64) error + // 删除用户消息重置最小seq, remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除redis cache) + DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error } -func NewMsgController() MsgDatabaseInterface { - return MsgController +func NewMsgController(mgo *mongo.Client, rdb redis.UniversalClient) MsgInterface { + return &MsgController{} } type MsgController struct { + 
database MsgDatabase +} + +func (m *MsgController) BatchInsertChat2DB(ctx context.Context, ID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error { + return m.database.BatchInsertChat2DB(ctx, ID, msgList, currentMaxSeq) +} + +func (m *MsgController) DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ) error { + return m.database.DeleteMessageFromCache(ctx, userID, msgList) +} + +func (m *MsgController) BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error) { + return m.database.BatchInsertChat2Cache(ctx, sourceID, msgList) +} + +func (m *MsgController) DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error) { + return m.database.DelMsgBySeqs(ctx, userID, seqs) +} + +func (m *MsgController) GetNewestMsg(ctx context.Context, ID string) (msg *sdkws.MsgData, err error) { + return m.database.GetNewestMsg(ctx, ID) +} + +func (m *MsgController) GetOldestMsg(ctx context.Context, ID string) (msg *sdkws.MsgData, err error) { + return m.database.GetOldestMsg(ctx, ID) +} + +func (m *MsgController) GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) { + return m.database.GetMsgBySeqs(ctx, userID, seqs) +} + +func (m *MsgController) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) { + return m.database.GetSuperGroupMsgBySeqs(ctx, groupID, seqs) +} + +func (m *MsgController) CleanUpUserMsgFromMongo(ctx context.Context, userID string) error { + return m.database.CleanUpUserMsgFromMongo(ctx, userID) +} + +func (m *MsgController) DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userID string, remainTime int64) error { + return m.database.DeleteUserSuperGroupMsgsAndSetMinSeq(ctx, groupID, userID, remainTime) +} + +func (m *MsgController) DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error 
{ + return m.database.DeleteUserMsgsAndSetMinSeq(ctx, userID, remainTime) } type MsgDatabaseInterface interface { - BatchInsertChat2DB(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error - BatchInsertChat2Cache(ctx context.Context, insertID string, msgList []*pbMsg.MsgDataToMQ) (error, uint64) - - DelMsgBySeqList(ctx context.Context, userID string, seqList []uint32) (totalUnExistSeqList []uint32, err error) - // logic delete - DelMsgLogic(ctx context.Context, userID string, seqList []uint32) error - DelMsgBySeqListInOneDoc(ctx context.Context, docID string, seqList []uint32) (unExistSeqList []uint32, err error) - ReplaceMsgToBlankByIndex(docID string, index int) (replaceMaxSeq uint32, err error) - ReplaceMsgByIndex(ctx context.Context, suffixUserID string, msg *sdkws.MsgData, seqIndex int) error + // 批量插入消息 + BatchInsertChat2DB(ctx context.Context, ID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error + // 刪除redis中消息缓存 + DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ) error + // incrSeq然后批量插入缓存 + BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error) + // 删除消息 返回不存在的seqList + DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error) // 获取群ID或者UserID最新一条在mongo里面的消息 - GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) + GetNewestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error) // 获取群ID或者UserID最老一条在mongo里面的消息 - GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) - - GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) - GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) - GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, 
unExistSeqList []uint32, err error) - SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error + GetOldestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error) + // 通过seqList获取mongo中写扩散消息 + GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) + // 通过seqList获取大群在 mongo里面的消息 + GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) // 删除用户所有消息/redis/mongo然后重置seq - CleanUpUserMsgFromMongo(userID string, operationID string) error -} - -func NewMsgDatabase() MsgDatabaseInterface { - return MsgDatabase + CleanUpUserMsgFromMongo(ctx context.Context, userID string) error + // 删除大群消息重置群成员最小群seq, remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除 redis cache) + DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userID string, remainTime int64) error + // 删除用户消息重置最小seq, remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除redis cache) + DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error } type MsgDatabase struct { + msgModel unRelationTb.MsgDocModelInterface + msgCache cache.Cache + msg unRelationTb.MsgDocModel } -func (m *MsgDatabase) BatchInsertChat2DB(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error { - +func NewMsgDatabase(mgo *mongo.Client, rdb redis.UniversalClient) MsgDatabaseInterface { + return &MsgDatabase{} } -func (m *MsgDatabase) CleanUpUserMsgFromMongo(userID string, operationID string) error { +func (db *MsgDatabase) BatchInsertChat2DB(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error { + //newTime := utils.GetCurrentTimestampByMill() + if len(msgList) > db.msg.GetSingleGocMsgNum() { + return errors.New("too large") + } + var remain uint64 + blk0 := uint64(db.msg.GetSingleGocMsgNum() - 1) + //currentMaxSeq 4998 + if currentMaxSeq < uint64(db.msg.GetSingleGocMsgNum()) { + remain = blk0 - 
currentMaxSeq //1 + } else { + excludeBlk0 := currentMaxSeq - blk0 //=1 + //(5000-1)%5000 == 4999 + remain = (uint64(db.msg.GetSingleGocMsgNum()) - (excludeBlk0 % uint64(db.msg.GetSingleGocMsgNum()))) % uint64(db.msg.GetSingleGocMsgNum()) + } + //remain=1 + insertCounter := uint64(0) + msgsToMongo := make([]unRelationTb.MsgInfoModel, 0) + msgsToMongoNext := make([]unRelationTb.MsgInfoModel, 0) + docID := "" + docIDNext := "" + var err error + for _, m := range msgList { + //log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID) + currentMaxSeq++ + sMsg := unRelationTb.MsgInfoModel{} + sMsg.SendTime = m.MsgData.SendTime + m.MsgData.Seq = uint32(currentMaxSeq) + if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil { + return utils.Wrap(err, "") + } + if insertCounter < remain { + msgsToMongo = append(msgsToMongo, sMsg) + insertCounter++ + docID = db.msg.GetDocID(sourceID, uint32(currentMaxSeq)) + //log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID) + } else { + msgsToMongoNext = append(msgsToMongoNext, sMsg) + docIDNext = db.msg.GetDocID(sourceID, uint32(currentMaxSeq)) + //log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID) + } + } + if docID != "" { + //filter := bson.M{"uid": seqUid} + //log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo, "userID: ", userID) + //err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgsToMongo}}}).Err() + err = db.msgModel.PushMsgsToDoc(ctx, docID, msgsToMongo) + if err != nil { + if err == mongo.ErrNoDocuments { + doc := &unRelationTb.MsgDocModel{} + doc.DocID = docID + doc.Msg = msgsToMongo + if err = db.msgModel.Create(ctx, doc); err != nil { + prome.PromeInc(prome.MsgInsertMongoFailedCounter) + //log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat) + return utils.Wrap(err, 
"") + } + prome.PromeInc(prome.MsgInsertMongoSuccessCounter) + } else { + prome.PromeInc(prome.MsgInsertMongoFailedCounter) + //log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter) + return utils.Wrap(err, "") + } + } else { + prome.PromeInc(prome.MsgInsertMongoSuccessCounter) + } + } + if docIDNext != "" { + nextDoc := &unRelationTb.MsgDocModel{} + nextDoc.DocID = docIDNext + nextDoc.Msg = msgsToMongoNext + //log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext, "userID: ", userID) + if err = db.msgModel.Create(ctx, nextDoc); err != nil { + prome.PromeInc(prome.MsgInsertMongoFailedCounter) + //log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat) + return utils.Wrap(err, "") + } + prome.PromeInc(prome.MsgInsertMongoSuccessCounter) + } + //log.Debug(operationID, "batch mgo cost time ", mongo2.getCurrentTimestampByMill()-newTime, userID, len(msgList)) + return nil +} + +func (db *MsgDatabase) DeleteMessageFromCache(ctx context.Context, userID string, msgs []*pbMsg.MsgDataToMQ) error { + return db.msgCache.DeleteMessageFromCache(ctx, userID, msgs) +} + +func (db *MsgDatabase) BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error) { + //newTime := utils.GetCurrentTimestampByMill() + lenList := len(msgList) + if lenList > db.msg.GetSingleGocMsgNum() { + return 0, errors.New("too large") + } + if lenList < 1 { + return 0, errors.New("too short as 0") + } + // judge sessionType to get seq + var currentMaxSeq uint64 + var err error + if msgList[0].MsgData.SessionType == constant.SuperGroupChatType { + currentMaxSeq, err = db.msgCache.GetGroupMaxSeq(ctx, sourceID) + //log.Debug(operationID, "constant.SuperGroupChatType lastMaxSeq before add ", currentMaxSeq, "userID ", sourceID, err) + } else { + currentMaxSeq, err = db.msgCache.GetUserMaxSeq(ctx, sourceID) + //log.Debug(operationID, "constant.SingleChatType lastMaxSeq before add ", currentMaxSeq, "userID ", 
sourceID, err) + } + if err != nil && err != redis.Nil { + prome.PromeInc(prome.SeqGetFailedCounter) + return 0, utils.Wrap(err, "") + } + prome.PromeInc(prome.SeqGetSuccessCounter) + lastMaxSeq := currentMaxSeq + for _, m := range msgList { + currentMaxSeq++ + m.MsgData.Seq = uint32(currentMaxSeq) + //log.Debug(operationID, "cache msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", sourceID, "seq: ", currentMaxSeq) + } + //log.Debug(operationID, "SetMessageToCache ", sourceID, len(msgList)) + failedNum, err := db.msgCache.SetMessageToCache(ctx, sourceID, msgList) + if err != nil { + prome.PromeAdd(prome.MsgInsertRedisFailedCounter, failedNum) + //log.Error(operationID, "setMessageToCache failed, continue ", err.Error(), len(msgList), sourceID) + } else { + prome.PromeInc(prome.MsgInsertRedisSuccessCounter) + } + //log.Debug(operationID, "batch to redis cost time ", mongo2.getCurrentTimestampByMill()-newTime, sourceID, len(msgList)) + if msgList[0].MsgData.SessionType == constant.SuperGroupChatType { + err = db.msgCache.SetGroupMaxSeq(ctx, sourceID, currentMaxSeq) + } else { + err = db.msgCache.SetUserMaxSeq(ctx, sourceID, currentMaxSeq) + } + if err != nil { + prome.PromeInc(prome.SeqSetFailedCounter) + } else { + prome.PromeInc(prome.SeqSetSuccessCounter) + } + return lastMaxSeq, utils.Wrap(err, "") +} + +func (db *MsgDatabase) DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error) { + sortkeys.Uint32s(seqs) + docIDSeqsMap := db.msg.GetDocIDSeqsMap(userID, seqs) + lock := sync.Mutex{} + var wg sync.WaitGroup + wg.Add(len(docIDSeqsMap)) + for k, v := range docIDSeqsMap { + go func(docID string, seqs []uint32) { + defer wg.Done() + unExistSeqList, err := db.DelMsgBySeqsInOneDoc(ctx, docID, seqs) + if err != nil { + return + } + lock.Lock() + totalUnExistSeqs = append(totalUnExistSeqs, unExistSeqList...) 
+ lock.Unlock() + }(k, v) + } + return totalUnExistSeqs, nil +} + +func (db *MsgDatabase) DelMsgBySeqsInOneDoc(ctx context.Context, docID string, seqs []uint32) (unExistSeqs []uint32, err error) { + seqMsgs, indexes, unExistSeqs, err := db.GetMsgAndIndexBySeqsInOneDoc(ctx, docID, seqs) + if err != nil { + return nil, err + } + for i, v := range seqMsgs { + if err = db.msgModel.UpdateMsgStatusByIndexInOneDoc(ctx, docID, v, indexes[i], constant.MsgDeleted); err != nil { + return nil, err + } + } + return unExistSeqs, nil +} + +func (db *MsgDatabase) GetMsgAndIndexBySeqsInOneDoc(ctx context.Context, docID string, seqs []uint32) (seqMsgs []*sdkws.MsgData, indexes []int, unExistSeqs []uint32, err error) { + doc, err := db.msgModel.FindOneByDocID(ctx, docID) + if err != nil { + return nil, nil, nil, err + } + singleCount := 0 + var hasSeqList []uint32 + for i := 0; i < len(doc.Msg); i++ { + msgPb, err := db.unmarshalMsg(&doc.Msg[i]) + if err != nil { + return nil, nil, nil, err + } + if utils.Contain(msgPb.Seq, seqs) { + indexes = append(indexes, i) + seqMsgs = append(seqMsgs, msgPb) + hasSeqList = append(hasSeqList, msgPb.Seq) + singleCount++ + if singleCount == len(seqs) { + break + } + } + } + for _, i := range seqs { + if utils.Contain(i, hasSeqList) { + continue + } + unExistSeqs = append(unExistSeqs, i) + } + return seqMsgs, indexes, unExistSeqs, nil +} + +func (db *MsgDatabase) GetNewestMsg(ctx context.Context, sourceID string) (msgPb *sdkws.MsgData, err error) { + msgInfo, err := db.msgModel.GetNewestMsg(ctx, sourceID) + if err != nil { + return nil, err + } + return db.unmarshalMsg(msgInfo) +} + +func (db *MsgDatabase) GetOldestMsg(ctx context.Context, sourceID string) (msgPb *sdkws.MsgData, err error) { + msgInfo, err := db.msgModel.GetOldestMsg(ctx, sourceID) + if err != nil { + return nil, err + } + return db.unmarshalMsg(msgInfo) +} + +func (db *MsgDatabase) unmarshalMsg(msgInfo *unRelationTb.MsgInfoModel) (msgPb *sdkws.MsgData, err error) { + msgPb = 
&sdkws.MsgData{} + err = proto.Unmarshal(msgInfo.Msg, msgPb) + if err != nil { + return nil, utils.Wrap(err, "") + } + return msgPb, nil +} + +func (db *MsgDatabase) getMsgBySeqs(ctx context.Context, sourceID string, seqs []uint32, diffusionType int) (seqMsg []*sdkws.MsgData, err error) { + var hasSeqs []uint32 + singleCount := 0 + m := db.msg.GetDocIDSeqsMap(sourceID, seqs) + for docID, value := range m { + doc, err := db.msgModel.FindOneByDocID(ctx, docID) + if err != nil { + //log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error()) + continue + } + singleCount = 0 + for i := 0; i < len(doc.Msg); i++ { + msgPb, err := db.unmarshalMsg(&doc.Msg[i]) + if err != nil { + //log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error()) + return nil, err + } + if utils.Contain(msgPb.Seq, value) { + seqMsg = append(seqMsg, msgPb) + hasSeqs = append(hasSeqs, msgPb.Seq) + singleCount++ + if singleCount == len(value) { + break + } + } + } + } + if len(hasSeqs) != len(seqs) { + var diff []uint32 + var exceptionMsg []*sdkws.MsgData + diff = utils.Difference(hasSeqs, seqs) + if diffusionType == constant.WriteDiffusion { + exceptionMsg = db.msg.GenExceptionMessageBySeqs(diff) + } else if diffusionType == constant.ReadDiffusion { + exceptionMsg = db.msg.GenExceptionSuperGroupMessageBySeqs(diff, sourceID) + } + seqMsg = append(seqMsg, exceptionMsg...) 
+ } + return seqMsg, nil +} + +func (db *MsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) { + return db.getMsgBySeqs(ctx, userID, seqs, constant.WriteDiffusion) +} + +func (db *MsgDatabase) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) { + return db.getMsgBySeqs(ctx, groupID, seqs, constant.ReadDiffusion) +} + +func (db *MsgDatabase) CleanUpUserMsgFromMongo(ctx context.Context, userID string) error { + maxSeq, err := db.msgCache.GetUserMaxSeq(ctx, userID) + if err == redis.Nil { + return nil + } + if err != nil { + return err + } + docIDs := db.msg.GetSeqDocIDList(userID, uint32(maxSeq)) + err = db.msgModel.Delete(ctx, docIDs) + if err == mongo.ErrNoDocuments { + return nil + } + if err != nil { + return err + } + err = db.msgCache.SetUserMinSeq(ctx, userID, maxSeq) + return utils.Wrap(err, "") +} + +func (db *MsgDatabase) DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userIDs []string, remainTime int64) error { + var delStruct delMsgRecursionStruct + minSeq, err := db.deleteMsgRecursion(ctx, groupID, unRelationTb.OldestList, &delStruct, remainTime) + if err != nil { + //log.NewError(operationID, utils.GetSelfFuncName(), groupID, "deleteMsg failed") + } + if minSeq == 0 { + return nil + } + //log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDList:", delStruct, "minSeq", minSeq) + for _, userID := range userIDs { + userMinSeq, err := db.msgCache.GetGroupUserMinSeq(ctx, groupID, userID) + if err != nil && err != redis.Nil { + //log.NewError(operationID, utils.GetSelfFuncName(), "GetGroupUserMinSeq failed", groupID, userID, err.Error()) + continue + } + if userMinSeq > uint64(minSeq) { + err = db.msgCache.SetGroupUserMinSeq(ctx, groupID, userID, userMinSeq) + } else { + err = db.msgCache.SetGroupUserMinSeq(ctx, groupID, userID, uint64(minSeq)) + } + if err != nil { + //log.NewError(operationID, 
utils.GetSelfFuncName(), err.Error(), groupID, userID, userMinSeq, minSeq) + } + } + return nil +} + +func (db *MsgDatabase) DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error { + var delStruct delMsgRecursionStruct + minSeq, err := db.deleteMsgRecursion(ctx, userID, unRelationTb.OldestList, &delStruct, remainTime) + if err != nil { + return utils.Wrap(err, "") + } + if minSeq == 0 { + return nil + } + return db.msgCache.SetUserMinSeq(ctx, userID, uint64(minSeq)) +} + +// this is struct for recursion +type delMsgRecursionStruct struct { + minSeq uint32 + delDocIDList []string +} + +func (d *delMsgRecursionStruct) getSetMinSeq() uint32 { + return d.minSeq +} + +// index 0....19(del) 20...69 +// seq 70 +// set minSeq 21 +// recursion 删除list并且返回设置的最小seq +func (db *MsgDatabase) deleteMsgRecursion(ctx context.Context, sourceID string, index int64, delStruct *delMsgRecursionStruct, remainTime int64) (uint32, error) { + // find from oldest list + msgs, err := db.msgModel.GetMsgsByIndex(ctx, sourceID, index) + if err != nil || msgs.DocID == "" { + if err != nil { + if err == unrelation.ErrMsgListNotExist { + //log.NewInfo(operationID, utils.GetSelfFuncName(), "ID:", sourceID, "index:", index, err.Error()) + } else { + //log.NewError(operationID, utils.GetSelfFuncName(), "GetUserMsgListByIndex failed", err.Error(), index, ID) + } + } + // 获取报错,或者获取不到了,物理删除并且返回seq delMongoMsgsPhysical(delStruct.delDocIDList) + err = db.msgModel.Delete(ctx, delStruct.delDocIDList) + if err != nil { + return 0, err + } + return delStruct.getSetMinSeq() + 1, nil + } + //log.NewDebug(operationID, "ID:", sourceID, "index:", index, "uid:", msgs.UID, "len:", len(msgs.Msg)) + if len(msgs.Msg) > db.msg.GetSingleGocMsgNum() { + log.NewWarn(tracelog.GetOperationID(ctx), utils.GetSelfFuncName(), "msgs too large:", len(msgs.Msg), "docID:", msgs.DocID) + } + if msgs.Msg[len(msgs.Msg)-1].SendTime+(remainTime*1000) < utils.GetCurrentTimestampByMill() && msgs.IsFull() { + 
delStruct.delDocIDList = append(delStruct.delDocIDList, msgs.DocID) + lastMsgPb := &sdkws.MsgData{} + err = proto.Unmarshal(msgs.Msg[len(msgs.Msg)-1].Msg, lastMsgPb) + if err != nil { + //log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID) + return 0, utils.Wrap(err, "proto.Unmarshal failed") + } + delStruct.minSeq = lastMsgPb.Seq + } else { + var hasMarkDelFlag bool + for _, msg := range msgs.Msg { + msgPb := &sdkws.MsgData{} + err = proto.Unmarshal(msg.Msg, msgPb) + if err != nil { + //log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID) + return 0, utils.Wrap(err, "proto.Unmarshal failed") + } + if utils.GetCurrentTimestampByMill() > msg.SendTime+(remainTime*1000) { + msgPb.Status = constant.MsgDeleted + bytes, _ := proto.Marshal(msgPb) + msg.Msg = bytes + msg.SendTime = 0 + hasMarkDelFlag = true + } else { + if err := db.msgModel.Delete(ctx, delStruct.delDocIDList); err != nil { + return 0, err + } + if hasMarkDelFlag { + if err := db.msgModel.UpdateOneDoc(ctx, msgs); err != nil { + return delStruct.getSetMinSeq(), utils.Wrap(err, "") + } + } + return msgPb.Seq + 1, nil + } + } + } + //log.NewDebug(operationID, sourceID, "continue to", delStruct) + // 继续递归 index+1 + seq, err := db.deleteMsgRecursion(ctx, sourceID, index+1, delStruct, remainTime) + return seq, utils.Wrap(err, "deleteMsg failed") } diff --git a/pkg/common/db/controller/user.go b/pkg/common/db/controller/user.go index 37ea34161..0cdab97e1 100644 --- a/pkg/common/db/controller/user.go +++ b/pkg/common/db/controller/user.go @@ -23,6 +23,8 @@ type UserInterface interface { Page(ctx context.Context, pageNumber, showNumber int32) (users []*relationTb.UserModel, count int64, err error) //只要有一个存在就为true IsExist(ctx context.Context, userIDs []string) (exist bool, err error) + //获取所有用户ID + GetAllUserID(ctx context.Context) ([]string, error) } type UserController struct { @@ -55,6 +57,11 @@ func (u *UserController) Page(ctx 
context.Context, pageNumber, showNumber int32) func (u *UserController) IsExist(ctx context.Context, userIDs []string) (exist bool, err error) { return u.database.IsExist(ctx, userIDs) } + +func (u *UserController) GetAllUserID(ctx context.Context) ([]string, error) { + return u.database.GetAllUserID(ctx) +} + func NewUserController(db *gorm.DB) *UserController { controller := &UserController{database: newUserDatabase(db)} return controller @@ -75,6 +82,8 @@ type UserDatabaseInterface interface { Page(ctx context.Context, pageNumber, showNumber int32) (users []*relationTb.UserModel, count int64, err error) //只要有一个存在就为true IsExist(ctx context.Context, userIDs []string) (exist bool, err error) + //获取所有用户ID + GetAllUserID(ctx context.Context) ([]string, error) } type UserDatabase struct { @@ -138,3 +147,7 @@ func (u *UserDatabase) IsExist(ctx context.Context, userIDs []string) (exist boo } return false, nil } + +func (u *UserDatabase) GetAllUserID(ctx context.Context) ([]string, error) { + return u.user.GetAllUserID(ctx) +} diff --git a/pkg/common/db/relation/user_model_k.go b/pkg/common/db/relation/user_model_k.go index c57adb7de..a3b573d94 100644 --- a/pkg/common/db/relation/user_model_k.go +++ b/pkg/common/db/relation/user_model_k.go @@ -100,3 +100,10 @@ func (u *UserGorm) Page(ctx context.Context, pageNumber, showNumber int32, tx .. 
err = utils.Wrap(getDBConn(u.DB, tx).Limit(int(showNumber)).Offset(int(pageNumber*showNumber)).Find(&users).Error, "") return } + +// 获取所有用户ID +func (u *UserGorm) GetAllUserID(ctx context.Context) ([]string, error) { + var userIDs []string + err := u.DB.Pluck("user_id", &userIDs).Error + return userIDs, err +} diff --git a/pkg/common/db/table/unrelation/extend_msg_set.go b/pkg/common/db/table/unrelation/extend_msg_set.go index 617ca388a..e691776d3 100644 --- a/pkg/common/db/table/unrelation/extend_msg_set.go +++ b/pkg/common/db/table/unrelation/extend_msg_set.go @@ -1,7 +1,7 @@ package unrelation import ( - common "Open_IM/pkg/proto/sdkws" + "Open_IM/pkg/proto/sdkws" "context" "strconv" "strings" @@ -14,12 +14,12 @@ const ( ) type ExtendMsgSetModel struct { - SourceID string `bson:"source_id" json:"sourceID"` - SessionType int32 `bson:"session_type" json:"sessionType"` - ExtendMsgs map[string]ExtendMsg `bson:"extend_msgs" json:"extendMsgs"` - ExtendMsgNum int32 `bson:"extend_msg_num" json:"extendMsgNum"` - CreateTime int64 `bson:"create_time" json:"createTime"` // this block's create time - MaxMsgUpdateTime int64 `bson:"max_msg_update_time" json:"maxMsgUpdateTime"` // index find msg + SourceID string `bson:"source_id" json:"sourceID"` + SessionType int32 `bson:"session_type" json:"sessionType"` + ExtendMsgs map[string]ExtendMsgModel `bson:"extend_msgs" json:"extendMsgs"` + ExtendMsgNum int32 `bson:"extend_msg_num" json:"extendMsgNum"` + CreateTime int64 `bson:"create_time" json:"createTime"` // this block's create time + MaxMsgUpdateTime int64 `bson:"max_msg_update_time" json:"maxMsgUpdateTime"` // index find msg } type KeyValueModel struct { @@ -28,7 +28,7 @@ type KeyValueModel struct { LatestUpdateTime int64 `bson:"latest_update_time" json:"latestUpdateTime"` } -type ExtendMsg struct { +type ExtendMsgModel struct { ReactionExtensionList map[string]KeyValueModel `bson:"reaction_extension_list" json:"reactionExtensionList"` ClientMsgID string `bson:"client_msg_id" 
json:"clientMsgID"` MsgFirstModifyTime int64 `bson:"msg_first_modify_time" json:"msgFirstModifyTime"` // this extendMsg create time @@ -36,6 +36,16 @@ type ExtendMsg struct { Ex string `bson:"ex" json:"ex"` } +type ExtendMsgSetModelInterface interface { + CreateExtendMsgSet(ctx context.Context, set *ExtendMsgSetModel) error + GetAllExtendMsgSet(ctx context.Context, sourceID string, opts *GetAllExtendMsgSetOpts) (sets []*ExtendMsgSetModel, err error) + GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*ExtendMsgSetModel, error) + InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *ExtendMsgModel) error + InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error + DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error + TakeExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *ExtendMsgModel, err error) +} + func (ExtendMsgSetModel) TableName() string { return CExtendMsgSet } @@ -57,13 +67,3 @@ func (e *ExtendMsgSetModel) SplitSourceIDAndGetIndex() int32 { type GetAllExtendMsgSetOpts struct { ExcludeExtendMsgs bool } - -type ExtendMsgSetInterface interface { - CreateExtendMsgSet(ctx context.Context, set *ExtendMsgSetModel) error - GetAllExtendMsgSet(ctx context.Context, ID string, opts *GetAllExtendMsgSetOpts) (sets []*ExtendMsgSetModel, err error) - GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*ExtendMsgSetModel, error) - InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *ExtendMsg) error - InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, 
clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*common.KeyValue) error - DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*common.KeyValue) error - GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *ExtendMsg, err error) -} diff --git a/pkg/common/db/table/unrelation/msg.go b/pkg/common/db/table/unrelation/msg.go index b212ed9a7..78adb1e8f 100644 --- a/pkg/common/db/table/unrelation/msg.go +++ b/pkg/common/db/table/unrelation/msg.go @@ -3,15 +3,19 @@ package unrelation import ( "Open_IM/pkg/common/constant" "Open_IM/pkg/proto/sdkws" + "context" "strconv" + "strings" ) const ( singleGocMsgNum = 5000 CChat = "msg" + OldestList = 0 + NewestList = -1 ) -type UserMsgDocModel struct { +type MsgDocModel struct { DocID string `bson:"uid"` Msg []MsgInfoModel `bson:"msg"` } @@ -21,53 +25,79 @@ type MsgInfoModel struct { Msg []byte `bson:"msg"` } -func (UserMsgDocModel) TableName() string { +type MsgDocModelInterface interface { + PushMsgsToDoc(ctx context.Context, docID string, msgsToMongo []MsgInfoModel) error + Create(ctx context.Context, model *MsgDocModel) error + UpdateMsgStatusByIndexInOneDoc(ctx context.Context, docID string, msg *sdkws.MsgData, seqIndex int, status int32) error + FindOneByDocID(ctx context.Context, docID string) (*MsgDocModel, error) + GetNewestMsg(ctx context.Context, sourceID string) (*MsgInfoModel, error) + GetOldestMsg(ctx context.Context, sourceID string) (*MsgInfoModel, error) + Delete(ctx context.Context, docIDs []string) error + GetMsgsByIndex(ctx context.Context, sourceID string, index int64) (*MsgDocModel, error) + UpdateOneDoc(ctx context.Context, msg *MsgDocModel) error +} + +func (MsgDocModel) TableName() string { return CChat } -func (UserMsgDocModel) GetSingleDocMsgNum() int { +func (MsgDocModel) GetSingleGocMsgNum() 
int { return singleGocMsgNum } -func (u UserMsgDocModel) getSeqUid(uid string, seq uint32) string { - seqSuffix := seq / singleGocMsgNum - return u.indexGen(uid, seqSuffix) -} - -func (u UserMsgDocModel) getSeqUserIDList(userID string, maxSeq uint32) []string { - seqMaxSuffix := maxSeq / singleGocMsgNum - var seqUserIDList []string - for i := 0; i <= int(seqMaxSuffix); i++ { - seqUserID := u.indexGen(userID, uint32(i)) - seqUserIDList = append(seqUserIDList, seqUserID) +func (m *MsgDocModel) IsFull() bool { + index, _ := strconv.Atoi(strings.Split(m.DocID, ":")[1]) + if index == 0 { + if len(m.Msg) >= singleGocMsgNum-1 { + return true + } } - return seqUserIDList + if len(m.Msg) >= singleGocMsgNum { + return true + } + + return false } -func (UserMsgDocModel) getSeqSuperGroupID(groupID string, seq uint32) string { +func (m MsgDocModel) GetDocID(sourceID string, seq uint32) string { seqSuffix := seq / singleGocMsgNum - return superGroupIndexGen(groupID, seqSuffix) + return m.indexGen(sourceID, seqSuffix) } -func (u UserMsgDocModel) GetSeqUid(uid string, seq uint32) string { - return u.getSeqUid(uid, seq) +func (m MsgDocModel) GetSeqDocIDList(userID string, maxSeq uint32) []string { + seqMaxSuffix := maxSeq / singleGocMsgNum + var seqUserIDs []string + for i := 0; i <= int(seqMaxSuffix); i++ { + seqUserID := m.indexGen(userID, uint32(i)) + seqUserIDs = append(seqUserIDs, seqUserID) + } + return seqUserIDs } -func (u UserMsgDocModel) GetDocIDSeqsMap(uid string, seqs []uint32) map[string][]uint32 { +func (m MsgDocModel) getSeqSuperGroupID(groupID string, seq uint32) string { + seqSuffix := seq / singleGocMsgNum + return m.superGroupIndexGen(groupID, seqSuffix) +} + +func (m MsgDocModel) superGroupIndexGen(groupID string, seqSuffix uint32) string { + return "super_group_" + groupID + ":" + strconv.FormatInt(int64(seqSuffix), 10) +} + +func (m MsgDocModel) GetDocIDSeqsMap(sourceID string, seqs []uint32) map[string][]uint32 { t := make(map[string][]uint32) for i := 0; i < 
len(seqs); i++ { - seqUid := u.getSeqUid(uid, seqs[i]) - if value, ok := t[seqUid]; !ok { + docID := m.GetDocID(sourceID, seqs[i]) + if value, ok := t[docID]; !ok { var temp []uint32 - t[seqUid] = append(temp, seqs[i]) + t[docID] = append(temp, seqs[i]) } else { - t[seqUid] = append(value, seqs[i]) + t[docID] = append(value, seqs[i]) } } return t } -func (UserMsgDocModel) getMsgIndex(seq uint32) int { +func (m MsgDocModel) getMsgIndex(seq uint32) int { seqSuffix := seq / singleGocMsgNum var index uint32 if seqSuffix == 0 { @@ -78,12 +108,12 @@ func (UserMsgDocModel) getMsgIndex(seq uint32) int { return int(index) } -func (UserMsgDocModel) indexGen(uid string, seqSuffix uint32) string { - return uid + ":" + strconv.FormatInt(int64(seqSuffix), 10) +func (m MsgDocModel) indexGen(sourceID string, seqSuffix uint32) string { + return sourceID + ":" + strconv.FormatInt(int64(seqSuffix), 10) } -func (UserMsgDocModel) genExceptionMessageBySeqList(seqList []uint32) (exceptionMsg []*sdkws.MsgData) { - for _, v := range seqList { +func (MsgDocModel) GenExceptionMessageBySeqs(seqs []uint32) (exceptionMsg []*sdkws.MsgData) { + for _, v := range seqs { msg := new(sdkws.MsgData) msg.Seq = v exceptionMsg = append(exceptionMsg, msg) @@ -91,8 +121,8 @@ func (UserMsgDocModel) genExceptionMessageBySeqList(seqList []uint32) (exception return exceptionMsg } -func (UserMsgDocModel) genExceptionSuperGroupMessageBySeqList(seqList []uint32, groupID string) (exceptionMsg []*sdkws.MsgData) { - for _, v := range seqList { +func (MsgDocModel) GenExceptionSuperGroupMessageBySeqs(seqs []uint32, groupID string) (exceptionMsg []*sdkws.MsgData) { + for _, v := range seqs { msg := new(sdkws.MsgData) msg.Seq = v msg.GroupID = groupID diff --git a/pkg/common/db/table/unrelation/super_group.go b/pkg/common/db/table/unrelation/super_group.go index d02a534aa..6f4409f64 100644 --- a/pkg/common/db/table/unrelation/super_group.go +++ b/pkg/common/db/table/unrelation/super_group.go @@ -37,7 +37,3 @@ type 
SuperGroupModelInterface interface { DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error RemoveGroupFromUser(ctx context.Context, groupID string, userIDs []string, tx ...any) error } - -func superGroupIndexGen(groupID string, seqSuffix uint32) string { - return "super_group_" + groupID + ":" + strconv.FormatInt(int64(seqSuffix), 10) -} diff --git a/pkg/common/db/unrelation/batch_insert_chat.go b/pkg/common/db/unrelation/batch_insert_chat.go deleted file mode 100644 index ce78e4725..000000000 --- a/pkg/common/db/unrelation/batch_insert_chat.go +++ /dev/null @@ -1,171 +0,0 @@ -package unrelation - -import ( - "Open_IM/pkg/common/config" - "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/db" - "Open_IM/pkg/common/log" - promePkg "Open_IM/pkg/common/prometheus" - pbMsg "Open_IM/pkg/proto/msg" - "Open_IM/pkg/utils" - "context" - "errors" - go_redis "github.com/go-redis/redis/v8" - "github.com/golang/protobuf/proto" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" -) - -func (d *db.DataBases) BatchInsertChat2DB(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string, currentMaxSeq uint64) error { - newTime := getCurrentTimestampByMill() - if len(msgList) > GetSingleGocMsgNum() { - return errors.New("too large") - } - isInit := false - var remain uint64 - blk0 := uint64(GetSingleGocMsgNum() - 1) - //currentMaxSeq 4998 - if currentMaxSeq < uint64(mongo2.GetSingleGocMsgNum()) { - remain = blk0 - currentMaxSeq //1 - } else { - excludeBlk0 := currentMaxSeq - blk0 //=1 - //(5000-1)%5000 == 4999 - remain = (uint64(mongo2.GetSingleGocMsgNum()) - (excludeBlk0 % uint64(mongo2.GetSingleGocMsgNum()))) % uint64(mongo2.GetSingleGocMsgNum()) - } - //remain=1 - insertCounter := uint64(0) - msgListToMongo := make([]mongo2.MsgInfo, 0) - msgListToMongoNext := make([]mongo2.MsgInfo, 0) - seqUid := "" - seqUidNext := "" - log.Debug(operationID, "remain ", remain, "insertCounter ", insertCounter, "currentMaxSeq ", currentMaxSeq, userID, 
len(msgList)) - var err error - for _, m := range msgList { - log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID) - currentMaxSeq++ - sMsg := mongo2.MsgInfo{} - sMsg.SendTime = m.MsgData.SendTime - m.MsgData.Seq = uint32(currentMaxSeq) - log.Debug(operationID, "mongo msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", userID, "seq: ", currentMaxSeq) - if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil { - return utils.Wrap(err, "") - } - if isInit { - msgListToMongoNext = append(msgListToMongoNext, sMsg) - seqUidNext = mongo2.getSeqUid(userID, uint32(currentMaxSeq)) - log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain) - continue - } - if insertCounter < remain { - msgListToMongo = append(msgListToMongo, sMsg) - insertCounter++ - seqUid = mongo2.getSeqUid(userID, uint32(currentMaxSeq)) - log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID) - } else { - msgListToMongoNext = append(msgListToMongoNext, sMsg) - seqUidNext = mongo2.getSeqUid(userID, uint32(currentMaxSeq)) - log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID) - } - } - - ctx := context.Background() - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(mongo2.cChat) - - if seqUid != "" { - filter := bson.M{"uid": seqUid} - log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo, "userID: ", userID) - err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err() - if err != nil { - if err == mongo.ErrNoDocuments { - filter := bson.M{"uid": seqUid} - sChat := mongo2.UserChat{} - sChat.UID = seqUid - sChat.Msg = msgListToMongo - log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo) - if _, err = c.InsertOne(ctx, &sChat); err != nil { - 
promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter) - log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat) - return utils.Wrap(err, "") - } - promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter) - } else { - promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter) - log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter) - return utils.Wrap(err, "") - } - } else { - promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter) - } - } - if seqUidNext != "" { - filter := bson.M{"uid": seqUidNext} - sChat := mongo2.UserChat{} - sChat.UID = seqUidNext - sChat.Msg = msgListToMongoNext - log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext, "userID: ", userID) - if _, err = c.InsertOne(ctx, &sChat); err != nil { - promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter) - log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat) - return utils.Wrap(err, "") - } - promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter) - } - log.Debug(operationID, "batch mgo cost time ", mongo2.getCurrentTimestampByMill()-newTime, userID, len(msgList)) - return nil -} - -func (d *db.DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgDataToMQ, operationID string) (error, uint64) { - newTime := mongo2.getCurrentTimestampByMill() - lenList := len(msgList) - if lenList > mongo2.GetSingleGocMsgNum() { - return errors.New("too large"), 0 - } - if lenList < 1 { - return errors.New("too short as 0"), 0 - } - // judge sessionType to get seq - var currentMaxSeq uint64 - var err error - if msgList[0].MsgData.SessionType == constant.SuperGroupChatType { - currentMaxSeq, err = d.GetGroupMaxSeq(insertID) - log.Debug(operationID, "constant.SuperGroupChatType lastMaxSeq before add ", currentMaxSeq, "userID ", insertID, err) - } else { - currentMaxSeq, err = d.GetUserMaxSeq(insertID) - log.Debug(operationID, "constant.SingleChatType lastMaxSeq before add ", currentMaxSeq, "userID ", insertID, err) - } - if 
err != nil && err != go_redis.Nil { - promePkg.PromeInc(promePkg.SeqGetFailedCounter) - return utils.Wrap(err, ""), 0 - } - promePkg.PromeInc(promePkg.SeqGetSuccessCounter) - - lastMaxSeq := currentMaxSeq - for _, m := range msgList { - - currentMaxSeq++ - sMsg := mongo2.MsgInfo{} - sMsg.SendTime = m.MsgData.SendTime - m.MsgData.Seq = uint32(currentMaxSeq) - log.Debug(operationID, "cache msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", insertID, "seq: ", currentMaxSeq) - } - log.Debug(operationID, "SetMessageToCache ", insertID, len(msgList)) - err, failedNum := d.SetMessageToCache(msgList, insertID, operationID) - if err != nil { - promePkg.PromeAdd(promePkg.MsgInsertRedisFailedCounter, failedNum) - log.Error(operationID, "setMessageToCache failed, continue ", err.Error(), len(msgList), insertID) - } else { - promePkg.PromeInc(promePkg.MsgInsertRedisSuccessCounter) - } - log.Debug(operationID, "batch to redis cost time ", mongo2.getCurrentTimestampByMill()-newTime, insertID, len(msgList)) - if msgList[0].MsgData.SessionType == constant.SuperGroupChatType { - err = d.SetGroupMaxSeq(insertID, currentMaxSeq) - } else { - err = d.SetUserMaxSeq(insertID, currentMaxSeq) - } - if err != nil { - promePkg.PromeInc(promePkg.SeqSetFailedCounter) - } else { - promePkg.PromeInc(promePkg.SeqSetSuccessCounter) - } - return utils.Wrap(err, ""), lastMaxSeq -} diff --git a/pkg/common/db/unrelation/extend_msg.go b/pkg/common/db/unrelation/extend_msg.go index 7f8564a46..44184c56d 100644 --- a/pkg/common/db/unrelation/extend_msg.go +++ b/pkg/common/db/unrelation/extend_msg.go @@ -1,7 +1,7 @@ package unrelation import ( - "Open_IM/pkg/common/db/table/unrelation" + unRelationTb "Open_IM/pkg/common/db/table/unrelation" "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/utils" "context" @@ -19,19 +19,15 @@ type ExtendMsgSetMongoDriver struct { } func NewExtendMsgSetMongoDriver(mgoDB *mongo.Database) *ExtendMsgSetMongoDriver { - return &ExtendMsgSetMongoDriver{mgoDB: mgoDB, 
ExtendMsgSetCollection: mgoDB.Collection(unrelation.CExtendMsgSet)} + return &ExtendMsgSetMongoDriver{mgoDB: mgoDB, ExtendMsgSetCollection: mgoDB.Collection(unRelationTb.CExtendMsgSet)} } -func (e *ExtendMsgSetMongoDriver) CreateExtendMsgSet(ctx context.Context, set *unrelation.ExtendMsgSet) error { +func (e *ExtendMsgSetMongoDriver) CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error { _, err := e.ExtendMsgSetCollection.InsertOne(ctx, set) return err } -type GetAllExtendMsgSetOpts struct { - ExcludeExtendMsgs bool -} - -func (e *ExtendMsgSetMongoDriver) GetAllExtendMsgSet(ctx context.Context, ID string, opts *GetAllExtendMsgSetOpts) (sets []*unrelation.ExtendMsgSet, err error) { +func (e *ExtendMsgSetMongoDriver) GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) { regex := fmt.Sprintf("^%s", ID) var findOpts *options.FindOptions if opts != nil { @@ -51,7 +47,7 @@ func (e *ExtendMsgSetMongoDriver) GetAllExtendMsgSet(ctx context.Context, ID str return sets, nil } -func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unrelation.ExtendMsgSet, error) { +func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) { var err error findOpts := options.Find().SetLimit(1).SetSkip(0).SetSort(bson.M{"source_id": -1}).SetProjection(bson.M{"extend_msgs": 0}) // update newest @@ -63,7 +59,7 @@ func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx context.Context, sourceID if err != nil { return nil, utils.Wrap(err, "") } - var setList []unrelation.ExtendMsgSet + var setList []unRelationTb.ExtendMsgSetModel if err := result.All(ctx, &setList); err != nil { return nil, utils.Wrap(err, "") } @@ -74,7 +70,7 @@ func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx 
context.Context, sourceID } // first modify msg -func (e *ExtendMsgSetMongoDriver) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unrelation.ExtendMsg) error { +func (e *ExtendMsgSetMongoDriver) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error { set, err := e.GetExtendMsgSet(ctx, sourceID, sessionType, 0) if err != nil { return utils.Wrap(err, "") @@ -84,10 +80,10 @@ func (e *ExtendMsgSetMongoDriver) InsertExtendMsg(ctx context.Context, sourceID if set != nil { index = set.SplitSourceIDAndGetIndex() } - err = e.CreateExtendMsgSet(ctx, &unrelation.ExtendMsgSet{ + err = e.CreateExtendMsgSet(ctx, &unRelationTb.ExtendMsgSetModel{ SourceID: set.GetSourceID(sourceID, index), SessionType: sessionType, - ExtendMsgs: map[string]unrelation.ExtendMsg{msg.ClientMsgID: *msg}, + ExtendMsgs: map[string]unRelationTb.ExtendMsgModel{msg.ClientMsgID: *msg}, ExtendMsgNum: 1, CreateTime: msg.MsgFirstModifyTime, MaxMsgUpdateTime: msg.MsgFirstModifyTime, @@ -136,14 +132,14 @@ func (e *ExtendMsgSetMongoDriver) DeleteReactionExtendMsgSet(ctx context.Context return err } -func (e *ExtendMsgSetMongoDriver) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unrelation.ExtendMsg, err error) { +func (e *ExtendMsgSetMongoDriver) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) { findOpts := options.Find().SetLimit(1).SetSkip(0).SetSort(bson.M{"source_id": -1}).SetProjection(bson.M{fmt.Sprintf("extend_msgs.%s", clientMsgID): 1}) regex := fmt.Sprintf("^%s", sourceID) result, err := e.ExtendMsgSetCollection.Find(ctx, bson.M{"source_id": primitive.Regex{Pattern: regex}, "session_type": sessionType, "max_msg_update_time": bson.M{"$lte": maxMsgUpdateTime}}, findOpts) if err != nil { return nil, utils.Wrap(err, "") } - 
var setList []unrelation.ExtendMsgSet + var setList []unRelationTb.ExtendMsgSetModel if err := result.All(ctx, &setList); err != nil { return nil, utils.Wrap(err, "") } diff --git a/pkg/common/db/unrelation/mongo.go b/pkg/common/db/unrelation/mongo.go index 75ae8c1b5..85e87b54f 100644 --- a/pkg/common/db/unrelation/mongo.go +++ b/pkg/common/db/unrelation/mongo.go @@ -62,17 +62,17 @@ func (m *Mongo) GetClient() *mongo.Client { } func (m *Mongo) CreateMsgIndex() { - if err := m.createMongoIndex(unrelation, false, "uid"); err != nil { + if err := m.createMongoIndex(unrelation.CChat, false, "uid"); err != nil { fmt.Println(err.Error() + " index create failed " + unrelation.CChat + " uid, please create index by yourself in field uid") } } func (m *Mongo) CreateSuperGroupIndex() { if err := m.createMongoIndex(unrelation.CSuperGroup, true, "group_id"); err != nil { - panic(err.Error() + "index create failed " + unrelation.CTag + " group_id") + panic(err.Error() + "index create failed " + unrelation.CSuperGroup + " group_id") } if err := m.createMongoIndex(unrelation.CUserToSuperGroup, true, "user_id"); err != nil { - panic(err.Error() + "index create failed " + unrelation.CTag + "user_id") + panic(err.Error() + "index create failed " + unrelation.CUserToSuperGroup + "user_id") } } diff --git a/pkg/common/db/unrelation/mongo_model.go b/pkg/common/db/unrelation/mongo_model.go deleted file mode 100644 index e0760c0c4..000000000 --- a/pkg/common/db/unrelation/mongo_model.go +++ /dev/null @@ -1,670 +0,0 @@ -package unrelation - - -// deleteMsgByLogic -//func (d *db.DataBases) DelMsgBySeqList(userID string, seqList []uint32, operationID string) (totalUnexistSeqList []uint32, err error) { -// log.Debug(operationID, utils.GetSelfFuncName(), "args ", userID, seqList) -// sortkeys.Uint32s(seqList) -// suffixUserID2SubSeqList := func(uid string, seqList []uint32) map[string][]uint32 { -// t := make(map[string][]uint32) -// for i := 0; i < len(seqList); i++ { -// seqUid := 
getSeqUid(uid, seqList[i]) -// if value, ok := t[seqUid]; !ok { -// var temp []uint32 -// t[seqUid] = append(temp, seqList[i]) -// } else { -// t[seqUid] = append(value, seqList[i]) -// } -// } -// return t -// }(userID, seqList) -// -// lock := sync.Mutex{} -// var wg sync.WaitGroup -// wg.Add(len(suffixUserID2SubSeqList)) -// for k, v := range suffixUserID2SubSeqList { -// go func(suffixUserID string, subSeqList []uint32, operationID string) { -// defer wg.Done() -// unexistSeqList, err := d.DelMsgBySeqListInOneDoc(suffixUserID, subSeqList, operationID) -// if err != nil { -// log.Error(operationID, "DelMsgBySeqListInOneDoc failed ", err.Error(), suffixUserID, subSeqList) -// return -// } -// lock.Lock() -// totalUnexistSeqList = append(totalUnexistSeqList, unexistSeqList...) -// lock.Unlock() -// }(k, v, operationID) -// } -// return totalUnexistSeqList, err -//} -// -//func (d *db.DataBases) DelMsgBySeqListInOneDoc(suffixUserID string, seqList []uint32, operationID string) ([]uint32, error) { -// log.Debug(operationID, utils.GetSelfFuncName(), "args ", suffixUserID, seqList) -// seqMsgList, indexList, unexistSeqList, err := d.GetMsgAndIndexBySeqListInOneMongo2(suffixUserID, seqList, operationID) -// if err != nil { -// return nil, utils.Wrap(err, "") -// } -// for i, v := range seqMsgList { -// if err := d.ReplaceMsgByIndex(suffixUserID, v, operationID, indexList[i]); err != nil { -// return nil, utils.Wrap(err, "") -// } -// } -// return unexistSeqList, nil -//} - -// deleteMsgByLogic -//func (d *db.DataBases) DelMsgLogic(uid string, seqList []uint32, operationID string) error { -// sortkeys.Uint32s(seqList) -// seqMsgs, err := d.GetMsgBySeqListMongo2(uid, seqList, operationID) -// if err != nil { -// return utils.Wrap(err, "") -// } -// for _, seqMsg := range seqMsgs { -// log.NewDebug(operationID, utils.GetSelfFuncName(), *seqMsg) -// seqMsg.Status = constant.MsgDeleted -// if err = d.ReplaceMsgBySeq(uid, seqMsg, operationID); err != nil { -// 
log.NewError(operationID, utils.GetSelfFuncName(), "ReplaceMsgListBySeq error", err.Error()) -// } -// } -// return nil -//} - -//func (d *db.DataBases) ReplaceMsgByIndex(suffixUserID string, msg *sdkws.MsgData, operationID string, seqIndex int) error { -// log.NewInfo(operationID, utils.GetSelfFuncName(), suffixUserID, *msg) -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// s := fmt.Sprintf("msg.%d.msg", seqIndex) -// log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s) -// msg.Status = constant.MsgDeleted -// bytes, err := proto.Marshal(msg) -// if err != nil { -// log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal failed ", err.Error(), msg.String()) -// return utils.Wrap(err, "") -// } -// updateResult, err := c.UpdateOne(ctx, bson.M{"uid": suffixUserID}, bson.M{"$set": bson.M{s: bytes}}) -// log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult) -// if err != nil { -// log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error()) -// return utils.Wrap(err, "") -// } -// return nil -//} - -//func (d *db.DataBases) ReplaceMsgBySeq(uid string, msg *sdkws.MsgData, operationID string) error { -// log.NewInfo(operationID, utils.GetSelfFuncName(), uid, *msg) -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// uid = getSeqUid(uid, msg.Seq) -// seqIndex := getMsgIndex(msg.Seq) -// s := fmt.Sprintf("msg.%d.msg", seqIndex) -// log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s) -// bytes, err := proto.Marshal(msg) -// if err != nil { -// log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal", err.Error()) -// return utils.Wrap(err, "") -// } -// -// updateResult, err := c.UpdateOne( -// ctx, bson.M{"uid": 
uid}, -// bson.M{"$set": bson.M{s: bytes}}) -// log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult) -// if err != nil { -// log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error()) -// return utils.Wrap(err, "") -// } -// return nil -//} -// -//func (d *db.DataBases) UpdateOneMsgList(msg *UserChat) error { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// _, err := c.UpdateOne(ctx, bson.M{"uid": msg.UID}, bson.M{"$set": bson.M{"msg": msg.Msg}}) -// return err -//} -// -//func (d *db.DataBases) GetMsgBySeqList(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) { -// log.NewInfo(operationID, utils.GetSelfFuncName(), uid, seqList) -// var hasSeqList []uint32 -// singleCount := 0 -// session := d.mgoSession.Clone() -// if session == nil { -// return nil, errors.New("session == nil") -// } -// defer session.Close() -// c := session.DB(config.Config.Mongo.DBDatabase).C(cChat) -// m := func(uid string, seqList []uint32) map[string][]uint32 { -// t := make(map[string][]uint32) -// for i := 0; i < len(seqList); i++ { -// seqUid := getSeqUid(uid, seqList[i]) -// if value, ok := t[seqUid]; !ok { -// var temp []uint32 -// t[seqUid] = append(temp, seqList[i]) -// } else { -// t[seqUid] = append(value, seqList[i]) -// } -// } -// return t -// }(uid, seqList) -// sChat := UserChat{} -// for seqUid, value := range m { -// if err = c.Find(bson.M{"uid": seqUid}).One(&sChat); err != nil { -// log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error()) -// continue -// } -// singleCount = 0 -// for i := 0; i < len(sChat.Msg); i++ { -// msg := new(sdkws.MsgData) -// if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { -// log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error()) -// return nil, err -// } -// 
if isContainInt32(msg.Seq, value) { -// seqMsg = append(seqMsg, msg) -// hasSeqList = append(hasSeqList, msg.Seq) -// singleCount++ -// if singleCount == len(value) { -// break -// } -// } -// } -// } -// if len(hasSeqList) != len(seqList) { -// var diff []uint32 -// diff = utils.Difference(hasSeqList, seqList) -// exceptionMSg := genExceptionMessageBySeqList(diff) -// seqMsg = append(seqMsg, exceptionMSg...) -// -// } -// return seqMsg, nil -//} -// -//func (d *db.DataBases) GetUserMsgListByIndex(ID string, index int64) (*UserChat, error) { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// regex := fmt.Sprintf("^%s", ID) -// findOpts := options.Find().SetLimit(1).SetSkip(index).SetSort(bson.M{"uid": 1}) -// var msgs []UserChat -// //primitive.Regex{Pattern: regex} -// cursor, err := c.Find(ctx, bson.M{"uid": primitive.Regex{Pattern: regex}}, findOpts) -// if err != nil { -// return nil, utils.Wrap(err, "") -// } -// err = cursor.All(context.Background(), &msgs) -// if err != nil { -// return nil, utils.Wrap(err, fmt.Sprintf("cursor is %s", cursor.Current.String())) -// } -// if len(msgs) > 0 { -// return &msgs[0], nil -// } else { -// return nil, ErrMsgListNotExist -// } -//} -// -//func (d *db.DataBases) DelMongoMsgs(IDList []string) error { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// _, err := c.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": IDList}}) -// return err -//} -// -//func (d *db.DataBases) ReplaceMsgToBlankByIndex(suffixID string, index int) (replaceMaxSeq uint32, err error) { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := 
d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// userChat := &UserChat{} -// err = c.FindOne(ctx, bson.M{"uid": suffixID}).Decode(&userChat) -// if err != nil { -// return 0, err -// } -// for i, msg := range userChat.Msg { -// if i <= index { -// msgPb := &sdkws.MsgData{} -// if err = proto.Unmarshal(msg.Msg, msgPb); err != nil { -// continue -// } -// newMsgPb := &sdkws.MsgData{Seq: msgPb.Seq} -// bytes, err := proto.Marshal(newMsgPb) -// if err != nil { -// continue -// } -// msg.Msg = bytes -// msg.SendTime = 0 -// replaceMaxSeq = msgPb.Seq -// } -// } -// _, err = c.UpdateOne(ctx, bson.M{"uid": suffixID}, bson.M{"$set": bson.M{"msg": userChat.Msg}}) -// return replaceMaxSeq, err -//} -// -//func (d *db.DataBases) GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// regex := fmt.Sprintf("^%s", ID) -// findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": -1}) -// var userChats []UserChat -// cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts) -// if err != nil { -// return nil, err -// } -// err = cursor.All(ctx, &userChats) -// if err != nil { -// return nil, utils.Wrap(err, "") -// } -// if len(userChats) > 0 { -// if len(userChats[0].Msg) > 0 { -// msgPb := &sdkws.MsgData{} -// err = proto.Unmarshal(userChats[0].Msg[len(userChats[0].Msg)-1].Msg, msgPb) -// if err != nil { -// return nil, utils.Wrap(err, "") -// } -// return msgPb, nil -// } -// return nil, errors.New("len(userChats[0].Msg) < 0") -// } -// return nil, nil -//} -// -//func (d *db.DataBases) GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := 
d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// regex := fmt.Sprintf("^%s", ID) -// findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": 1}) -// var userChats []UserChat -// cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts) -// if err != nil { -// return nil, err -// } -// err = cursor.All(ctx, &userChats) -// if err != nil { -// return nil, utils.Wrap(err, "") -// } -// var oldestMsg []byte -// if len(userChats) > 0 { -// for _, v := range userChats[0].Msg { -// if v.SendTime != 0 { -// oldestMsg = v.Msg -// break -// } -// } -// if len(oldestMsg) == 0 { -// oldestMsg = userChats[0].Msg[len(userChats[0].Msg)-1].Msg -// } -// msgPb := &sdkws.MsgData{} -// err = proto.Unmarshal(oldestMsg, msgPb) -// if err != nil { -// return nil, utils.Wrap(err, "") -// } -// return msgPb, nil -// } -// return nil, nil -//} -// -//func (d *db.DataBases) GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) { -// var hasSeqList []uint32 -// singleCount := 0 -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// -// m := func(uid string, seqList []uint32) map[string][]uint32 { -// t := make(map[string][]uint32) -// for i := 0; i < len(seqList); i++ { -// seqUid := getSeqUid(uid, seqList[i]) -// if value, ok := t[seqUid]; !ok { -// var temp []uint32 -// t[seqUid] = append(temp, seqList[i]) -// } else { -// t[seqUid] = append(value, seqList[i]) -// } -// } -// return t -// }(uid, seqList) -// sChat := UserChat{} -// for seqUid, value := range m { -// if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil { -// log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error()) -// continue -// } -// singleCount = 0 -// for i := 0; i < len(sChat.Msg); i++ { -// msg := new(sdkws.MsgData) -// 
if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { -// log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error()) -// return nil, err -// } -// if isContainInt32(msg.Seq, value) { -// seqMsg = append(seqMsg, msg) -// hasSeqList = append(hasSeqList, msg.Seq) -// singleCount++ -// if singleCount == len(value) { -// break -// } -// } -// } -// } -// if len(hasSeqList) != len(seqList) { -// var diff []uint32 -// diff = utils.Difference(hasSeqList, seqList) -// exceptionMSg := genExceptionMessageBySeqList(diff) -// seqMsg = append(seqMsg, exceptionMSg...) -// -// } -// return seqMsg, nil -//} -//func (d *db.DataBases) GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) { -// var hasSeqList []uint32 -// singleCount := 0 -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// -// m := func(uid string, seqList []uint32) map[string][]uint32 { -// t := make(map[string][]uint32) -// for i := 0; i < len(seqList); i++ { -// seqUid := getSeqUid(uid, seqList[i]) -// if value, ok := t[seqUid]; !ok { -// var temp []uint32 -// t[seqUid] = append(temp, seqList[i]) -// } else { -// t[seqUid] = append(value, seqList[i]) -// } -// } -// return t -// }(groupID, seqList) -// sChat := UserChat{} -// for seqUid, value := range m { -// if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil { -// log.NewError(operationID, "not find seqGroupID", seqUid, value, groupID, seqList, err.Error()) -// continue -// } -// singleCount = 0 -// for i := 0; i < len(sChat.Msg); i++ { -// msg := new(sdkws.MsgData) -// if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { -// log.NewError(operationID, "Unmarshal err", seqUid, value, groupID, seqList, err.Error()) -// return nil, err -// } -// if isContainInt32(msg.Seq, value) { -// 
seqMsg = append(seqMsg, msg) -// hasSeqList = append(hasSeqList, msg.Seq) -// singleCount++ -// if singleCount == len(value) { -// break -// } -// } -// } -// } -// if len(hasSeqList) != len(seqList) { -// var diff []uint32 -// diff = utils.Difference(hasSeqList, seqList) -// exceptionMSg := genExceptionSuperGroupMessageBySeqList(diff, groupID) -// seqMsg = append(seqMsg, exceptionMSg...) -// -// } -// return seqMsg, nil -//} -// -//func (d *db.DataBases) GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, unexistSeqList []uint32, err error) { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// sChat := UserChat{} -// if err = c.FindOne(ctx, bson.M{"uid": suffixUserID}).Decode(&sChat); err != nil { -// log.NewError(operationID, "not find seqUid", suffixUserID, err.Error()) -// return nil, nil, nil, utils.Wrap(err, "") -// } -// singleCount := 0 -// var hasSeqList []uint32 -// for i := 0; i < len(sChat.Msg); i++ { -// msg := new(sdkws.MsgData) -// if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { -// log.NewError(operationID, "Unmarshal err", msg.String(), err.Error()) -// return nil, nil, nil, err -// } -// if isContainInt32(msg.Seq, seqList) { -// indexList = append(indexList, i) -// seqMsg = append(seqMsg, msg) -// hasSeqList = append(hasSeqList, msg.Seq) -// singleCount++ -// if singleCount == len(seqList) { -// break -// } -// } -// } -// for _, i := range seqList { -// if isContainInt32(i, hasSeqList) { -// continue -// } -// unexistSeqList = append(unexistSeqList, i) -// } -// return seqMsg, indexList, unexistSeqList, nil -//} -// -//func genExceptionMessageBySeqList(seqList []uint32) (exceptionMsg []*sdkws.MsgData) { -// for _, v := range seqList { -// msg := new(sdkws.MsgData) -// msg.Seq = v -// exceptionMsg = 
append(exceptionMsg, msg) -// } -// return exceptionMsg -//} -// -//func genExceptionSuperGroupMessageBySeqList(seqList []uint32, groupID string) (exceptionMsg []*sdkws.MsgData) { -// for _, v := range seqList { -// msg := new(sdkws.MsgData) -// msg.Seq = v -// msg.GroupID = groupID -// msg.SessionType = constant.SuperGroupChatType -// exceptionMsg = append(exceptionMsg, msg) -// } -// return exceptionMsg -//} -// -//func (d *db.DataBases) SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// newTime := getCurrentTimestampByMill() -// operationID := "" -// seqUid := getSeqUid(uid, m.MsgData.Seq) -// filter := bson.M{"uid": seqUid} -// var err error -// sMsg := MsgInfo{} -// sMsg.SendTime = sendTime -// if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil { -// return utils.Wrap(err, "") -// } -// err = c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": sMsg}}).Err() -// log.NewWarn(operationID, "get mgoSession cost time", getCurrentTimestampByMill()-newTime) -// if err != nil { -// sChat := UserChat{} -// sChat.UID = seqUid -// sChat.Msg = append(sChat.Msg, sMsg) -// if _, err = c.InsertOne(ctx, &sChat); err != nil { -// log.NewDebug(operationID, "InsertOne failed", filter) -// return utils.Wrap(err, "") -// } -// } else { -// log.NewDebug(operationID, "FindOneAndUpdate ok", filter) -// } -// -// log.NewDebug(operationID, "find mgo uid cost time", getCurrentTimestampByMill()-newTime) -// return nil -//} - -// -//func (d *DataBases) SaveUserChatListMongo2(uid string, sendTime int64, msgList []*pbMsg.MsgDataToDB) error { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// newTime := 
getCurrentTimestampByMill() -// operationID := "" -// seqUid := "" -// msgListToMongo := make([]MsgInfo, 0) -// -// for _, m := range msgList { -// seqUid = getSeqUid(uid, m.MsgData.Seq) -// var err error -// sMsg := MsgInfo{} -// sMsg.SendTime = sendTime -// if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil { -// return utils.Wrap(err, "") -// } -// msgListToMongo = append(msgListToMongo, sMsg) -// } -// -// filter := bson.M{"uid": seqUid} -// log.NewDebug(operationID, "filter ", seqUid) -// err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err() -// log.NewWarn(operationID, "get mgoSession cost time", getCurrentTimestampByMill()-newTime) -// if err != nil { -// sChat := UserChat{} -// sChat.UID = seqUid -// sChat.Msg = msgListToMongo -// -// if _, err = c.InsertOne(ctx, &sChat); err != nil { -// log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat) -// return utils.Wrap(err, "") -// } -// } else { -// log.NewDebug(operationID, "FindOneAndUpdate ok", filter) -// } -// -// log.NewDebug(operationID, "find mgo uid cost time", getCurrentTimestampByMill()-newTime) -// return nil -//} - -//func (d *db.DataBases) SaveUserChat(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error { -// var seqUid string -// newTime := getCurrentTimestampByMill() -// session := d.mgoSession.Clone() -// if session == nil { -// return errors.New("session == nil") -// } -// defer session.Close() -// log.NewDebug("", "get mgoSession cost time", getCurrentTimestampByMill()-newTime) -// c := session.DB(config.Config.Mongo.DBDatabase).C(cChat) -// seqUid = getSeqUid(uid, m.MsgData.Seq) -// n, err := c.Find(bson.M{"uid": seqUid}).Count() -// if err != nil { -// return err -// } -// log.NewDebug("", "find mgo uid cost time", getCurrentTimestampByMill()-newTime) -// sMsg := MsgInfo{} -// sMsg.SendTime = sendTime -// if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil { -// return err -// } -// if n == 0 { -// sChat 
:= UserChat{} -// sChat.UID = seqUid -// sChat.Msg = append(sChat.Msg, sMsg) -// err = c.Insert(&sChat) -// if err != nil { -// return err -// } -// } else { -// err = c.Update(bson.M{"uid": seqUid}, bson.M{"$push": bson.M{"msg": sMsg}}) -// if err != nil { -// return err -// } -// } -// log.NewDebug("", "insert mgo data cost time", getCurrentTimestampByMill()-newTime) -// return nil -//} -// -//func (d *db.DataBases) DelUserChatMongo2(uid string) error { -// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) -// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) -// filter := bson.M{"uid": uid} -// -// delTime := time.Now().Unix() - int64(config.Config.Mongo.DBRetainChatRecords)*24*3600 -// if _, err := c.UpdateOne(ctx, filter, bson.M{"$pull": bson.M{"msg": bson.M{"sendtime": bson.M{"$lte": delTime}}}}); err != nil { -// return utils.Wrap(err, "") -// } -// return nil -//} -// -//func (d *db.DataBases) MgoSkipUID(count int) (string, error) { -// return "", nil - //session := d.mgoSession.Clone() - //if session == nil { - // return "", errors.New("session == nil") - //} - //defer session.Close() - // - //c := session.DB(config.Config.Mongo.DBDatabase).C(cChat) - // - //sChat := UserChat{} - //c.Find(nil).Skip(count).Limit(1).One(&sChat) - //return sChat.UID, nil -} - -//func generateTagID(tagName, userID string) string { -// return utils.Md5(tagName + userID + strconv.Itoa(rand.Int()) + time.Now().String()) -//} - - - -//func getCurrentTimestampByMill() int64 { -// return time.Now().UnixNano() / 1e6 -//} -//func GetCurrentTimestampByMill() int64 { -// return time.Now().UnixNano() / 1e6 -//} - -//func getSeqUid(uid string, seq uint32) string { -// seqSuffix := seq / singleGocMsgNum -// return indexGen(uid, seqSuffix) -//} -// -//func getSeqUserIDList(userID string, maxSeq uint32) []string { -// seqMaxSuffix := maxSeq / singleGocMsgNum -// var seqUserIDList []string -// for i := 0; 
i <= int(seqMaxSuffix); i++ { -// seqUserID := indexGen(userID, uint32(i)) -// seqUserIDList = append(seqUserIDList, seqUserID) -// } -// return seqUserIDList -//} -// -//func getSeqSuperGroupID(groupID string, seq uint32) string { -// seqSuffix := seq / singleGocMsgNum -// return superGroupIndexGen(groupID, seqSuffix) -//} -// -//func GetSeqUid(uid string, seq uint32) string { -// return getSeqUid(uid, seq) -//} -// -//func getMsgIndex(seq uint32) int { -// seqSuffix := seq / singleGocMsgNum -// var index uint32 -// if seqSuffix == 0 { -// index = (seq - seqSuffix*singleGocMsgNum) - 1 -// } else { -// index = seq - seqSuffix*singleGocMsgNum -// } -// return int(index) -//} - -//func isContainInt32(target uint32, List []uint32) bool { -// for _, element := range List { -// if target == element { -// return true -// } -// } -// return false -//} -// -//func isNotContainInt32(target uint32, List []uint32) bool { -// for _, i := range List { -// if i == target { -// return false -// } -// } -// return true -//} -// -//func indexGen(uid string, seqSuffix uint32) string { -// return uid + ":" + strconv.FormatInt(int64(seqSuffix), 10) -//} - -//func superGroupIndexGen(groupID string, seqSuffix uint32) string { -// return "super_group_" + groupID + ":" + strconv.FormatInt(int64(seqSuffix), 10) -//} - diff --git a/pkg/common/db/unrelation/msg.go b/pkg/common/db/unrelation/msg.go index 50e85fafd..783606105 100644 --- a/pkg/common/db/unrelation/msg.go +++ b/pkg/common/db/unrelation/msg.go @@ -1,561 +1,131 @@ package unrelation import ( - "Open_IM/pkg/common/constant" - "Open_IM/pkg/common/db/table/unrelation" + table "Open_IM/pkg/common/db/table/unrelation" "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/utils" "context" "errors" - "github.com/go-redis/redis/v8" - "github.com/gogo/protobuf/sortkeys" + "fmt" + "github.com/golang/protobuf/proto" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" 
"go.mongodb.org/mongo-driver/mongo/options" - "sync" - "time" ) var ErrMsgListNotExist = errors.New("user not have msg in mongoDB") +var ErrMsgNotFound = errors.New("msg not found") type MsgMongoDriver struct { mgoDB *mongo.Database MsgCollection *mongo.Collection + msg table.MsgDocModel } func NewMsgMongoDriver(mgoDB *mongo.Database) *MsgMongoDriver { - return &MsgMongoDriver{mgoDB: mgoDB, MsgCollection: mgoDB.Collection(unrelation.CChat)} + return &MsgMongoDriver{mgoDB: mgoDB, MsgCollection: mgoDB.Collection(table.MsgDocModel{}.TableName())} } -func (m *MsgMongoDriver) FindOneAndUpdate(ctx context.Context, filter, update, output interface{}, opts ...*options.FindOneAndUpdateOptions) error { - return m.MsgCollection.FindOneAndUpdate(ctx, filter, update, opts...).Decode(output) +func (m *MsgMongoDriver) PushMsgsToDoc(ctx context.Context, docID string, msgsToMongo []table.MsgInfoModel) error { + filter := bson.M{"uid": docID} + return m.MsgCollection.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgsToMongo}}}).Err() } -func (m *MsgMongoDriver) UpdateOne(ctx context.Context, filter, update interface{}, opts ...*options.UpdateOptions) error { - _, err := m.MsgCollection.UpdateOne(ctx, filter, update, opts...) 
+func (m *MsgMongoDriver) Create(ctx context.Context, model *table.MsgDocModel) error { + _, err := m.MsgCollection.InsertOne(ctx, model) return err } -// database controller -func (m *MsgMongoDriver) DelMsgBySeqList(ctx context.Context, userID string, seqList []uint32) (totalUnExistSeqList []uint32, err error) { - sortkeys.Uint32s(seqList) - suffixUserID2SubSeqList := func(uid string, seqList []uint32) map[string][]uint32 { - t := make(map[string][]uint32) - for i := 0; i < len(seqList); i++ { - seqUid := getSeqUid(uid, seqList[i]) - if value, ok := t[seqUid]; !ok { - var temp []uint32 - t[seqUid] = append(temp, seqList[i]) - } else { - t[seqUid] = append(value, seqList[i]) - } - } - return t - }(userID, seqList) - lock := sync.Mutex{} - var wg sync.WaitGroup - wg.Add(len(suffixUserID2SubSeqList)) - for k, v := range suffixUserID2SubSeqList { - go func(suffixUserID string, subSeqList []uint32) { - defer wg.Done() - unexistSeqList, err := m.DelMsgBySeqListInOneDoc(ctx, suffixUserID, subSeqList) - if err != nil { - return - } - lock.Lock() - totalUnExistSeqList = append(totalUnExistSeqList, unexistSeqList...) 
- lock.Unlock() - }(k, v) - } - return totalUnExistSeqList, nil -} - -func (m *MsgMongoDriver) DelMsgBySeqListInOneDoc(ctx context.Context, suffixUserID string, seqList []uint32) ([]uint32, error) { - seqMsgList, indexList, unexistSeqList, err := m.GetMsgAndIndexBySeqListInOneMongo2(suffixUserID, seqList) - if err != nil { - return nil, utils.Wrap(err, "") - } - for i, v := range seqMsgList { - if err := m.ReplaceMsgByIndex(suffixUserID, v, operationID, indexList[i]); err != nil { - return nil, utils.Wrap(err, "") - } - } - return unexistSeqList, nil -} - -// database -func (m *MsgMongoDriver) DelMsgLogic(ctx context.Context, uid string, seqList []uint32) error { - sortkeys.Uint32s(seqList) - seqMsgs, err := d.GetMsgBySeqListMongo2(ctx, uid, seqList) - if err != nil { - return utils.Wrap(err, "") - } - for _, seqMsg := range seqMsgs { - seqMsg.Status = constant.MsgDeleted - if err = d.ReplaceMsgBySeq(ctx, uid, seqMsg); err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), "ReplaceMsgListBySeq error", err.Error()) - } - } - return nil -} - -// model -func (m *MsgMongoDriver) ReplaceMsgByIndex(ctx context.Context, suffixUserID string, msg *sdkws.MsgData, seqIndex int) error { - log.NewInfo(operationID, utils.GetSelfFuncName(), suffixUserID, *msg) - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - s := fmt.Sprintf("msg.%d.msg", seqIndex) - log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s) - msg.Status = constant.MsgDeleted +func (m *MsgMongoDriver) UpdateMsgStatusByIndexInOneDoc(ctx context.Context, docID string, msg *sdkws.MsgData, seqIndex int, status int32) error { + msg.Status = status bytes, err := proto.Marshal(msg) if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal failed ", err.Error(), msg.String()) return utils.Wrap(err, "") } - updateResult, err := 
c.UpdateOne(ctx, bson.M{"uid": suffixUserID}, bson.M{"$set": bson.M{s: bytes}}) - log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult) + _, err = m.MsgCollection.UpdateOne(ctx, bson.M{"uid": docID}, bson.M{"$set": bson.M{fmt.Sprintf("msg.%d.msg", seqIndex): bytes}}) if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error()) return utils.Wrap(err, "") } return nil } -func (d *db.DataBases) ReplaceMsgBySeq(uid string, msg *sdkws.MsgData, operationID string) error { - log.NewInfo(operationID, utils.GetSelfFuncName(), uid, *msg) - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - uid = getSeqUid(uid, msg.Seq) - seqIndex := getMsgIndex(msg.Seq) - s := fmt.Sprintf("msg.%d.msg", seqIndex) - log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s) - bytes, err := proto.Marshal(msg) - if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal", err.Error()) - return utils.Wrap(err, "") - } - - updateResult, err := c.UpdateOne( - ctx, bson.M{"uid": uid}, - bson.M{"$set": bson.M{s: bytes}}) - log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult) - if err != nil { - log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error()) - return utils.Wrap(err, "") - } - return nil +func (m *MsgMongoDriver) FindOneByDocID(ctx context.Context, docID string) (*table.MsgDocModel, error) { + doc := &table.MsgDocModel{} + err := m.MsgCollection.FindOne(ctx, bson.M{"uid": docID}).Decode(doc) + return doc, err } -func (d *db.DataBases) UpdateOneMsgList(msg *UserChat) error { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - _, err := c.UpdateOne(ctx, bson.M{"uid": msg.UID}, bson.M{"$set": bson.M{"msg": 
msg.Msg}}) - return err -} - -func (d *db.DataBases) GetMsgBySeqList(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) { - log.NewInfo(operationID, utils.GetSelfFuncName(), uid, seqList) - var hasSeqList []uint32 - singleCount := 0 - session := d.mgoSession.Clone() - if session == nil { - return nil, errors.New("session == nil") - } - defer session.Close() - c := session.DB(config.Config.Mongo.DBDatabase).C(cChat) - m := func(uid string, seqList []uint32) map[string][]uint32 { - t := make(map[string][]uint32) - for i := 0; i < len(seqList); i++ { - seqUid := getSeqUid(uid, seqList[i]) - if value, ok := t[seqUid]; !ok { - var temp []uint32 - t[seqUid] = append(temp, seqList[i]) - } else { - t[seqUid] = append(value, seqList[i]) - } - } - return t - }(uid, seqList) - sChat := UserChat{} - for seqUid, value := range m { - if err = c.Find(bson.M{"uid": seqUid}).One(&sChat); err != nil { - log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error()) - continue - } - singleCount = 0 - for i := 0; i < len(sChat.Msg); i++ { - msg := new(sdkws.MsgData) - if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { - log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error()) - return nil, err - } - if isContainInt32(msg.Seq, value) { - seqMsg = append(seqMsg, msg) - hasSeqList = append(hasSeqList, msg.Seq) - singleCount++ - if singleCount == len(value) { - break - } - } - } - } - if len(hasSeqList) != len(seqList) { - var diff []uint32 - diff = utils.Difference(hasSeqList, seqList) - exceptionMSg := genExceptionMessageBySeqList(diff) - seqMsg = append(seqMsg, exceptionMSg...) 
- - } - return seqMsg, nil -} - -// model -func (d *db.DataBases) GetUserMsgListByIndex(docID string, index int64) (*UserChat, error) { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - regex := fmt.Sprintf("^%s", docID) +func (m *MsgMongoDriver) GetMsgsByIndex(ctx context.Context, sourceID string, index int64) (*table.MsgDocModel, error) { findOpts := options.Find().SetLimit(1).SetSkip(index).SetSort(bson.M{"uid": 1}) - var msgs []UserChat - //primitive.Regex{Pattern: regex} - cursor, err := c.Find(ctx, bson.M{"uid": primitive.Regex{Pattern: regex}}, findOpts) + cursor, err := m.MsgCollection.Find(ctx, bson.M{"uid": primitive.Regex{Pattern: fmt.Sprintf("^%s", sourceID)}}, findOpts) if err != nil { return nil, utils.Wrap(err, "") } + var msgs []table.MsgDocModel err = cursor.All(context.Background(), &msgs) if err != nil { return nil, utils.Wrap(err, fmt.Sprintf("cursor is %s", cursor.Current.String())) } if len(msgs) > 0 { return &msgs[0], nil - } else { - return nil, ErrMsgListNotExist } + return nil, ErrMsgListNotExist } -// model -func (d *db.DataBases) DelMongoMsgs(IDList []string) error { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - _, err := c.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": IDList}}) +func (m *MsgMongoDriver) GetNewestMsg(ctx context.Context, sourceID string) (*table.MsgInfoModel, error) { + var msgDocs []table.MsgDocModel + cursor, err := m.MsgCollection.Find(ctx, bson.M{"uid": bson.M{"$regex": fmt.Sprintf("^%s", sourceID)}}, options.Find().SetLimit(1).SetSort(bson.M{"uid": -1})) + if err != nil { + return nil, utils.Wrap(err, "") + } + err = cursor.All(ctx, &msgDocs) + if err != nil { + return nil, utils.Wrap(err, "") + } + if len(msgDocs) > 0 { + if 
len(msgDocs[0].Msg) > 0 { + return &msgDocs[0].Msg[len(msgDocs[0].Msg)-1], nil + } + return nil, errors.New("len(msgDocs[0].Msg) < 0") + } + return nil, ErrMsgNotFound +} + +func (m *MsgMongoDriver) GetOldestMsg(ctx context.Context, sourceID string) (*table.MsgInfoModel, error) { + var msgDocs []table.MsgDocModel + cursor, err := m.MsgCollection.Find(ctx, bson.M{"uid": bson.M{"$regex": fmt.Sprintf("^%s", sourceID)}}, options.Find().SetLimit(1).SetSort(bson.M{"uid": 1})) + if err != nil { + return nil, err + } + err = cursor.All(ctx, &msgDocs) + if err != nil { + return nil, utils.Wrap(err, "") + } + var oldestMsg table.MsgInfoModel + if len(msgDocs) > 0 { + for _, v := range msgDocs[0].Msg { + if v.SendTime != 0 { + oldestMsg = v + break + } + } + if len(oldestMsg.Msg) == 0 { + if len(msgDocs[0].Msg) > 0 { + oldestMsg = msgDocs[0].Msg[0] + } + } + return &oldestMsg, nil + } + return nil, ErrMsgNotFound +} + +func (m *MsgMongoDriver) Delete(ctx context.Context, docIDs []string) error { + _, err := m.MsgCollection.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": docIDs}}) return err } -// model -func (d *db.DataBases) ReplaceMsgToBlankByIndex(suffixID string, index int) (replaceMaxSeq uint32, err error) { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - userChat := &UserChat{} - err = c.FindOne(ctx, bson.M{"uid": suffixID}).Decode(&userChat) - if err != nil { - return 0, err - } - for i, msg := range userChat.Msg { - if i <= index { - msgPb := &sdkws.MsgData{} - if err = proto.Unmarshal(msg.Msg, msgPb); err != nil { - continue - } - newMsgPb := &sdkws.MsgData{Seq: msgPb.Seq} - bytes, err := proto.Marshal(newMsgPb) - if err != nil { - continue - } - msg.Msg = bytes - msg.SendTime = 0 - replaceMaxSeq = msgPb.Seq - } - } - _, err = c.UpdateOne(ctx, bson.M{"uid": suffixID}, bson.M{"$set": bson.M{"msg": userChat.Msg}}) - return 
replaceMaxSeq, err -} - -func (d *db.DataBases) GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - regex := fmt.Sprintf("^%s", ID) - findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": -1}) - var userChats []UserChat - cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts) - if err != nil { - return nil, err - } - err = cursor.All(ctx, &userChats) - if err != nil { - return nil, utils.Wrap(err, "") - } - if len(userChats) > 0 { - if len(userChats[0].Msg) > 0 { - msgPb := &sdkws.MsgData{} - err = proto.Unmarshal(userChats[0].Msg[len(userChats[0].Msg)-1].Msg, msgPb) - if err != nil { - return nil, utils.Wrap(err, "") - } - return msgPb, nil - } - return nil, errors.New("len(userChats[0].Msg) < 0") - } - return nil, nil -} - -func (d *db.DataBases) GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - regex := fmt.Sprintf("^%s", ID) - findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": 1}) - var userChats []UserChat - cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts) - if err != nil { - return nil, err - } - err = cursor.All(ctx, &userChats) - if err != nil { - return nil, utils.Wrap(err, "") - } - var oldestMsg []byte - if len(userChats) > 0 { - for _, v := range userChats[0].Msg { - if v.SendTime != 0 { - oldestMsg = v.Msg - break - } - } - if len(oldestMsg) == 0 { - oldestMsg = userChats[0].Msg[len(userChats[0].Msg)-1].Msg - } - msgPb := &sdkws.MsgData{} - err = proto.Unmarshal(oldestMsg, msgPb) - if err != nil { - return nil, utils.Wrap(err, "") - } - return msgPb, nil - } - return nil, nil -} - -func (d 
*db.DataBases) GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) { - var hasSeqList []uint32 - singleCount := 0 - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - - m := func(uid string, seqList []uint32) map[string][]uint32 { - t := make(map[string][]uint32) - for i := 0; i < len(seqList); i++ { - seqUid := getSeqUid(uid, seqList[i]) - if value, ok := t[seqUid]; !ok { - var temp []uint32 - t[seqUid] = append(temp, seqList[i]) - } else { - t[seqUid] = append(value, seqList[i]) - } - } - return t - }(uid, seqList) - sChat := UserChat{} - for seqUid, value := range m { - if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil { - log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error()) - continue - } - singleCount = 0 - for i := 0; i < len(sChat.Msg); i++ { - msg := new(sdkws.MsgData) - if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { - log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error()) - return nil, err - } - if isContainInt32(msg.Seq, value) { - seqMsg = append(seqMsg, msg) - hasSeqList = append(hasSeqList, msg.Seq) - singleCount++ - if singleCount == len(value) { - break - } - } - } - } - if len(hasSeqList) != len(seqList) { - var diff []uint32 - diff = utils.Difference(hasSeqList, seqList) - exceptionMSg := genExceptionMessageBySeqList(diff) - seqMsg = append(seqMsg, exceptionMSg...) 
- - } - return seqMsg, nil -} -func (d *db.DataBases) GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) { - var hasSeqList []uint32 - singleCount := 0 - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - - m := func(uid string, seqList []uint32) map[string][]uint32 { - t := make(map[string][]uint32) - for i := 0; i < len(seqList); i++ { - seqUid := getSeqUid(uid, seqList[i]) - if value, ok := t[seqUid]; !ok { - var temp []uint32 - t[seqUid] = append(temp, seqList[i]) - } else { - t[seqUid] = append(value, seqList[i]) - } - } - return t - }(groupID, seqList) - sChat := UserChat{} - for seqUid, value := range m { - if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil { - log.NewError(operationID, "not find seqGroupID", seqUid, value, groupID, seqList, err.Error()) - continue - } - singleCount = 0 - for i := 0; i < len(sChat.Msg); i++ { - msg := new(sdkws.MsgData) - if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { - log.NewError(operationID, "Unmarshal err", seqUid, value, groupID, seqList, err.Error()) - return nil, err - } - if isContainInt32(msg.Seq, value) { - seqMsg = append(seqMsg, msg) - hasSeqList = append(hasSeqList, msg.Seq) - singleCount++ - if singleCount == len(value) { - break - } - } - } - } - if len(hasSeqList) != len(seqList) { - var diff []uint32 - diff = utils.Difference(hasSeqList, seqList) - exceptionMSg := genExceptionSuperGroupMessageBySeqList(diff, groupID) - seqMsg = append(seqMsg, exceptionMSg...) 
- - } - return seqMsg, nil -} - -func (d *db.DataBases) GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, unexistSeqList []uint32, err error) { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - sChat := UserChat{} - if err = c.FindOne(ctx, bson.M{"uid": suffixUserID}).Decode(&sChat); err != nil { - log.NewError(operationID, "not find seqUid", suffixUserID, err.Error()) - return nil, nil, nil, utils.Wrap(err, "") - } - singleCount := 0 - var hasSeqList []uint32 - for i := 0; i < len(sChat.Msg); i++ { - msg := new(sdkws.MsgData) - if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil { - log.NewError(operationID, "Unmarshal err", msg.String(), err.Error()) - return nil, nil, nil, err - } - if isContainInt32(msg.Seq, seqList) { - indexList = append(indexList, i) - seqMsg = append(seqMsg, msg) - hasSeqList = append(hasSeqList, msg.Seq) - singleCount++ - if singleCount == len(seqList) { - break - } - } - } - for _, i := range seqList { - if isContainInt32(i, hasSeqList) { - continue - } - unexistSeqList = append(unexistSeqList, i) - } - return seqMsg, indexList, unexistSeqList, nil -} - -func (d *db.DataBases) SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - newTime := getCurrentTimestampByMill() - operationID := "" - seqUid := getSeqUid(uid, m.MsgData.Seq) - filter := bson.M{"uid": seqUid} - var err error - sMsg := MsgInfo{} - sMsg.SendTime = sendTime - if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil { - return utils.Wrap(err, "") - } - err = c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": 
sMsg}}).Err() - log.NewWarn(operationID, "get mgoSession cost time", getCurrentTimestampByMill()-newTime) - if err != nil { - sChat := UserChat{} - sChat.UID = seqUid - sChat.Msg = append(sChat.Msg, sMsg) - if _, err = c.InsertOne(ctx, &sChat); err != nil { - log.NewDebug(operationID, "InsertOne failed", filter) - return utils.Wrap(err, "") - } - } else { - log.NewDebug(operationID, "FindOneAndUpdate ok", filter) - } - - log.NewDebug(operationID, "find mgo uid cost time", getCurrentTimestampByMill()-newTime) - return nil -} - -func (d *db.DataBases) SaveUserChat(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error { - var seqUid string - newTime := getCurrentTimestampByMill() - session := d.mgoSession.Clone() - if session == nil { - return errors.New("session == nil") - } - defer session.Close() - log.NewDebug("", "get mgoSession cost time", getCurrentTimestampByMill()-newTime) - c := session.DB(config.Config.Mongo.DBDatabase).C(cChat) - seqUid = getSeqUid(uid, m.MsgData.Seq) - n, err := c.Find(bson.M{"uid": seqUid}).Count() - if err != nil { - return err - } - log.NewDebug("", "find mgo uid cost time", getCurrentTimestampByMill()-newTime) - sMsg := MsgInfo{} - sMsg.SendTime = sendTime - if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil { - return err - } - if n == 0 { - sChat := UserChat{} - sChat.UID = seqUid - sChat.Msg = append(sChat.Msg, sMsg) - err = c.Insert(&sChat) - if err != nil { - return err - } - } else { - err = c.Update(bson.M{"uid": seqUid}, bson.M{"$push": bson.M{"msg": sMsg}}) - if err != nil { - return err - } - } - log.NewDebug("", "insert mgo data cost time", getCurrentTimestampByMill()-newTime) - return nil -} - -func (d *db.DataBases) CleanUpUserMsgFromMongo(userID string, operationID string) error { - ctx := context.Background() - c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) - maxSeq, err := d.GetUserMaxSeq(userID) - if err == redis.Nil { - return nil - } - if err != nil { - return utils.Wrap(err, 
"") - } - - seqUsers := getSeqUserIDList(userID, uint32(maxSeq)) - log.Error(operationID, "getSeqUserIDList", seqUsers) - _, err = c.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": seqUsers}}) - if err == mongo.ErrNoDocuments { - return nil - } - return utils.Wrap(err, "") +func (m *MsgMongoDriver) UpdateOneDoc(ctx context.Context, msg *table.MsgDocModel) error { + _, err := m.MsgCollection.UpdateOne(ctx, bson.M{"uid": msg.DocID}, bson.M{"$set": bson.M{"msg": msg.Msg}}) + return err } diff --git a/pkg/common/db/unrelation/super_group.go b/pkg/common/db/unrelation/super_group.go index 720693d6d..7b74b76f4 100644 --- a/pkg/common/db/unrelation/super_group.go +++ b/pkg/common/db/unrelation/super_group.go @@ -127,7 +127,7 @@ type SuperGroupMongoDriver struct { //} func (s *SuperGroupMongoDriver) CreateSuperGroup(ctx context.Context, groupID string, initMemberIDs []string, tx ...any) error { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) _, err := s.superGroupCollection.InsertOne(ctx, &unrelation.SuperGroupModel{ GroupID: groupID, MemberIDs: initMemberIDs, @@ -147,7 +147,7 @@ func (s *SuperGroupMongoDriver) CreateSuperGroup(ctx context.Context, groupID st } func (s *SuperGroupMongoDriver) TakeSuperGroup(ctx context.Context, groupID string, tx ...any) (group *unrelation.SuperGroupModel, err error) { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) if err := s.superGroupCollection.FindOne(ctx, bson.M{"group_id": groupID}).Decode(&group); err != nil { return nil, utils.Wrap(err, "") } @@ -155,14 +155,15 @@ func (s *SuperGroupMongoDriver) TakeSuperGroup(ctx context.Context, groupID stri } func (s *SuperGroupMongoDriver) FindSuperGroup(ctx context.Context, groupIDs []string, tx ...any) (groups []*unrelation.SuperGroupModel, err error) { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) cursor, err := s.superGroupCollection.Find(ctx, bson.M{"group_id": bson.M{ "$in": groupIDs, }}) if err != nil { - return nil, utils.Wrap(err, "") + return nil, err } defer 
cursor.Close(ctx) + if err := cursor.All(ctx, &groups); err != nil { return nil, utils.Wrap(err, "") } @@ -170,7 +171,7 @@ func (s *SuperGroupMongoDriver) FindSuperGroup(ctx context.Context, groupIDs []s } func (s *SuperGroupMongoDriver) AddUserToSuperGroup(ctx context.Context, groupID string, userIDs []string, tx ...any) error { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) opts := options.Session().SetDefaultReadConcern(readconcern.Majority()) return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error { _, err := s.superGroupCollection.UpdateOne(sCtx, bson.M{"group_id": groupID}, bson.M{"$addToSet": bson.M{"member_id_list": bson.M{"$each": userIDs}}}) @@ -194,7 +195,7 @@ func (s *SuperGroupMongoDriver) AddUserToSuperGroup(ctx context.Context, groupID } func (s *SuperGroupMongoDriver) RemoverUserFromSuperGroup(ctx context.Context, groupID string, userIDs []string, tx ...any) error { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) opts := options.Session().SetDefaultReadConcern(readconcern.Majority()) return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error { _, err := s.superGroupCollection.UpdateOne(sCtx, bson.M{"group_id": groupID}, bson.M{"$pull": bson.M{"member_id_list": bson.M{"$in": userIDs}}}) @@ -212,14 +213,14 @@ func (s *SuperGroupMongoDriver) RemoverUserFromSuperGroup(ctx context.Context, g } func (s *SuperGroupMongoDriver) GetSuperGroupByUserID(ctx context.Context, userID string, tx ...any) (*unrelation.UserToSuperGroupModel, error) { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) var user unrelation.UserToSuperGroupModel err := s.userToSuperGroupCollection.FindOne(ctx, bson.M{"user_id": userID}).Decode(&user) return &user, utils.Wrap(err, "") } func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) group, err := s.TakeSuperGroup(ctx, groupID, tx...) 
if err != nil { return err @@ -231,7 +232,7 @@ func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID st } //func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error { -// ctx = s.getTxCtx(ctx, tx) +// ctx = getTxCtx(ctx, tx) // opts := options.Session().SetDefaultReadConcern(readconcern.Majority()) // return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error { // superGroup := &unrelation.SuperGroupModel{} @@ -249,7 +250,7 @@ func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID st //} func (s *SuperGroupMongoDriver) RemoveGroupFromUser(ctx context.Context, groupID string, userIDs []string, tx ...any) error { - ctx = s.getTxCtx(ctx, tx) + ctx = getTxCtx(ctx, tx) _, err := s.userToSuperGroupCollection.UpdateOne(ctx, bson.M{"user_id": bson.M{"$in": userIDs}}, bson.M{"$pull": bson.M{"group_id_list": groupID}}) return utils.Wrap(err, "") } diff --git a/pkg/common/http/http_client.go b/pkg/common/http/http_client.go index 5d75fe54f..319ca8c6d 100644 --- a/pkg/common/http/http_client.go +++ b/pkg/common/http/http_client.go @@ -7,7 +7,7 @@ package http import ( - cbApi "Open_IM/pkg/callback_struct" + cbApi "Open_IM/pkg/callbackstruct" "Open_IM/pkg/common/config" "Open_IM/pkg/common/constant" "bytes" diff --git a/pkg/common/kafka/producer.go b/pkg/common/kafka/producer.go index 1df0e0ed8..c894fd2b4 100644 --- a/pkg/common/kafka/producer.go +++ b/pkg/common/kafka/producer.go @@ -9,7 +9,7 @@ import ( "github.com/Shopify/sarama" "github.com/golang/protobuf/proto" - promePkg "Open_IM/pkg/common/prometheus" + prome "Open_IM/pkg/common/prometheus" ) type Producer struct { @@ -66,7 +66,7 @@ func (p *Producer) SendMessage(m proto.Message, key string, operationID string) a, b, c := p.producer.SendMessage(kMsg) log.Info(operationID, "ByteEncoder SendMessage end", "key ", kMsg.Key.Length(), kMsg.Value.Length(), p.producer) if c == nil { - 
promePkg.PromeInc(promePkg.SendMsgCounter) + prome.PromeInc(prome.SendMsgCounter) } return a, b, utils.Wrap(c, "") } diff --git a/pkg/common/prometheus/gather.go b/pkg/common/prome/gather.go similarity index 99% rename from pkg/common/prometheus/gather.go rename to pkg/common/prome/gather.go index 3dd7d05a2..5f7a2293d 100644 --- a/pkg/common/prometheus/gather.go +++ b/pkg/common/prome/gather.go @@ -1,4 +1,4 @@ -package prometheus +package prome import ( "github.com/prometheus/client_golang/prometheus" diff --git a/pkg/common/prometheus/grpc.go b/pkg/common/prome/grpc.go similarity index 98% rename from pkg/common/prometheus/grpc.go rename to pkg/common/prome/grpc.go index d0d513b00..fe0a633fa 100644 --- a/pkg/common/prometheus/grpc.go +++ b/pkg/common/prome/grpc.go @@ -1,4 +1,4 @@ -package prometheus +package prome import ( "context" diff --git a/pkg/common/prometheus/prometheus.go b/pkg/common/prome/prometheus.go similarity index 98% rename from pkg/common/prometheus/prometheus.go rename to pkg/common/prome/prometheus.go index 7c497a78c..f6d70d9b8 100644 --- a/pkg/common/prometheus/prometheus.go +++ b/pkg/common/prome/prometheus.go @@ -1,4 +1,4 @@ -package prometheus +package prome import ( "Open_IM/pkg/common/config" diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 2590f4469..140f2586d 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -7,6 +7,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/jinzhu/copier" "github.com/pkg/errors" + "hash/crc32" "math/rand" "reflect" "runtime" @@ -226,3 +227,7 @@ func ProtoToMap(pb proto.Message, idFix bool) map[string]interface{} { } return out } + +func GetHashCode(s string) uint32 { + return crc32.ChecksumIEEE([]byte(s)) +}