This commit is contained in:
wangchuxiao 2023-02-15 15:52:32 +08:00
parent 85ebf24325
commit 6619183a48
62 changed files with 1319 additions and 2491 deletions

View File

@ -28,7 +28,7 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
//"syscall" //"syscall"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
) )
// @title open-IM-Server API // @title open-IM-Server API
@ -50,11 +50,11 @@ func main() {
log.Info("load config: ", config.Config) log.Info("load config: ", config.Config)
r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewApiRequestCounter() prome.NewApiRequestCounter()
promePkg.NewApiRequestFailedCounter() prome.NewApiRequestFailedCounter()
promePkg.NewApiRequestSuccessCounter() prome.NewApiRequestSuccessCounter()
r.Use(promePkg.PromeTheusMiddleware) r.Use(prome.PromeTheusMiddleware)
r.GET("/metrics", promePkg.PrometheusHandler()) r.GET("/metrics", prome.PrometheusHandler())
} }
// user routing group, which handles user registration and login services // user routing group, which handles user registration and login services
userRouterGroup := r.Group("/user") userRouterGroup := r.Group("/user")

View File

@ -1,7 +1,7 @@
package main package main
import ( import (
"Open_IM/internal/cron_task" "Open_IM/internal/crontask"
"flag" "flag"
"fmt" "fmt"
"time" "time"

View File

@ -1,7 +1,7 @@
package main package main
import ( import (
"Open_IM/internal/msg_transfer/logic" "Open_IM/internal/msgtransfer"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
@ -16,8 +16,8 @@ func main() {
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.MessageTransferPrometheusPort[0], "MessageTransferPrometheusPort default listen port") prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.MessageTransferPrometheusPort[0], "MessageTransferPrometheusPort default listen port")
flag.Parse() flag.Parse()
log.NewPrivateLog(constant.LogFileName) log.NewPrivateLog(constant.LogFileName)
logic.Init() msgTransfer := msgtransfer.NewMsgTransfer()
fmt.Println("start msg_transfer server ", ", OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start msg_transfer server ", ", OpenIM version: ", constant.CurrentVersion, "\n")
logic.Run(*prometheusPort) msgTransfer.Run(*prometheusPort)
wg.Wait() wg.Wait()
} }

View File

@ -4,7 +4,7 @@ import (
rpcAuth "Open_IM/internal/rpc/auth" rpcAuth "Open_IM/internal/rpc/auth"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus" "Open_IM/pkg/common/prome"
"flag" "flag"
"fmt" "fmt"
) )
@ -17,7 +17,7 @@ func main() {
fmt.Println("start auth rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start auth rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort) rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := prome.StartPromeSrv(*prometheusPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -4,7 +4,7 @@ import (
rpcConversation "Open_IM/internal/rpc/conversation" rpcConversation "Open_IM/internal/rpc/conversation"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"flag" "flag"
"fmt" "fmt"
) )
@ -17,7 +17,7 @@ func main() {
fmt.Println("start conversation rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start conversation rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcConversation.NewRpcConversationServer(*rpcPort) rpcServer := rpcConversation.NewRpcConversationServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := prome.StartPromeSrv(*prometheusPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -4,7 +4,7 @@ import (
"Open_IM/internal/rpc/friend" "Open_IM/internal/rpc/friend"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"flag" "flag"
"fmt" "fmt"
) )
@ -17,7 +17,7 @@ func main() {
fmt.Println("start friend rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start friend rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := friend.NewFriendServer(*rpcPort) rpcServer := friend.NewFriendServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := prome.StartPromeSrv(*prometheusPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -4,7 +4,7 @@ import (
"Open_IM/internal/rpc/group" "Open_IM/internal/rpc/group"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"flag" "flag"
"fmt" "fmt"
) )
@ -17,7 +17,7 @@ func main() {
fmt.Println("start group rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start group rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := group.NewGroupServer(*rpcPort) rpcServer := group.NewGroupServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := prome.StartPromeSrv(*prometheusPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -4,7 +4,7 @@ import (
"Open_IM/internal/rpc/msg" "Open_IM/internal/rpc/msg"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"flag" "flag"
"fmt" "fmt"
) )
@ -17,7 +17,7 @@ func main() {
fmt.Println("start msg rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start msg rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := msg.NewRpcChatServer(*rpcPort) rpcServer := msg.NewRpcChatServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := prome.StartPromeSrv(*prometheusPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -4,7 +4,7 @@ import (
"Open_IM/internal/rpc/user" "Open_IM/internal/rpc/user"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"flag" "flag"
"fmt" "fmt"
) )
@ -17,7 +17,7 @@ func main() {
fmt.Println("start user rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n") fmt.Println("start user rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := user.NewUserServer(*rpcPort) rpcServer := user.NewUserServer(*rpcPort)
go func() { go func() {
err := promePkg.StartPromeSrv(*prometheusPort) err := prome.StartPromeSrv(*prometheusPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -51,9 +51,6 @@ kafka:
ws2mschat: ws2mschat:
addr: [ 127.0.0.1:9092 ] #kafka配置默认即可 addr: [ 127.0.0.1:9092 ] #kafka配置默认即可
topic: "ws2ms_chat" #用于mongo和mysql保存消息 topic: "ws2ms_chat" #用于mongo和mysql保存消息
# ws2mschatoffline:
# addr: [ 127.0.0.1:9092 ] #kafka配置默认即可
# topic: "ws2ms_chat_offline" #原为离线用户消息处理,目前暂时弃用
msgtomongo: msgtomongo:
addr: [ 127.0.0.1:9092 ] #kafka配置默认即可 addr: [ 127.0.0.1:9092 ] #kafka配置默认即可
topic: "msg_to_mongo" topic: "msg_to_mongo"
@ -223,8 +220,6 @@ push:
appSecret: appSecret:
enable: false enable: false
manager: manager:
#app管理员userID和对应的secret 建议修改。 用于管理后台登录也可以用户管理后台对应的api #app管理员userID和对应的secret 建议修改。 用于管理后台登录也可以用户管理后台对应的api
appManagerUid: [ "openIM123456","openIM654321", "openIM333", "openIMAdmin"] appManagerUid: [ "openIM123456","openIM654321", "openIM333", "openIMAdmin"]
@ -238,8 +233,6 @@ multiloginpolicy: 1
#msg log insert to db #msg log insert to db
chatpersistencemysql: true chatpersistencemysql: true
#可靠性存储
reliablestorage: false
#消息缓存时间 #消息缓存时间
msgCacheTimeout: 86400 msgCacheTimeout: 86400
#群聊已读开启 #群聊已读开启

View File

@ -1,4 +1,3 @@
notification: notification:
groupCreated: groupCreated:
conversation: conversation:

View File

@ -4,172 +4,97 @@ import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db"
"Open_IM/pkg/common/db/cache"
"Open_IM/pkg/common/db/controller"
"Open_IM/pkg/common/db/mongo" "Open_IM/pkg/common/db/mongo"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/tracelog"
sdkws "Open_IM/pkg/proto/sdkws" sdkws "Open_IM/pkg/proto/sdkws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context"
"math" "math"
"strconv" "strconv"
"strings" "strings"
goRedis "github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
) )
const oldestList = 0 type SeqCheckInterface interface {
const newestList = -1 ClearAll() error
}
func ResetUserGroupMinSeq(operationID, groupID string, userIDList []string) error { type ClearMsgCronTask struct {
var delStruct delMsgRecursionStruct msgModel controller.MsgInterface
minSeq, err := deleteMongoMsg(operationID, groupID, oldestList, &delStruct) userModel controller.UserInterface
if err != nil { groupModel controller.GroupInterface
log.NewError(operationID, utils.GetSelfFuncName(), groupID, "deleteMongoMsg failed") cache cache.Cache
}
func (c *ClearMsgCronTask) getCronTaskOperationID() string {
return cronTaskOperationID + utils.OperationIDGenerator()
}
func (c *ClearMsgCronTask) ClearAll() {
operationID := c.getCronTaskOperationID()
ctx := context.Background()
tracelog.SetOperationID(ctx, operationID)
log.NewInfo(operationID, "========================= start del cron task =========================")
var err error
userIDList, err := c.userModel.GetAllUserID(ctx)
if err == nil {
c.StartClearMsg(operationID, userIDList)
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
} }
if minSeq == 0 { // working group msg clear
return nil workingGroupIDList, err := im_mysql_model.GetGroupIDListByGroupType(constant.WorkingGroup)
if err == nil {
c.StartClearWorkingGroupMsg(operationID, workingGroupIDList)
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
} }
log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDList:", delStruct, "minSeq", minSeq)
log.NewInfo(operationID, "========================= start del cron finished =========================")
}
func (c *ClearMsgCronTask) StartClearMsg(operationID string, userIDList []string) {
log.NewDebug(operationID, utils.GetSelfFuncName(), "userIDList: ", userIDList)
for _, userID := range userIDList { for _, userID := range userIDList {
userMinSeq, err := db.DB.GetGroupUserMinSeq(groupID, userID) if err := DeleteUserMsgsAndSetMinSeq(operationID, userID); err != nil {
if err != nil && err != goRedis.Nil { log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), userID)
log.NewError(operationID, utils.GetSelfFuncName(), "GetGroupUserMinSeq failed", groupID, userID, err.Error()) }
if err := checkMaxSeqWithMongo(operationID, userID, constant.WriteDiffusion); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), userID, err)
}
}
}
func (c *ClearMsgCronTask) StartClearWorkingGroupMsg(operationID string, workingGroupIDList []string) {
log.NewDebug(operationID, utils.GetSelfFuncName(), "workingGroupIDList: ", workingGroupIDList)
for _, groupID := range workingGroupIDList {
userIDList, err := rocksCache.GetGroupMemberIDListFromCache(groupID)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID)
continue continue
} }
if userMinSeq > uint64(minSeq) { log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "workingGroupIDList:", userIDList)
err = db.DB.SetGroupUserMinSeq(groupID, userID, userMinSeq) if err := DeleteUserSuperGroupMsgsAndSetMinSeq(operationID, groupID, userIDList); err != nil {
} else { log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userIDList)
err = db.DB.SetGroupUserMinSeq(groupID, userID, uint64(minSeq))
} }
if err != nil { if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userID, userMinSeq, minSeq) log.NewError(operationID, utils.GetSelfFuncName(), groupID, err)
} }
} }
return nil
} }
func DeleteMongoMsgAndResetRedisSeq(operationID, userID string) error { func checkMaxSeqWithMongo(operationID, sourceID string, diffusionType int) error {
var delStruct delMsgRecursionStruct
minSeq, err := deleteMongoMsg(operationID, userID, oldestList, &delStruct)
if err != nil {
return utils.Wrap(err, "")
}
if minSeq == 0 {
return nil
}
log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDStruct: ", delStruct, "minSeq", minSeq)
err = db.DB.SetUserMinSeq(userID, minSeq)
return utils.Wrap(err, "")
}
// del list
func delMongoMsgsPhysical(uidList []string) error {
if len(uidList) > 0 {
err := db.DB.DelMongoMsgs(uidList)
if err != nil {
return utils.Wrap(err, "DelMongoMsgs failed")
}
}
return nil
}
type delMsgRecursionStruct struct {
minSeq uint32
delUidList []string
}
func (d *delMsgRecursionStruct) getSetMinSeq() uint32 {
return d.minSeq
}
// index 0....19(del) 20...69
// seq 70
// set minSeq 21
// recursion 删除list并且返回设置的最小seq
func deleteMongoMsg(operationID string, ID string, index int64, delStruct *delMsgRecursionStruct) (uint32, error) {
// find from oldest list
msgs, err := db.DB.GetUserMsgListByIndex(ID, index)
if err != nil || msgs.UID == "" {
if err != nil {
if err == mongoDB.ErrMsgListNotExist {
log.NewInfo(operationID, utils.GetSelfFuncName(), "ID:", ID, "index:", index, err.Error())
} else {
log.NewError(operationID, utils.GetSelfFuncName(), "GetUserMsgListByIndex failed", err.Error(), index, ID)
}
}
// 获取报错或者获取不到了物理删除并且返回seq
err = delMongoMsgsPhysical(delStruct.delUidList)
if err != nil {
return 0, err
}
return delStruct.getSetMinSeq() + 1, nil
}
log.NewDebug(operationID, "ID:", ID, "index:", index, "uid:", msgs.UID, "len:", len(msgs.Msg))
if len(msgs.Msg) > mongoDB.GetSingleGocMsgNum() {
log.NewWarn(operationID, utils.GetSelfFuncName(), "msgs too large", len(msgs.Msg), msgs.UID)
}
if msgs.Msg[len(msgs.Msg)-1].SendTime+(int64(config.Config.Mongo.DBRetainChatRecords)*24*60*60*1000) > utils.GetCurrentTimestampByMill() && msgListIsFull(msgs) {
delStruct.delUidList = append(delStruct.delUidList, msgs.UID)
lastMsgPb := &sdkws.MsgData{}
err = proto.Unmarshal(msgs.Msg[len(msgs.Msg)-1].Msg, lastMsgPb)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
return 0, utils.Wrap(err, "proto.Unmarshal failed")
}
delStruct.minSeq = lastMsgPb.Seq
} else {
var hasMarkDelFlag bool
for _, msg := range msgs.Msg {
msgPb := &sdkws.MsgData{}
err = proto.Unmarshal(msg.Msg, msgPb)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
return 0, utils.Wrap(err, "proto.Unmarshal failed")
}
if utils.GetCurrentTimestampByMill() > msg.SendTime+(int64(config.Config.Mongo.DBRetainChatRecords)*24*60*60*1000) {
msgPb.Status = constant.MsgDeleted
bytes, _ := proto.Marshal(msgPb)
msg.Msg = bytes
msg.SendTime = 0
hasMarkDelFlag = true
} else {
if err := delMongoMsgsPhysical(delStruct.delUidList); err != nil {
return 0, err
}
if hasMarkDelFlag {
if err := db.DB.UpdateOneMsgList(msgs); err != nil {
return delStruct.getSetMinSeq(), utils.Wrap(err, "")
}
}
return msgPb.Seq + 1, nil
}
}
}
log.NewDebug(operationID, ID, "continue to", delStruct)
// 继续递归 index+1
seq, err := deleteMongoMsg(operationID, ID, index+1, delStruct)
return seq, utils.Wrap(err, "deleteMongoMsg failed")
}
func msgListIsFull(chat *mongoDB.UserChat) bool {
index, _ := strconv.Atoi(strings.Split(chat.UID, ":")[1])
if index == 0 {
if len(chat.Msg) >= 4999 {
return true
}
}
if len(chat.Msg) >= 5000 {
return true
}
return false
}
func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error {
var seqRedis uint64 var seqRedis uint64
var err error var err error
if diffusionType == constant.WriteDiffusion { if diffusionType == constant.WriteDiffusion {
seqRedis, err = db.DB.GetUserMaxSeq(ID) seqRedis, err = db.DB.GetUserMaxSeq(sourceID)
} else { } else {
seqRedis, err = db.DB.GetGroupMaxSeq(ID) seqRedis, err = db.DB.GetGroupMaxSeq(sourceID)
} }
if err != nil { if err != nil {
if err == goRedis.Nil { if err == goRedis.Nil {
@ -177,7 +102,7 @@ func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error {
} }
return utils.Wrap(err, "GetUserMaxSeq failed") return utils.Wrap(err, "GetUserMaxSeq failed")
} }
msg, err := db.DB.GetNewestMsg(ID) msg, err := db.DB.GetNewestMsg(sourceID)
if err != nil { if err != nil {
return utils.Wrap(err, "GetNewestMsg failed") return utils.Wrap(err, "GetNewestMsg failed")
} }
@ -185,9 +110,9 @@ func checkMaxSeqWithMongo(operationID, ID string, diffusionType int) error {
return nil return nil
} }
if math.Abs(float64(msg.Seq-uint32(seqRedis))) > 10 { if math.Abs(float64(msg.Seq-uint32(seqRedis))) > 10 {
log.NewWarn(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, ID, "redis maxSeq is different with msg.Seq > 10", "status: ", msg.Status, msg.SendTime) log.NewWarn(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, sourceID, "redis maxSeq is different with msg.Seq > 10", "status: ", msg.Status, msg.SendTime)
} else { } else {
log.NewInfo(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, ID, "seq and msg OK", "status:", msg.Status, msg.SendTime) log.NewInfo(operationID, utils.GetSelfFuncName(), "seqMongo, seqRedis", msg.Seq, seqRedis, sourceID, "seq and msg OK", "status:", msg.Status, msg.SendTime)
} }
return nil return nil
} }

View File

@ -2,8 +2,7 @@ package cronTask
import ( import (
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
mongo2 "Open_IM/pkg/common/db/mongo" "Open_IM/pkg/proto/sdkws"
sdkws "Open_IM/pkg/proto/sdkws"
"context" "context"
"fmt" "fmt"
"strconv" "strconv"
@ -59,7 +58,7 @@ func CreateChat(userChat *mongo2.UserChat) error {
return err return err
} }
func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) { func TestDeleteUserMsgsAndSetMinSeq(t *testing.T) {
operationID := getCronTaskOperationID() operationID := getCronTaskOperationID()
redisClient = redis.NewClient(&redis.Options{ redisClient = redis.NewClient(&redis.Options{
Addr: "127.0.0.1:16379", Addr: "127.0.0.1:16379",
@ -81,7 +80,7 @@ func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) {
userChat := GenUserChat(1, 500, 200, 0, testUID1) userChat := GenUserChat(1, 500, 200, 0, testUID1)
err = CreateChat(userChat) err = CreateChat(userChat)
if err := DeleteMongoMsgAndResetRedisSeq(operationID, testUID1); err != nil { if err := DeleteUserMsgsAndSetMinSeq(operationID, testUID1); err != nil {
t.Error("checkMaxSeqWithMongo failed", testUID1) t.Error("checkMaxSeqWithMongo failed", testUID1)
} }
if err := checkMaxSeqWithMongo(operationID, testUID1, constant.WriteDiffusion); err != nil { if err := checkMaxSeqWithMongo(operationID, testUID1, constant.WriteDiffusion); err != nil {
@ -94,7 +93,7 @@ func TestDeleteMongoMsgAndResetRedisSeq(t *testing.T) {
// for _, groupID := range testWorkingGroupIDList { // for _, groupID := range testWorkingGroupIDList {
// operationID = groupID + "-" + operationID // operationID = groupID + "-" + operationID
// log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "userIDList:", testUserIDList) // log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "userIDList:", testUserIDList)
// if err := ResetUserGroupMinSeq(operationID, groupID, testUserIDList); err != nil { // if err := DeleteUserSuperGroupMsgsAndSetMinSeq(operationID, groupID, testUserIDList); err != nil {
// t.Error("checkMaxSeqWithMongo failed", groupID) // t.Error("checkMaxSeqWithMongo failed", groupID)
// } // }
// if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil { // if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil {

View File

@ -2,10 +2,6 @@ package cronTask
import ( import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db/controller"
"Open_IM/pkg/common/db/mysql_model/im_mysql_model"
rocksCache "Open_IM/pkg/common/db/rocks_cache"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"fmt" "fmt"
@ -15,9 +11,10 @@ import (
) )
const cronTaskOperationID = "cronTaskOperationID-" const cronTaskOperationID = "cronTaskOperationID-"
const moduleName = "cron"
func StartCronTask(userID, workingGroupID string) { func StartCronTask(userID, workingGroupID string) {
log.NewPrivateLog("cron") log.NewPrivateLog(moduleName)
log.NewInfo(utils.OperationIDGenerator(), "start cron task", "cron config", config.Config.Mongo.ChatRecordsClearTime) log.NewInfo(utils.OperationIDGenerator(), "start cron task", "cron config", config.Config.Mongo.ChatRecordsClearTime)
fmt.Println("cron task start, config", config.Config.Mongo.ChatRecordsClearTime) fmt.Println("cron task start, config", config.Config.Mongo.ChatRecordsClearTime)
if userID != "" { if userID != "" {
@ -44,59 +41,3 @@ func StartCronTask(userID, workingGroupID string) {
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
} }
} }
func getCronTaskOperationID() string {
return cronTaskOperationID + utils.OperationIDGenerator()
}
func ClearAll() {
operationID := getCronTaskOperationID()
log.NewInfo(operationID, "========================= start del cron task =========================")
var err error
userIDList, err := im_mysql_model.SelectAllUserID()
if err == nil {
StartClearMsg(operationID, userIDList)
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
}
// working group msg clear
workingGroupIDList, err := im_mysql_model.GetGroupIDListByGroupType(constant.WorkingGroup)
if err == nil {
StartClearWorkingGroupMsg(operationID, workingGroupIDList)
} else {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error())
}
log.NewInfo(operationID, "========================= start del cron finished =========================")
}
func StartClearMsg(operationID string, userIDList []string) {
log.NewDebug(operationID, utils.GetSelfFuncName(), "userIDList: ", userIDList)
for _, userID := range userIDList {
if err := DeleteMongoMsgAndResetRedisSeq(operationID, userID); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), userID)
}
if err := checkMaxSeqWithMongo(operationID, userID, constant.WriteDiffusion); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), userID, err)
}
}
}
func StartClearWorkingGroupMsg(operationID string, workingGroupIDList []string) {
log.NewDebug(operationID, utils.GetSelfFuncName(), "workingGroupIDList: ", workingGroupIDList)
for _, groupID := range workingGroupIDList {
userIDList, err := rocksCache.GetGroupMemberIDListFromCache(groupID)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID)
continue
}
log.NewDebug(operationID, utils.GetSelfFuncName(), "groupID:", groupID, "workingGroupIDList:", userIDList)
if err := ResetUserGroupMinSeq(operationID, groupID, userIDList); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userIDList)
}
if err := checkMaxSeqWithMongo(operationID, groupID, constant.ReadDiffusion); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), groupID, err)
}
}
}

View File

@ -8,7 +8,7 @@ import (
"fmt" "fmt"
"sync" "sync"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"github.com/go-playground/validator/v10" "github.com/go-playground/validator/v10"
) )
@ -40,7 +40,7 @@ func Run(promethuesPort int) {
go ws.run() go ws.run()
go rpcSvr.run() go rpcSvr.run()
go func() { go func() {
err := promePkg.StartPromeSrv(promethuesPort) err := prome.StartPromeSrv(promethuesPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -5,7 +5,7 @@ import (
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" "Open_IM/pkg/common/prome"
pbChat "Open_IM/pkg/proto/msg" pbChat "Open_IM/pkg/proto/msg"
push "Open_IM/pkg/proto/push" push "Open_IM/pkg/proto/push"
pbRtc "Open_IM/pkg/proto/rtc" pbRtc "Open_IM/pkg/proto/rtc"
@ -51,18 +51,18 @@ func (ws *WServer) msgParse(conn *UserConn, binaryMsg []byte) {
case constant.WSGetNewestSeq: case constant.WSGetNewestSeq:
log.NewInfo(m.OperationID, "getSeqReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "getSeqReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.getSeqReq(conn, &m) ws.getSeqReq(conn, &m)
promePkg.PromeInc(promePkg.GetNewestSeqTotalCounter) prome.PromeInc(prome.GetNewestSeqTotalCounter)
case constant.WSSendMsg: case constant.WSSendMsg:
log.NewInfo(m.OperationID, "sendMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "sendMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.sendMsgReq(conn, &m) ws.sendMsgReq(conn, &m)
promePkg.PromeInc(promePkg.MsgRecvTotalCounter) prome.PromeInc(prome.MsgRecvTotalCounter)
case constant.WSSendSignalMsg: case constant.WSSendSignalMsg:
log.NewInfo(m.OperationID, "sendSignalMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "sendSignalMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.sendSignalMsgReq(conn, &m) ws.sendSignalMsgReq(conn, &m)
case constant.WSPullMsgBySeqList: case constant.WSPullMsgBySeqList:
log.NewInfo(m.OperationID, "pullMsgBySeqListReq ", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "pullMsgBySeqListReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.pullMsgBySeqListReq(conn, &m) ws.pullMsgBySeqListReq(conn, &m)
promePkg.PromeInc(promePkg.PullMsgBySeqListTotalCounter) prome.PromeInc(prome.PullMsgBySeqListTotalCounter)
case constant.WsLogoutMsg: case constant.WsLogoutMsg:
log.NewInfo(m.OperationID, "conn.Close()", m.SendID, m.MsgIncr, m.ReqIdentifier) log.NewInfo(m.OperationID, "conn.Close()", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.userLogoutReq(conn, &m) ws.userLogoutReq(conn, &m)

View File

@ -4,7 +4,7 @@ import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prome"
"Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tokenverify"
pbRelay "Open_IM/pkg/proto/relay" pbRelay "Open_IM/pkg/proto/relay"
sdkws "Open_IM/pkg/proto/sdkws" sdkws "Open_IM/pkg/proto/sdkws"
@ -34,14 +34,14 @@ type RPCServer struct {
} }
func initPrometheus() { func initPrometheus() {
promePkg.NewMsgRecvTotalCounter() prome.NewMsgRecvTotalCounter()
promePkg.NewGetNewestSeqTotalCounter() prome.NewGetNewestSeqTotalCounter()
promePkg.NewPullMsgBySeqListTotalCounter() prome.NewPullMsgBySeqListTotalCounter()
promePkg.NewMsgOnlinePushSuccessCounter() prome.NewMsgOnlinePushSuccessCounter()
promePkg.NewOnlineUserGauges() prome.NewOnlineUserGauges()
//promePkg.NewSingleChatMsgRecvSuccessCounter() //prome.NewSingleChatMsgRecvSuccessCounter()
//promePkg.NewGroupChatMsgRecvSuccessCounter() //prome.NewGroupChatMsgRecvSuccessCounter()
//promePkg.NewWorkSuperGroupChatMsgRecvSuccessCounter() //prome.NewWorkSuperGroupChatMsgRecvSuccessCounter()
} }
func (r *RPCServer) onInit(rpcPort int) { func (r *RPCServer) onInit(rpcPort int) {
@ -67,11 +67,11 @@ func (r *RPCServer) run() {
defer listener.Close() defer listener.Close()
var grpcOpts []grpc.ServerOption var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter() prome.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)
@ -205,7 +205,7 @@ func (r *RPCServer) SuperGroupOnlineBatchPushOneMsg(_ context.Context, req *pbRe
resultCode := sendMsgBatchToUser(userConn, replyBytes.Bytes(), req, platform, v) resultCode := sendMsgBatchToUser(userConn, replyBytes.Bytes(), req, platform, v)
if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) { if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) {
tempT.OnlinePush = true tempT.OnlinePush = true
promePkg.PromeInc(promePkg.MsgOnlinePushSuccessCounter) prome.PromeInc(prome.MsgOnlinePushSuccessCounter)
log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recvPlatForm", constant.PlatformIDToName(platform), "recvID", v) log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recvPlatForm", constant.PlatformIDToName(platform), "recvID", v)
temp.ResultCode = resultCode temp.ResultCode = resultCode
resp = append(resp, temp) resp = append(resp, temp)

View File

@ -5,7 +5,7 @@ import (
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tokenverify"
pbRelay "Open_IM/pkg/proto/relay" pbRelay "Open_IM/pkg/proto/relay"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
@ -352,7 +352,7 @@ func (ws *WServer) addUserConn(uid string, platformID int, conn *UserConn, token
for _, v := range ws.wsUserToConn { for _, v := range ws.wsUserToConn {
count = count + len(v) count = count + len(v)
} }
promePkg.PromeGaugeInc(promePkg.OnlineUserGauge) prome.PromeGaugeInc(prome.OnlineUserGauge)
log.Debug(operationID, "WS Add operation", "", "wsUser added", ws.wsUserToConn, "connection_uid", uid, "connection_platform", constant.PlatformIDToName(platformID), "online_user_num", len(ws.wsUserToConn), "online_conn_num", count) log.Debug(operationID, "WS Add operation", "", "wsUser added", ws.wsUserToConn, "connection_uid", uid, "connection_platform", constant.PlatformIDToName(platformID), "online_user_num", len(ws.wsUserToConn), "online_conn_num", count)
} }
@ -394,7 +394,7 @@ func (ws *WServer) delUserConn(conn *UserConn) {
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp) log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp)
} }
promePkg.PromeGaugeDec(promePkg.OnlineUserGauge) prome.PromeGaugeDec(prome.OnlineUserGauge)
} }

View File

@ -1,29 +0,0 @@
package msgtransfer
import (
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/log"
pbMsg "Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils"
)
func saveUserChat(uid string, msg *pbMsg.MsgDataToMQ) error {
time := utils.GetCurrentTimestampByMill()
seq, err := db.DB.IncrUserSeq(uid)
if err != nil {
log.NewError(msg.OperationID, "data insert to redis err", err.Error(), msg.String())
return err
}
msg.MsgData.Seq = uint32(seq)
pbSaveData := pbMsg.MsgDataToDB{}
pbSaveData.MsgData = msg.MsgData
log.NewInfo(msg.OperationID, "IncrUserSeq cost time", utils.GetCurrentTimestampByMill()-time)
return db.DB.SaveUserChatMongo2(uid, pbSaveData.MsgData.SendTime, &pbSaveData)
// return db.DB.SaveUserChatMongo2(uid, pbSaveData.MsgData.SendTime, &pbSaveData)
}
func saveUserChatList(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) (error, uint64) {
log.Info(operationID, utils.GetSelfFuncName(), "args ", userID, len(msgList))
//return db.DB.BatchInsertChat(userID, msgList, operationID)
return db.DB.BatchInsertChat2Cache(userID, msgList, operationID)
}

View File

@ -2,83 +2,53 @@ package msgtransfer
import ( import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/prome"
"Open_IM/pkg/common/kafka"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/statistics"
"fmt" "fmt"
"sync"
) )
const OnlineTopicBusy = 1 type MsgTransfer struct {
const OnlineTopicVacancy = 0 persistentCH PersistentConsumerHandler // 聊天记录持久化到mysql的消费者 订阅的topic: ws2ms_chat
const Msg = 2 historyCH OnlineHistoryRedisConsumerHandler // 这个消费者聚合消息, 订阅的topicws2ms_chat, 修改通知发往msg_to_modify topic, 消息存入redis后Incr Redis, 再发消息到ms2pschat topic推送 发消息到msg_to_mongo topic持久化
const ConsumerMsgs = 3 historyMongoCH OnlineHistoryMongoConsumerHandler // mongoDB批量插入, 成功后删除redis中消息以及处理删除通知消息删除的 订阅的topic: msg_to_mongo
const AggregationMessages = 4 modifyCH ModifyMsgConsumerHandler // 负责消费修改消息通知的consumer, 订阅的topic: msg_to_modify
const MongoMessages = 5
const ChannelNum = 100
var (
persistentCH PersistentConsumerHandler
historyCH OnlineHistoryRedisConsumerHandler
historyMongoCH OnlineHistoryMongoConsumerHandler
modifyCH ModifyMsgConsumerHandler
producer *kafka.Producer
producerToModify *kafka.Producer
producerToMongo *kafka.Producer
cmdCh chan Cmd2Value
onlineTopicStatus int
w *sync.Mutex
singleMsgSuccessCount uint64
groupMsgCount uint64
singleMsgFailedCount uint64
singleMsgSuccessCountMutex sync.Mutex
)
func Init() {
cmdCh = make(chan Cmd2Value, 10000)
w = new(sync.Mutex)
if config.Config.Prometheus.Enable {
initPrometheus()
}
persistentCH.Init() // ws2mschat save mysql
historyCH.Init(cmdCh) //
historyMongoCH.Init()
modifyCH.Init()
onlineTopicStatus = OnlineTopicVacancy
//offlineHistoryCH.Init(cmdCh)
statistics.NewStatistics(&singleMsgSuccessCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second singleMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
statistics.NewStatistics(&groupMsgCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second groupMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
producer = kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic)
producerToModify = kafka.NewKafkaProducer(config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.MsgToModify.Topic)
producerToMongo = kafka.NewKafkaProducer(config.Config.Kafka.MsgToMongo.Addr, config.Config.Kafka.MsgToMongo.Topic)
} }
func Run(promethuesPort int) {
//register mysqlConsumerHandler to func NewMsgTransfer() *MsgTransfer {
if config.Config.ChatPersistenceMysql { msgTransfer := &MsgTransfer{}
go persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(&persistentCH) msgTransfer.persistentCH.Init()
} else { msgTransfer.historyCH.Init()
fmt.Println("not start mysql consumer") msgTransfer.historyMongoCH.Init()
msgTransfer.modifyCH.Init()
if config.Config.Prometheus.Enable {
msgTransfer.initPrometheus()
} }
go historyCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyCH) return msgTransfer
go historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(&historyMongoCH) }
go modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(&modifyCH)
//go offlineHistoryCH.historyConsumerGroup.RegisterHandleAndConsumer(&offlineHistoryCH) func (m *MsgTransfer) initPrometheus() {
prome.NewSeqGetSuccessCounter()
prome.NewSeqGetFailedCounter()
prome.NewSeqSetSuccessCounter()
prome.NewSeqSetFailedCounter()
prome.NewMsgInsertRedisSuccessCounter()
prome.NewMsgInsertRedisFailedCounter()
prome.NewMsgInsertMongoSuccessCounter()
prome.NewMsgInsertMongoFailedCounter()
}
func (m *MsgTransfer) Run(promePort int) {
if config.Config.ChatPersistenceMysql {
go m.persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(&m.persistentCH)
} else {
fmt.Println("msg transfer not start mysql consumer")
}
go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(&m.historyCH)
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(&m.historyMongoCH)
go m.modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(&m.modifyCH)
go func() { go func() {
err := promePkg.StartPromeSrv(promethuesPort) err := prome.StartPromeSrv(promePort)
if err != nil { if err != nil {
panic(err) panic(err)
} }
}() }()
} }
func SetOnlineTopicStatus(status int) {
w.Lock()
defer w.Unlock()
onlineTopicStatus = status
}
func GetOnlineTopicStatus() int {
w.Lock()
defer w.Unlock()
return onlineTopicStatus
}

View File

@ -1,14 +1,19 @@
package msgtransfer package msgtransfer
import ( import (
"Open_IM/pkg/apistruct"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db/cache"
"Open_IM/pkg/common/db/controller"
unRelationTb "Open_IM/pkg/common/db/table/unrelation"
kfk "Open_IM/pkg/common/kafka" kfk "Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/tracelog"
pbMsg "Open_IM/pkg/proto/msg" pbMsg "Open_IM/pkg/proto/msg"
sdkws "Open_IM/pkg/proto/sdkws" sdkws "Open_IM/pkg/proto/sdkws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context"
"encoding/json" "encoding/json"
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
@ -16,13 +21,13 @@ import (
) )
type ModifyMsgConsumerHandler struct { type ModifyMsgConsumerHandler struct {
msgHandle map[string]fcb
modifyMsgConsumerGroup *kfk.MConsumerGroup modifyMsgConsumerGroup *kfk.MConsumerGroup
extendMsgInterface controller.ExtendMsgInterface
cache cache.Cache
} }
func (mmc *ModifyMsgConsumerHandler) Init() { func (mmc *ModifyMsgConsumerHandler) Init() {
mmc.msgHandle = make(map[string]fcb)
mmc.msgHandle[config.Config.Kafka.MsgToModify.Topic] = mmc.ModifyMsg
mmc.modifyMsgConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, mmc.modifyMsgConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.MsgToModify.Topic}, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.MsgToModify.Topic},
config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.ConsumerGroupID.MsgToModify) config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.ConsumerGroupID.MsgToModify)
@ -35,7 +40,7 @@ func (mmc *ModifyMsgConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSessi
for msg := range claim.Messages() { for msg := range claim.Messages() {
log.NewDebug("", "kafka get info to mysql", "ModifyMsgConsumerHandler", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key)) log.NewDebug("", "kafka get info to mysql", "ModifyMsgConsumerHandler", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key))
if len(msg.Value) != 0 { if len(msg.Value) != 0 {
mmc.msgHandle[msg.Topic](msg, string(msg.Key), sess) mmc.ModifyMsg(msg, string(msg.Key), sess)
} else { } else {
log.Error("", "msg get from kafka but is nil", msg.Key) log.Error("", "msg get from kafka but is nil", msg.Key)
} }
@ -58,6 +63,8 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg
if !isReactionFromCache { if !isReactionFromCache {
continue continue
} }
ctx := context.Background()
tracelog.SetOperationID(ctx, msgDataToMQ.OperationID)
if msgDataToMQ.MsgData.ContentType == constant.ReactionMessageModifier { if msgDataToMQ.MsgData.ContentType == constant.ReactionMessageModifier {
notification := &apistruct.ReactionMessageModifierNotification{} notification := &apistruct.ReactionMessageModifierNotification{}
if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil { if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil {
@ -69,21 +76,21 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg
} }
if !notification.IsReact { if !notification.IsReact {
// first time to modify // first time to modify
var reactionExtensionList = make(map[string]mongoDB.KeyValue) var reactionExtensionList = make(map[string]unRelationTb.KeyValueModel)
extendMsg := mongoDB.ExtendMsg{ extendMsg := unRelationTb.ExtendMsgModel{
ReactionExtensionList: reactionExtensionList, ReactionExtensionList: reactionExtensionList,
ClientMsgID: notification.ClientMsgID, ClientMsgID: notification.ClientMsgID,
MsgFirstModifyTime: notification.MsgFirstModifyTime, MsgFirstModifyTime: notification.MsgFirstModifyTime,
} }
for _, v := range notification.SuccessReactionExtensionList { for _, v := range notification.SuccessReactionExtensionList {
reactionExtensionList[v.TypeKey] = mongoDB.KeyValue{ reactionExtensionList[v.TypeKey] = unRelationTb.KeyValueModel{
TypeKey: v.TypeKey, TypeKey: v.TypeKey,
Value: v.Value, Value: v.Value,
LatestUpdateTime: v.LatestUpdateTime, LatestUpdateTime: v.LatestUpdateTime,
} }
} }
if err := db.DB.InsertExtendMsg(notification.SourceID, notification.SessionType, &extendMsg); err != nil { if err := mmc.extendMsgInterface.InsertExtendMsg(ctx, notification.SourceID, notification.SessionType, &extendMsg); err != nil {
log.NewError(msgDataToMQ.OperationID, "MsgFirstModify InsertExtendMsg failed", notification.SourceID, notification.SessionType, extendMsg, err.Error()) log.NewError(msgDataToMQ.OperationID, "MsgFirstModify InsertExtendMsg failed", notification.SourceID, notification.SessionType, extendMsg, err.Error())
continue continue
} }
@ -97,7 +104,7 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg
} }
} }
// is already modify // is already modify
if err := db.DB.InsertOrUpdateReactionExtendMsgSet(notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, reactionExtensionList); err != nil { if err := mmc.extendMsgInterface.InsertOrUpdateReactionExtendMsgSet(ctx, notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, reactionExtensionList); err != nil {
log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed") log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed")
} }
} }
@ -106,15 +113,10 @@ func (mmc *ModifyMsgConsumerHandler) ModifyMsg(cMsg *sarama.ConsumerMessage, msg
if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil { if err := json.Unmarshal(msgDataToMQ.MsgData.Content, notification); err != nil {
continue continue
} }
if err := db.DB.DeleteReactionExtendMsgSet(notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, notification.SuccessReactionExtensionList); err != nil { if err := mmc.extendMsgInterface.DeleteReactionExtendMsgSet(ctx, notification.SourceID, notification.SessionType, notification.ClientMsgID, notification.MsgFirstModifyTime, notification.SuccessReactionExtensionList); err != nil {
log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed") log.NewError(msgDataToMQ.OperationID, "InsertOrUpdateReactionExtendMsgSet failed")
} }
} }
} }
} }
func UnMarshallSetReactionMsgContent(content []byte) (notification *apistruct.ReactionMessageModifierNotification, err error) {
return notification, nil
}

View File

@ -3,62 +3,80 @@ package msgtransfer
import ( import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db/cache"
kfk "Open_IM/pkg/common/kafka" "Open_IM/pkg/common/db/controller"
"Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/tracelog"
pbMsg "Open_IM/pkg/proto/msg" pbMsg "Open_IM/pkg/proto/msg"
pbPush "Open_IM/pkg/proto/push" pbPush "Open_IM/pkg/proto/push"
"Open_IM/pkg/statistics"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
"fmt"
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"hash/crc32"
"strings"
"sync" "sync"
"time" "time"
) )
const ConsumerMsgs = 3
const AggregationMessages = 4
const MongoMessages = 5
const ChannelNum = 100
type MsgChannelValue struct { type MsgChannelValue struct {
aggregationID string //maybe userID or super groupID aggregationID string //maybe userID or super groupID
triggerID string triggerID string
msgList []*pbMsg.MsgDataToMQ msgList []*pbMsg.MsgDataToMQ
lastSeq uint64 lastSeq uint64
} }
type TriggerChannelValue struct { type TriggerChannelValue struct {
triggerID string triggerID string
cmsgList []*sarama.ConsumerMessage cMsgList []*sarama.ConsumerMessage
} }
type fcb func(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession)
type Cmd2Value struct { type Cmd2Value struct {
Cmd int Cmd int
Value interface{} Value interface{}
} }
type OnlineHistoryRedisConsumerHandler struct { type OnlineHistoryRedisConsumerHandler struct {
msgHandle map[string]fcb historyConsumerGroup *kafka.MConsumerGroup
historyConsumerGroup *kfk.MConsumerGroup
chArrays [ChannelNum]chan Cmd2Value chArrays [ChannelNum]chan Cmd2Value
msgDistributionCh chan Cmd2Value msgDistributionCh chan Cmd2Value
singleMsgSuccessCount uint64
singleMsgFailedCount uint64
singleMsgSuccessCountMutex sync.Mutex
singleMsgFailedCountMutex sync.Mutex
producerToPush *kafka.Producer
producerToModify *kafka.Producer
producerToMongo *kafka.Producer
msgInterface controller.MsgInterface
cache cache.Cache
} }
func (och *OnlineHistoryRedisConsumerHandler) Init(cmdCh chan Cmd2Value) { func (och *OnlineHistoryRedisConsumerHandler) Init() {
och.msgHandle = make(map[string]fcb)
och.msgDistributionCh = make(chan Cmd2Value) //no buffer channel och.msgDistributionCh = make(chan Cmd2Value) //no buffer channel
go och.MessagesDistributionHandle() go och.MessagesDistributionHandle()
for i := 0; i < ChannelNum; i++ { for i := 0; i < ChannelNum; i++ {
och.chArrays[i] = make(chan Cmd2Value, 50) och.chArrays[i] = make(chan Cmd2Value, 50)
go och.Run(i) go och.Run(i)
} }
if config.Config.ReliableStorage { och.producerToPush = kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic)
och.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = och.handleChatWs2Mongo och.producerToModify = kafka.NewKafkaProducer(config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.MsgToModify.Topic)
} else { och.producerToMongo = kafka.NewKafkaProducer(config.Config.Kafka.MsgToMongo.Addr, config.Config.Kafka.MsgToMongo.Topic)
och.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = och.handleChatWs2MongoLowReliability och.historyConsumerGroup = kafka.NewMConsumerGroup(&kafka.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
}
och.historyConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic}, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic},
config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToRedis) config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToRedis)
statistics.NewStatistics(&och.singleMsgSuccessCount, config.Config.ModuleName.MsgTransferName, fmt.Sprintf("%d second singleMsgCount insert to mongo", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
} }
func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) { func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
for { for {
select { select {
@ -72,6 +90,8 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
notStoragePushMsgList := make([]*pbMsg.MsgDataToMQ, 0, 80) notStoragePushMsgList := make([]*pbMsg.MsgDataToMQ, 0, 80)
log.Debug(triggerID, "msg arrived channel", "channel id", channelID, msgList, msgChannelValue.aggregationID, len(msgList)) log.Debug(triggerID, "msg arrived channel", "channel id", channelID, msgList, msgChannelValue.aggregationID, len(msgList))
var modifyMsgList []*pbMsg.MsgDataToMQ var modifyMsgList []*pbMsg.MsgDataToMQ
ctx := context.Background()
tracelog.SetOperationID(ctx, triggerID)
for _, v := range msgList { for _, v := range msgList {
log.Debug(triggerID, "msg come to storage center", v.String()) log.Debug(triggerID, "msg come to storage center", v.String())
isHistory := utils.GetSwitchFromOptions(v.MsgData.Options, constant.IsHistory) isHistory := utils.GetSwitchFromOptions(v.MsgData.Options, constant.IsHistory)
@ -84,45 +104,36 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
notStoragePushMsgList = append(notStoragePushMsgList, v) notStoragePushMsgList = append(notStoragePushMsgList, v)
} }
} }
if v.MsgData.ContentType == constant.ReactionMessageModifier || v.MsgData.ContentType == constant.ReactionMessageDeleter { if v.MsgData.ContentType == constant.ReactionMessageModifier || v.MsgData.ContentType == constant.ReactionMessageDeleter {
modifyMsgList = append(modifyMsgList, v) modifyMsgList = append(modifyMsgList, v)
} }
} }
if len(modifyMsgList) > 0 { if len(modifyMsgList) > 0 {
sendMessageToModifyMQ(msgChannelValue.aggregationID, triggerID, modifyMsgList) och.sendMessageToModifyMQ(ctx, msgChannelValue.aggregationID, triggerID, modifyMsgList)
} }
//switch msgChannelValue.msg.MsgData.SessionType {
//case constant.SingleChatType:
//case constant.GroupChatType:
//case constant.NotificationChatType:
//default:
// log.NewError(msgFromMQ.OperationID, "SessionType error", msgFromMQ.String())
// return
//}
log.Debug(triggerID, "msg storage length", len(storageMsgList), "push length", len(notStoragePushMsgList)) log.Debug(triggerID, "msg storage length", len(storageMsgList), "push length", len(notStoragePushMsgList))
if len(storageMsgList) > 0 { if len(storageMsgList) > 0 {
err, lastSeq := saveUserChatList(msgChannelValue.aggregationID, storageMsgList, triggerID) lastSeq, err := och.msgInterface.BatchInsertChat2Cache(ctx, msgChannelValue.aggregationID, storageMsgList)
if err != nil { if err != nil {
singleMsgFailedCount += uint64(len(storageMsgList)) och.singleMsgFailedCountMutex.Lock()
och.singleMsgFailedCount += uint64(len(storageMsgList))
och.singleMsgFailedCountMutex.Unlock()
log.NewError(triggerID, "single data insert to redis err", err.Error(), storageMsgList) log.NewError(triggerID, "single data insert to redis err", err.Error(), storageMsgList)
} else { } else {
singleMsgSuccessCountMutex.Lock() och.singleMsgSuccessCountMutex.Lock()
singleMsgSuccessCount += uint64(len(storageMsgList)) och.singleMsgSuccessCount += uint64(len(storageMsgList))
singleMsgSuccessCountMutex.Unlock() och.singleMsgSuccessCountMutex.Unlock()
och.SendMessageToMongoCH(msgChannelValue.aggregationID, triggerID, storageMsgList, lastSeq) och.SendMessageToMongoCH(ctx, msgChannelValue.aggregationID, triggerID, storageMsgList, lastSeq)
for _, v := range storageMsgList { for _, v := range storageMsgList {
sendMessageToPushMQ(v, msgChannelValue.aggregationID) och.sendMessageToPushMQ(ctx, v, msgChannelValue.aggregationID)
} }
for _, x := range notStoragePushMsgList { for _, x := range notStoragePushMsgList {
sendMessageToPushMQ(x, msgChannelValue.aggregationID) och.sendMessageToPushMQ(ctx, x, msgChannelValue.aggregationID)
} }
} }
} else { } else {
for _, x := range notStoragePushMsgList { for _, v := range notStoragePushMsgList {
sendMessageToPushMQ(x, msgChannelValue.aggregationID) och.sendMessageToPushMQ(ctx, v, msgChannelValue.aggregationID)
} }
} }
} }
@ -130,62 +141,6 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
} }
} }
func (och *OnlineHistoryRedisConsumerHandler) SendMessageToMongoCH(aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ, lastSeq uint64) {
if len(messages) > 0 {
pid, offset, err := producerToMongo.SendMessage(&pbMsg.MsgDataToMongoByMQ{LastSeq: lastSeq, AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}, aggregationID, triggerID)
if err != nil {
log.Error(triggerID, "kafka send failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID)
} else {
// log.NewWarn(m.OperationID, "sendMsgToKafka client msgID ", m.MsgData.ClientMsgID)
}
}
//hashCode := getHashCode(aggregationID)
//channelID := hashCode % ChannelNum
//log.Debug(triggerID, "generate channelID", hashCode, channelID, aggregationID)
////go func(cID uint32, userID string, messages []*pbMsg.MsgDataToMQ) {
//och.chMongoArrays[channelID] <- Cmd2Value{Cmd: MongoMessages, Value: MsgChannelValue{aggregationID: aggregationID, msgList: messages, triggerID: triggerID, lastSeq: lastSeq}}
}
//func (och *OnlineHistoryRedisConsumerHandler) MongoMessageRun(channelID int) {
// for {
// select {
// case cmd := <-och.chMongoArrays[channelID]:
// switch cmd.Cmd {
// case MongoMessages:
// msgChannelValue := cmd.Value.(MsgChannelValue)
// msgList := msgChannelValue.msgList
// triggerID := msgChannelValue.triggerID
// aggregationID := msgChannelValue.aggregationID
// lastSeq := msgChannelValue.lastSeq
// err := db.DB.BatchInsertChat2DB(aggregationID, msgList, triggerID, lastSeq)
// if err != nil {
// log.NewError(triggerID, "single data insert to mongo err", err.Error(), msgList)
// }
// for _, v := range msgList {
// if v.MsgData.ContentType == constant.DeleteMessageNotification {
// tips := sdkws.TipsComm{}
// DeleteMessageTips := sdkws.DeleteMessageTips{}
// err := proto.Unmarshal(v.MsgData.Content, &tips)
// if err != nil {
// log.NewError(triggerID, "tips unmarshal err:", err.Error(), v.String())
// continue
// }
// err = proto.Unmarshal(tips.Detail, &DeleteMessageTips)
// if err != nil {
// log.NewError(triggerID, "deleteMessageTips unmarshal err:", err.Error(), v.String())
// continue
// }
// if unexistSeqList, err := db.DB.DelMsgBySeqList(DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID); err != nil {
// log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID, err.Error(), unexistSeqList)
// }
//
// }
// }
// }
// }
// }
//}
func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() { func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() {
for { for {
aggregationMsgs := make(map[string][]*pbMsg.MsgDataToMQ, ChannelNum) aggregationMsgs := make(map[string][]*pbMsg.MsgDataToMQ, ChannelNum)
@ -195,7 +150,7 @@ func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() {
case ConsumerMsgs: case ConsumerMsgs:
triggerChannelValue := cmd.Value.(TriggerChannelValue) triggerChannelValue := cmd.Value.(TriggerChannelValue)
triggerID := triggerChannelValue.triggerID triggerID := triggerChannelValue.triggerID
consumerMessages := triggerChannelValue.cmsgList consumerMessages := triggerChannelValue.cMsgList
//Aggregation map[userid]message list //Aggregation map[userid]message list
log.Debug(triggerID, "batch messages come to distribution center", len(consumerMessages)) log.Debug(triggerID, "batch messages come to distribution center", len(consumerMessages))
for i := 0; i < len(consumerMessages); i++ { for i := 0; i < len(consumerMessages); i++ {
@ -218,155 +173,21 @@ func (och *OnlineHistoryRedisConsumerHandler) MessagesDistributionHandle() {
log.Debug(triggerID, "generate map list users len", len(aggregationMsgs)) log.Debug(triggerID, "generate map list users len", len(aggregationMsgs))
for aggregationID, v := range aggregationMsgs { for aggregationID, v := range aggregationMsgs {
if len(v) >= 0 { if len(v) >= 0 {
hashCode := getHashCode(aggregationID) hashCode := utils.GetHashCode(aggregationID)
channelID := hashCode % ChannelNum channelID := hashCode % ChannelNum
log.Debug(triggerID, "generate channelID", hashCode, channelID, aggregationID) log.Debug(triggerID, "generate channelID", hashCode, channelID, aggregationID)
//go func(cID uint32, userID string, messages []*pbMsg.MsgDataToMQ) {
och.chArrays[channelID] <- Cmd2Value{Cmd: AggregationMessages, Value: MsgChannelValue{aggregationID: aggregationID, msgList: v, triggerID: triggerID}} och.chArrays[channelID] <- Cmd2Value{Cmd: AggregationMessages, Value: MsgChannelValue{aggregationID: aggregationID, msgList: v, triggerID: triggerID}}
//}(channelID, userID, v)
} }
} }
} }
} }
}
}
func (mc *OnlineHistoryRedisConsumerHandler) handleChatWs2Mongo(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) {
msg := cMsg.Value
now := time.Now()
msgFromMQ := pbMsg.MsgDataToMQ{}
err := proto.Unmarshal(msg, &msgFromMQ)
if err != nil {
log.Error("msg_transfer Unmarshal msg err", "", "msg", string(msg), "err", err.Error())
return
}
operationID := msgFromMQ.OperationID
log.NewInfo(operationID, "msg come mongo!!!", "", "msg", string(msg))
//Control whether to store offline messages (mongo)
isHistory := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsHistory)
//Control whether to store history messages (mysql)
isPersist := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsPersistent)
isSenderSync := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync)
switch msgFromMQ.MsgData.SessionType {
case constant.SingleChatType:
log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = SingleChatType", isHistory, isPersist)
if isHistory {
err := saveUserChat(msgKey, &msgFromMQ)
if err != nil {
singleMsgFailedCount++
log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String())
return
}
singleMsgSuccessCountMutex.Lock()
singleMsgSuccessCount++
singleMsgSuccessCountMutex.Unlock()
log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now))
}
if !isSenderSync && msgKey == msgFromMQ.MsgData.SendID {
} else {
go sendMessageToPush(&msgFromMQ, msgKey)
}
log.NewDebug(operationID, "saveSingleMsg cost time ", time.Since(now))
case constant.GroupChatType:
log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = GroupChatType", isHistory, isPersist)
if isHistory {
err := saveUserChat(msgFromMQ.MsgData.RecvID, &msgFromMQ)
if err != nil {
log.NewError(operationID, "group data insert to mongo err", msgFromMQ.String(), msgFromMQ.MsgData.RecvID, err.Error())
return
}
groupMsgCount++
}
go sendMessageToPush(&msgFromMQ, msgFromMQ.MsgData.RecvID)
log.NewDebug(operationID, "saveGroupMsg cost time ", time.Since(now))
case constant.NotificationChatType:
log.NewDebug(msgFromMQ.OperationID, "msg_transfer msg type = NotificationChatType", isHistory, isPersist)
if isHistory {
err := saveUserChat(msgKey, &msgFromMQ)
if err != nil {
log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String())
return
}
log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now))
}
if !isSenderSync && msgKey == msgFromMQ.MsgData.SendID {
} else {
go sendMessageToPush(&msgFromMQ, msgKey)
}
log.NewDebug(operationID, "saveUserChat cost time ", time.Since(now))
default:
log.NewError(msgFromMQ.OperationID, "SessionType error", msgFromMQ.String())
return
}
sess.MarkMessage(cMsg, "")
log.NewDebug(msgFromMQ.OperationID, "msg_transfer handle topic data to database success...", msgFromMQ.String())
}
func (och *OnlineHistoryRedisConsumerHandler) handleChatWs2MongoLowReliability(cMsg *sarama.ConsumerMessage, msgKey string, sess sarama.ConsumerGroupSession) {
msg := cMsg.Value
msgFromMQ := pbMsg.MsgDataToMQ{}
err := proto.Unmarshal(msg, &msgFromMQ)
if err != nil {
log.Error("msg_transfer Unmarshal msg err", "", "msg", string(msg), "err", err.Error())
return
}
operationID := msgFromMQ.OperationID
log.NewInfo(operationID, "msg come mongo!!!", "", "msg", string(msg))
//Control whether to store offline messages (mongo)
isHistory := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsHistory)
isSenderSync := utils.GetSwitchFromOptions(msgFromMQ.MsgData.Options, constant.IsSenderSync)
if isHistory {
seq, err := db.DB.IncrUserSeq(msgKey)
if err != nil {
log.NewError(operationID, "data insert to redis err", err.Error(), string(msg))
return
}
sess.MarkMessage(cMsg, "")
msgFromMQ.MsgData.Seq = uint32(seq)
log.Debug(operationID, "send ch msg is ", msgFromMQ.String())
//och.msgCh <- Cmd2Value{Cmd: Msg, Value: MsgChannelValue{msgKey, msgFromMQ}}
//err := saveUserChat(msgKey, &msgFromMQ)
//if err != nil {
// singleMsgFailedCount++
// log.NewError(operationID, "single data insert to mongo err", err.Error(), msgFromMQ.String())
// return
//}
//singleMsgSuccessCountMutex.Lock()
//singleMsgSuccessCount++
//singleMsgSuccessCountMutex.Unlock()
//log.NewDebug(msgFromMQ.OperationID, "sendMessageToPush cost time ", time.Since(now))
} else {
if !(!isSenderSync && msgKey == msgFromMQ.MsgData.SendID) {
go sendMessageToPush(&msgFromMQ, msgKey)
}
} }
} }
func (OnlineHistoryRedisConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } func (OnlineHistoryRedisConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } func (OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }
//func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
// claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
// log.NewDebug("", "online new session msg come", claim.HighWaterMarkOffset(), claim.Topic(), claim.Partition())
// for msg := range claim.Messages() {
// SetOnlineTopicStatus(OnlineTopicBusy)
// //och.TriggerCmd(OnlineTopicBusy)
// log.NewDebug("", "online kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "online", msg.Offset, claim.HighWaterMarkOffset())
// och.msgHandle[msg.Topic](msg, string(msg.Key), sess)
// if claim.HighWaterMarkOffset()-msg.Offset <= 1 {
// log.Debug("", "online msg consume end", claim.HighWaterMarkOffset(), msg.Offset)
// SetOnlineTopicStatus(OnlineTopicVacancy)
// och.TriggerCmd(OnlineTopicVacancy)
// }
// }
// return nil
//}
func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
for { for {
if sess == nil { if sess == nil {
log.NewWarn("", " sess == nil, waiting ") log.NewWarn("", " sess == nil, waiting ")
@ -383,24 +204,6 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerG
go func() { go func() {
for { for {
select { select {
//case :
// triggerID = utils.OperationIDGenerator()
//
// log.NewDebug(triggerID, "claim.Messages ", msg)
// cMsg = append(cMsg, msg)
// if len(cMsg) >= 1000 {
// ccMsg := make([]*sarama.ConsumerMessage, 0, 1000)
// for _, v := range cMsg {
// ccMsg = append(ccMsg, v)
// }
// log.Debug(triggerID, "length trigger msg consumer start", len(ccMsg))
// och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{
// triggerID: triggerID, cmsgList: ccMsg}}
// sess.MarkMessage(msg, "")
// cMsg = make([]*sarama.ConsumerMessage, 0, 1000)
// log.Debug(triggerID, "length trigger msg consumer end", len(cMsg))
// }
case <-t.C: case <-t.C:
if len(cMsg) > 0 { if len(cMsg) > 0 {
rwLock.Lock() rwLock.Lock()
@ -416,163 +219,53 @@ func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerG
for i := 0; i < len(ccMsg)/split; i++ { for i := 0; i < len(ccMsg)/split; i++ {
//log.Debug() //log.Debug()
och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{ och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{
triggerID: triggerID, cmsgList: ccMsg[i*split : (i+1)*split]}} triggerID: triggerID, cMsgList: ccMsg[i*split : (i+1)*split]}}
} }
if (len(ccMsg) % split) > 0 { if (len(ccMsg) % split) > 0 {
och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{ och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{
triggerID: triggerID, cmsgList: ccMsg[split*(len(ccMsg)/split):]}} triggerID: triggerID, cMsgList: ccMsg[split*(len(ccMsg)/split):]}}
} }
//sess.MarkMessage(ccMsg[len(cMsg)-1], "")
log.Debug(triggerID, "timer trigger msg consumer end", len(cMsg)) log.Debug(triggerID, "timer trigger msg consumer end", len(cMsg))
} }
} }
} }
}() }()
for msg := range claim.Messages() { for msg := range claim.Messages() {
//msgFromMQ := pbMsg.MsgDataToMQ{}
//err := proto.Unmarshal(msg.Value, &msgFromMQ)
//if err != nil {
// log.Error(triggerID, "msg_transfer Unmarshal msg err", "msg", string(msg.Value), "err", err.Error())
//}
//userID := string(msg.Key)
//hashCode := getHashCode(userID)
//channelID := hashCode % ChannelNum
//log.Debug(triggerID, "generate channelID", hashCode, channelID, userID)
////go func(cID uint32, userID string, messages []*pbMsg.MsgDataToMQ) {
//och.chArrays[channelID] <- Cmd2Value{Cmd: UserMessages, Value: MsgChannelValue{userID: userID, msgList: []*pbMsg.MsgDataToMQ{&msgFromMQ}, triggerID: msgFromMQ.OperationID}}
//sess.MarkMessage(msg, "")
rwLock.Lock() rwLock.Lock()
if len(msg.Value) != 0 { if len(msg.Value) != 0 {
cMsg = append(cMsg, msg) cMsg = append(cMsg, msg)
} }
rwLock.Unlock() rwLock.Unlock()
sess.MarkMessage(msg, "") sess.MarkMessage(msg, "")
//och.TriggerCmd(OnlineTopicBusy)
//log.NewDebug("", "online kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "online", msg.Offset, claim.HighWaterMarkOffset())
} }
return nil return nil
} }
//func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, func (och *OnlineHistoryRedisConsumerHandler) sendMessageToPushMQ(ctx context.Context, message *pbMsg.MsgDataToMQ, pushToUserID string) {
// claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
//
// for {
// if sess == nil {
// log.NewWarn("", " sess == nil, waiting ")
// time.Sleep(100 * time.Millisecond)
// } else {
// break
// }
// }
//
// log.NewDebug("", "online new session msg come", claim.HighWaterMarkOffset(), claim.Topic(), claim.Partition())
// cMsg := make([]*sarama.ConsumerMessage, 0, 1000)
// t := time.NewTicker(time.Duration(100) * time.Millisecond)
// var triggerID string
// for msg := range claim.Messages() {
// cMsg = append(cMsg, msg)
// //och.TriggerCmd(OnlineTopicBusy)
// select {
// //case :
// // triggerID = utils.OperationIDGenerator()
// //
// // log.NewDebug(triggerID, "claim.Messages ", msg)
// // cMsg = append(cMsg, msg)
// // if len(cMsg) >= 1000 {
// // ccMsg := make([]*sarama.ConsumerMessage, 0, 1000)
// // for _, v := range cMsg {
// // ccMsg = append(ccMsg, v)
// // }
// // log.Debug(triggerID, "length trigger msg consumer start", len(ccMsg))
// // och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{
// // triggerID: triggerID, cmsgList: ccMsg}}
// // sess.MarkMessage(msg, "")
// // cMsg = make([]*sarama.ConsumerMessage, 0, 1000)
// // log.Debug(triggerID, "length trigger msg consumer end", len(cMsg))
// // }
//
// case <-t.C:
// if len(cMsg) > 0 {
// ccMsg := make([]*sarama.ConsumerMessage, 0, 1000)
// for _, v := range cMsg {
// ccMsg = append(ccMsg, v)
// }
// triggerID = utils.OperationIDGenerator()
// log.Debug(triggerID, "timer trigger msg consumer start", len(ccMsg))
// och.msgDistributionCh <- Cmd2Value{Cmd: ConsumerMsgs, Value: TriggerChannelValue{
// triggerID: triggerID, cmsgList: ccMsg}}
// sess.MarkMessage(cMsg[len(cMsg)-1], "")
// cMsg = make([]*sarama.ConsumerMessage, 0, 1000)
// log.Debug(triggerID, "timer trigger msg consumer end", len(cMsg))
// }
// default:
//
// }
// //log.NewDebug("", "online kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "online", msg.Offset, claim.HighWaterMarkOffset())
//
// }
// return nil
//}
func sendMessageToPush(message *pbMsg.MsgDataToMQ, pushToUserID string) {
log.Info(message.OperationID, "msg_transfer send message to push", "message", message.String())
rpcPushMsg := pbPush.PushMsgReq{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID}
mqPushMsg := pbMsg.PushMsgDataToMQ{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID}
grpcConn := rpc.GetDefaultConn(config.Config.Etcd.EtcdSchema, strings.Join(config.Config.Etcd.EtcdAddr, ","), config.Config.RpcRegisterName.OpenImPushName, message.OperationID)
if grpcConn != nil {
log.Error(rpcPushMsg.OperationID, "rpc dial failed", "push data", rpcPushMsg.String())
pid, offset, err := producer.SendMessage(&mqPushMsg, mqPushMsg.PushToUserID, rpcPushMsg.OperationID)
if err != nil {
log.Error(mqPushMsg.OperationID, "kafka send failed", "send data", message.String(), "pid", pid, "offset", offset, "err", err.Error())
}
return
}
msgClient := pbPush.NewPushMsgServiceClient(grpcConn)
_, err := msgClient.PushMsg(context.Background(), &rpcPushMsg)
if err != nil {
log.Error(rpcPushMsg.OperationID, "rpc send failed", rpcPushMsg.OperationID, "push data", rpcPushMsg.String(), "err", err.Error())
pid, offset, err := producer.SendMessage(&mqPushMsg, mqPushMsg.PushToUserID, rpcPushMsg.OperationID)
if err != nil {
log.Error(message.OperationID, "kafka send failed", mqPushMsg.OperationID, "send data", mqPushMsg.String(), "pid", pid, "offset", offset, "err", err.Error())
}
} else {
log.Info(message.OperationID, "rpc send success", rpcPushMsg.OperationID, "push data", rpcPushMsg.String())
}
}
func sendMessageToPushMQ(message *pbMsg.MsgDataToMQ, pushToUserID string) {
log.Info(message.OperationID, utils.GetSelfFuncName(), "msg ", message.String(), pushToUserID) log.Info(message.OperationID, utils.GetSelfFuncName(), "msg ", message.String(), pushToUserID)
rpcPushMsg := pbPush.PushMsgReq{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID} rpcPushMsg := pbPush.PushMsgReq{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID}
mqPushMsg := pbMsg.PushMsgDataToMQ{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID} mqPushMsg := pbMsg.PushMsgDataToMQ{OperationID: message.OperationID, MsgData: message.MsgData, PushToUserID: pushToUserID}
pid, offset, err := producer.SendMessage(&mqPushMsg, mqPushMsg.PushToUserID, rpcPushMsg.OperationID) pid, offset, err := och.producerToPush.SendMessage(&mqPushMsg, mqPushMsg.PushToUserID, rpcPushMsg.OperationID)
if err != nil { if err != nil {
log.Error(mqPushMsg.OperationID, "kafka send failed", "send data", message.String(), "pid", pid, "offset", offset, "err", err.Error()) log.Error(mqPushMsg.OperationID, "kafka send failed", "send data", message.String(), "pid", pid, "offset", offset, "err", err.Error())
} }
return return
} }
func sendMessageToModifyMQ(aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ) { func (och *OnlineHistoryRedisConsumerHandler) sendMessageToModifyMQ(ctx context.Context, aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ) {
if len(messages) > 0 { if len(messages) > 0 {
pid, offset, err := producerToModify.SendMessage(&pbMsg.MsgDataToModifyByMQ{AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}, aggregationID, triggerID) pid, offset, err := och.producerToModify.SendMessage(&pbMsg.MsgDataToModifyByMQ{AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}, aggregationID, triggerID)
if err != nil { if err != nil {
log.Error(triggerID, "kafka send failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID) log.Error(triggerID, "kafka send failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID)
} else {
// log.NewWarn(m.OperationID, "sendMsgToKafka client msgID ", m.MsgData.ClientMsgID)
} }
} }
} }
// String hashes a string to a unique hashcode. func (och *OnlineHistoryRedisConsumerHandler) SendMessageToMongoCH(ctx context.Context, aggregationID string, triggerID string, messages []*pbMsg.MsgDataToMQ, lastSeq uint64) {
// if len(messages) > 0 {
// crc32 returns a uint32, but for our use we need pid, offset, err := och.producerToMongo.SendMessage(&pbMsg.MsgDataToMongoByMQ{LastSeq: lastSeq, AggregationID: aggregationID, MessageList: messages, TriggerID: triggerID}, aggregationID, triggerID)
// and non negative integer. Here we cast to an integer if err != nil {
// and invert it if the result is negative. log.Error(triggerID, "kafka send failed", "send data", len(messages), "pid", pid, "offset", offset, "err", err.Error(), "key", aggregationID)
func getHashCode(s string) uint32 { }
return crc32.ChecksumIEEE([]byte(s)) }
} }

View File

@ -3,28 +3,29 @@ package msgtransfer
import ( import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db/cache"
"Open_IM/pkg/common/db/controller"
kfk "Open_IM/pkg/common/kafka" kfk "Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/tracelog"
pbMsg "Open_IM/pkg/proto/msg" pbMsg "Open_IM/pkg/proto/msg"
sdkws "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/proto/sdkws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context"
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
) )
type OnlineHistoryMongoConsumerHandler struct { type OnlineHistoryMongoConsumerHandler struct {
msgHandle map[string]fcb
historyConsumerGroup *kfk.MConsumerGroup historyConsumerGroup *kfk.MConsumerGroup
msgInterface controller.MsgInterface
cache cache.Cache
} }
func (mc *OnlineHistoryMongoConsumerHandler) Init() { func (mc *OnlineHistoryMongoConsumerHandler) Init() {
mc.msgHandle = make(map[string]fcb)
mc.msgHandle[config.Config.Kafka.MsgToMongo.Topic] = mc.handleChatWs2Mongo
mc.historyConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, mc.historyConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.MsgToMongo.Topic}, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.MsgToMongo.Topic},
config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMongo) config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMongo)
} }
func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) { func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) {
msg := cMsg.Value msg := cMsg.Value
@ -35,14 +36,17 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.Con
return return
} }
log.Info(msgFromMQ.TriggerID, "BatchInsertChat2DB userID: ", msgFromMQ.AggregationID, "msgFromMQ.LastSeq: ", msgFromMQ.LastSeq) log.Info(msgFromMQ.TriggerID, "BatchInsertChat2DB userID: ", msgFromMQ.AggregationID, "msgFromMQ.LastSeq: ", msgFromMQ.LastSeq)
err = db.DB.BatchInsertChat2DB(msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.TriggerID, msgFromMQ.LastSeq) ctx := context.Background()
tracelog.SetOperationID(ctx, msgFromMQ.TriggerID)
//err = db.DB.BatchInsertChat2DB(msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.TriggerID, msgFromMQ.LastSeq)
err = mc.msgInterface.BatchInsertChat2DB(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.LastSeq)
if err != nil { if err != nil {
log.NewError(msgFromMQ.TriggerID, "single data insert to mongo err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID) log.NewError(msgFromMQ.TriggerID, "single data insert to mongo err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID)
} else { }
err = db.DB.DeleteMessageFromCache(msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.GetTriggerID()) //err = db.DB.DeleteMessageFromCache(msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.GetTriggerID())
if err != nil { err = mc.msgInterface.DeleteMessageFromCache(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList)
log.NewError(msgFromMQ.TriggerID, "remove cache msg from redis err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID) if err != nil {
} log.NewError(msgFromMQ.TriggerID, "remove cache msg from redis err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID)
} }
for _, v := range msgFromMQ.MessageList { for _, v := range msgFromMQ.MessageList {
if v.MsgData.ContentType == constant.DeleteMessageNotification { if v.MsgData.ContentType == constant.DeleteMessageNotification {
@ -58,23 +62,23 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.Con
log.NewError(msgFromMQ.TriggerID, "deleteMessageTips unmarshal err:", err.Error(), v.String()) log.NewError(msgFromMQ.TriggerID, "deleteMessageTips unmarshal err:", err.Error(), v.String())
continue continue
} }
if unexistSeqList, err := db.DB.DelMsgBySeqList(DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID); err != nil { if totalUnExistSeqs, err := mc.msgInterface.DelMsgBySeqs(ctx, DeleteMessageTips.UserID, DeleteMessageTips.SeqList); err != nil {
log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, v.OperationID, err.Error(), unexistSeqList) log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqs args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, "error:", err.Error(), "totalUnExistSeqs: ", totalUnExistSeqs)
} }
} }
} }
} }
func (OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } func (OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } func (OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }
func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
log.NewDebug("", "online new session msg come", claim.HighWaterMarkOffset(), claim.Topic(), claim.Partition()) log.NewDebug("", "online new session msg come", claim.HighWaterMarkOffset(), claim.Topic(), claim.Partition())
for msg := range claim.Messages() { for msg := range claim.Messages() {
log.NewDebug("", "kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key)) log.NewDebug("", "kafka get info to mongo", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key))
if len(msg.Value) != 0 { if len(msg.Value) != 0 {
mc.msgHandle[msg.Topic](msg, string(msg.Key), sess) mc.handleChatWs2Mongo(msg, string(msg.Key), sess)
} else { } else {
log.Error("", "mongo msg get from kafka but is nil", msg.Key) log.Error("", "mongo msg get from kafka but is nil", msg.Key)
} }

View File

@ -9,7 +9,7 @@ package msgtransfer
import ( import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db/mysql_model/im_mysql_msg_model" "Open_IM/pkg/common/db/controller"
kfk "Open_IM/pkg/common/kafka" kfk "Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
pbMsg "Open_IM/pkg/proto/msg" pbMsg "Open_IM/pkg/proto/msg"
@ -17,33 +17,17 @@ import (
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
promePkg "Open_IM/pkg/common/prometheus"
) )
type PersistentConsumerHandler struct { type PersistentConsumerHandler struct {
msgHandle map[string]fcb
persistentConsumerGroup *kfk.MConsumerGroup persistentConsumerGroup *kfk.MConsumerGroup
chatLogInterface controller.ChatLogInterface
} }
func (pc *PersistentConsumerHandler) Init() { func (pc *PersistentConsumerHandler) Init() {
pc.msgHandle = make(map[string]fcb)
pc.msgHandle[config.Config.Kafka.Ws2mschat.Topic] = pc.handleChatWs2Mysql
pc.persistentConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0, pc.persistentConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic}, OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic},
config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMySql) config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMySql)
}
func initPrometheus() {
promePkg.NewSeqGetSuccessCounter()
promePkg.NewSeqGetFailedCounter()
promePkg.NewSeqSetSuccessCounter()
promePkg.NewSeqSetFailedCounter()
promePkg.NewMsgInsertRedisSuccessCounter()
promePkg.NewMsgInsertRedisFailedCounter()
promePkg.NewMsgInsertMongoSuccessCounter()
promePkg.NewMsgInsertMongoFailedCounter()
} }
func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) { func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) {
@ -75,7 +59,7 @@ func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMes
} }
if tag { if tag {
log.NewInfo(msgFromMQ.OperationID, "msg_transfer msg persisting", string(msg)) log.NewInfo(msgFromMQ.OperationID, "msg_transfer msg persisting", string(msg))
if err = im_mysql_msg_model.InsertMessageToChatLog(msgFromMQ); err != nil { if err = pc.chatLogInterface.CreateChatLog(msgFromMQ); err != nil {
log.NewError(msgFromMQ.OperationID, "Message insert failed", "err", err.Error(), "msg", msgFromMQ.String()) log.NewError(msgFromMQ.OperationID, "Message insert failed", "err", err.Error(), "msg", msgFromMQ.String())
return return
} }
@ -90,7 +74,7 @@ func (pc *PersistentConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSessi
for msg := range claim.Messages() { for msg := range claim.Messages() {
log.NewDebug("", "kafka get info to mysql", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key)) log.NewDebug("", "kafka get info to mysql", "msgTopic", msg.Topic, "msgPartition", msg.Partition, "msg", string(msg.Value), "key", string(msg.Key))
if len(msg.Value) != 0 { if len(msg.Value) != 0 {
pc.msgHandle[msg.Topic](msg, string(msg.Key), sess) pc.handleChatWs2Mysql(msg, string(msg.Key), sess)
} else { } else {
log.Error("", "msg get from kafka but is nil", msg.Key) log.Error("", "msg get from kafka but is nil", msg.Key)
} }
@ -98,15 +82,3 @@ func (pc *PersistentConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSessi
} }
return nil return nil
} }
1. 请求1 group Rpc 2. 请求2 发消息 sendMsg rpc
1 更改数据库
2. 删除哈希缓存
检测到哈希变了 群成员还没来得及删除有问题
3. 删除群成员缓存
4. 删除对应群成员加群缓存
5. 删除数量缓存

View File

@ -15,7 +15,7 @@ import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/kafka" "Open_IM/pkg/common/kafka"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/statistics" "Open_IM/pkg/statistics"
"fmt" "fmt"
) )
@ -53,15 +53,15 @@ func init() {
} }
func initPrometheus() { func initPrometheus() {
promePkg.NewMsgOfflinePushSuccessCounter() prome.NewMsgOfflinePushSuccessCounter()
promePkg.NewMsgOfflinePushFailedCounter() prome.NewMsgOfflinePushFailedCounter()
} }
func Run(promethuesPort int) { func Run(promethuesPort int) {
go rpcServer.run() go rpcServer.run()
go pushCh.pushConsumerGroup.RegisterHandleAndConsumer(&pushCh) go pushCh.pushConsumerGroup.RegisterHandleAndConsumer(&pushCh)
go func() { go func() {
err := promePkg.StartPromeSrv(promethuesPort) err := prome.StartPromeSrv(promethuesPort)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -5,7 +5,7 @@ import (
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/getcdv3" "Open_IM/pkg/getcdv3"
pbPush "Open_IM/pkg/proto/push" pbPush "Open_IM/pkg/proto/push"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
@ -47,11 +47,11 @@ func (r *RPCServer) run() {
defer listener.Close() defer listener.Close()
var grpcOpts []grpc.ServerOption var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter() prome.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)

View File

@ -20,7 +20,7 @@ import (
"context" "context"
"strings" "strings"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
) )
@ -144,10 +144,10 @@ func MsgToUser(pushMsg *pbPush.PushMsgReq) {
} }
pushResult, err := offlinePusher.Push(UIDList, title, detailContent, pushMsg.OperationID, opts) pushResult, err := offlinePusher.Push(UIDList, title, detailContent, pushMsg.OperationID, opts)
if err != nil { if err != nil {
promePkg.PromeInc(promePkg.MsgOfflinePushFailedCounter) prome.PromeInc(prome.MsgOfflinePushFailedCounter)
log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error()) log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error())
} else { } else {
promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter) prome.PromeInc(prome.MsgOfflinePushSuccessCounter)
log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData) log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData)
} }
} }
@ -267,10 +267,10 @@ func MsgToSuperGroupUser(pushMsg *pbPush.PushMsgReq) {
} }
pushResult, err := offlinePusher.Push(needOfflinePushUserIDList, title, detailContent, pushMsg.OperationID, opts) pushResult, err := offlinePusher.Push(needOfflinePushUserIDList, title, detailContent, pushMsg.OperationID, opts)
if err != nil { if err != nil {
promePkg.PromeInc(promePkg.MsgOfflinePushFailedCounter) prome.PromeInc(prome.MsgOfflinePushFailedCounter)
log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error()) log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error())
} else { } else {
promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter) prome.PromeInc(prome.MsgOfflinePushSuccessCounter)
log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData) log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData)
} }
} }

View File

@ -8,7 +8,7 @@ import (
"Open_IM/pkg/common/db/cache" "Open_IM/pkg/common/db/cache"
"Open_IM/pkg/common/db/controller" "Open_IM/pkg/common/db/controller"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tokenverify"
"Open_IM/pkg/common/tracelog" "Open_IM/pkg/common/tracelog"
pbAuth "Open_IM/pkg/proto/auth" pbAuth "Open_IM/pkg/proto/auth"
@ -42,13 +42,13 @@ func (s *rpcAuth) Run() {
log.NewInfo(operationID, "listen network success ", listener, address) log.NewInfo(operationID, "listen network success ", listener, address)
var grpcOpts []grpc.ServerOption var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter() prome.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
promePkg.NewUserRegisterCounter() prome.NewUserRegisterCounter()
promePkg.NewUserLoginCounter() prome.NewUserLoginCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)

View File

@ -6,14 +6,11 @@ import (
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db/cache" "Open_IM/pkg/common/db/cache"
"Open_IM/pkg/common/db/controller" "Open_IM/pkg/common/db/controller"
"Open_IM/pkg/common/db/relation" relationTb "Open_IM/pkg/common/db/relation"
"Open_IM/pkg/common/db/table" unrealationTb "Open_IM/pkg/common/db/unrelation"
"Open_IM/pkg/common/db/unrelation"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" "Open_IM/pkg/common/prome"
"Open_IM/pkg/getcdv3"
pbConversation "Open_IM/pkg/proto/conversation" pbConversation "Open_IM/pkg/proto/conversation"
pbUser "Open_IM/pkg/proto/user"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
"github.com/dtm-labs/rockscache" "github.com/dtm-labs/rockscache"
@ -95,11 +92,11 @@ func (c *conversationServer) Run() {
//grpc server //grpc server
var grpcOpts []grpc.ServerOption var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter() prome.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)

View File

@ -12,7 +12,7 @@ import (
relationTb "Open_IM/pkg/common/db/table/relation" relationTb "Open_IM/pkg/common/db/table/relation"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/middleware" "Open_IM/pkg/common/middleware"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tokenverify"
"Open_IM/pkg/common/tracelog" "Open_IM/pkg/common/tracelog"
pbFriend "Open_IM/pkg/proto/friend" pbFriend "Open_IM/pkg/proto/friend"
@ -75,11 +75,11 @@ func (s *friendServer) Run() {
var grpcOpts []grpc.ServerOption var grpcOpts []grpc.ServerOption
grpcOpts = append(grpcOpts, grpc.UnaryInterceptor(middleware.RpcServerInterceptor)) grpcOpts = append(grpcOpts, grpc.UnaryInterceptor(middleware.RpcServerInterceptor))
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter() prome.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)

View File

@ -1,7 +1,7 @@
package group package group
import ( import (
cbApi "Open_IM/pkg/callback_struct" "Open_IM/pkg/callbackstruct"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db/table/relation" "Open_IM/pkg/common/db/table/relation"

View File

@ -12,7 +12,7 @@ import (
"Open_IM/pkg/common/db/unrelation" "Open_IM/pkg/common/db/unrelation"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
"Open_IM/pkg/common/middleware" "Open_IM/pkg/common/middleware"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tokenverify"
"Open_IM/pkg/common/tracelog" "Open_IM/pkg/common/tracelog"
discoveryRegistry "Open_IM/pkg/discoveryregistry" discoveryRegistry "Open_IM/pkg/discoveryregistry"
@ -70,9 +70,7 @@ func NewGroupServer(port int) *groupServer {
if err != nil { if err != nil {
panic(err.Error()) panic(err.Error())
} }
//conns, err := g.registerCenter.GetConns(config.Config.RpcRegisterName.OpenImConversationName) //conns, err := g.registerCenter.GetConns(config.Config.RpcRegisterName.OpenImConversationName)
g.GroupInterface = controller.NewGroupInterface(mysql.GormConn(), redis.GetClient(), mongo.GetClient()) g.GroupInterface = controller.NewGroupInterface(mysql.GormConn(), redis.GetClient(), mongo.GetClient())
return &g return &g
} }
@ -98,11 +96,11 @@ func (s *groupServer) Run() {
grpc.UnaryInterceptor(middleware.RpcServerInterceptor), grpc.UnaryInterceptor(middleware.RpcServerInterceptor),
} }
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter() prome.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)

View File

@ -9,7 +9,7 @@ import (
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
sdkws "Open_IM/pkg/proto/sdkws" sdkws "Open_IM/pkg/proto/sdkws"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
) )
func (rpc *rpcChat) GetMaxAndMinSeq(_ context.Context, in *sdkws.GetMaxAndMinSeqReq) (*sdkws.GetMaxAndMinSeqResp, error) { func (rpc *rpcChat) GetMaxAndMinSeq(_ context.Context, in *sdkws.GetMaxAndMinSeqReq) (*sdkws.GetMaxAndMinSeqResp, error) {
@ -53,25 +53,25 @@ func (rpc *rpcChat) PullMessageBySeqList(_ context.Context, in *sdkws.PullMessag
redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(in.UserID, in.SeqList, in.OperationID) redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(in.UserID, in.SeqList, in.OperationID)
if err != nil { if err != nil {
if err != go_redis.Nil { if err != go_redis.Nil {
promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList)) prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList) log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList)
} else { } else {
log.Debug(in.OperationID, "get message from redis is nil", failedSeqList) log.Debug(in.OperationID, "get message from redis is nil", failedSeqList)
} }
msgList, err1 := commonDB.DB.GetMsgBySeqListMongo2(in.UserID, failedSeqList, in.OperationID) msgList, err1 := commonDB.DB.GetMsgBySeqs(in.UserID, failedSeqList, in.OperationID)
if err1 != nil { if err1 != nil {
promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList)) prome.PromeAdd(prome.MsgPullFromMongoFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err1.Error()) log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err1.Error())
resp.ErrCode = 201 resp.ErrCode = 201
resp.ErrMsg = err1.Error() resp.ErrMsg = err1.Error()
return resp, nil return resp, nil
} else { } else {
promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList)) prome.PromeAdd(prome.MsgPullFromMongoSuccessCounter, len(msgList))
redisMsgList = append(redisMsgList, msgList...) redisMsgList = append(redisMsgList, msgList...)
resp.List = redisMsgList resp.List = redisMsgList
} }
} else { } else {
promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList)) prome.PromeAdd(prome.MsgPullFromRedisSuccessCounter, len(redisMsgList))
resp.List = redisMsgList resp.List = redisMsgList
} }
@ -80,26 +80,26 @@ func (rpc *rpcChat) PullMessageBySeqList(_ context.Context, in *sdkws.PullMessag
redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(k, v.SeqList, in.OperationID) redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(k, v.SeqList, in.OperationID)
if err != nil { if err != nil {
if err != go_redis.Nil { if err != go_redis.Nil {
promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList)) prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList) log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList)
} else { } else {
log.Debug(in.OperationID, "get message from redis is nil", failedSeqList) log.Debug(in.OperationID, "get message from redis is nil", failedSeqList)
} }
msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqListMongo(k, failedSeqList, in.OperationID) msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqs(k, failedSeqList, in.OperationID)
if err1 != nil { if err1 != nil {
promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList)) prome.PromeAdd(prome.MsgPullFromMongoFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err1.Error()) log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err1.Error())
resp.ErrCode = 201 resp.ErrCode = 201
resp.ErrMsg = err1.Error() resp.ErrMsg = err1.Error()
return resp, nil return resp, nil
} else { } else {
promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList)) prome.PromeAdd(prome.MsgPullFromMongoSuccessCounter, len(msgList))
redisMsgList = append(redisMsgList, msgList...) redisMsgList = append(redisMsgList, msgList...)
x.MsgDataList = redisMsgList x.MsgDataList = redisMsgList
m[k] = x m[k] = x
} }
} else { } else {
promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList)) prome.PromeAdd(prome.MsgPullFromRedisSuccessCounter, len(redisMsgList))
x.MsgDataList = redisMsgList x.MsgDataList = redisMsgList
m[k] = x m[k] = x
} }

View File

@ -3,7 +3,7 @@ package msg
import ( import (
commonDB "Open_IM/pkg/common/db" commonDB "Open_IM/pkg/common/db"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/proto/msg" "Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
@ -16,20 +16,20 @@ func (rpc *rpcChat) GetSuperGroupMsg(context context.Context, req *msg.GetSuperG
redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(req.GroupID, []uint32{req.Seq}, req.OperationID) redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(req.GroupID, []uint32{req.Seq}, req.OperationID)
if err != nil { if err != nil {
if err != go_redis.Nil { if err != go_redis.Nil {
promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList)) prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqList))
log.Error(req.OperationID, "get message from redis exception", err.Error(), failedSeqList) log.Error(req.OperationID, "get message from redis exception", err.Error(), failedSeqList)
} else { } else {
log.Debug(req.OperationID, "get message from redis is nil", failedSeqList) log.Debug(req.OperationID, "get message from redis is nil", failedSeqList)
} }
msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqListMongo(req.GroupID, failedSeqList, req.OperationID) msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqs(req.GroupID, failedSeqList, req.OperationID)
if err1 != nil { if err1 != nil {
promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList)) prome.PromeAdd(prome.MsgPullFromMongoFailedCounter, len(failedSeqList))
log.Error(req.OperationID, "GetSuperGroupMsg data error", req.String(), err.Error()) log.Error(req.OperationID, "GetSuperGroupMsg data error", req.String(), err.Error())
resp.ErrCode = 201 resp.ErrCode = 201
resp.ErrMsg = err.Error() resp.ErrMsg = err.Error()
return resp, nil return resp, nil
} else { } else {
promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList)) prome.PromeAdd(prome.MsgPullFromMongoSuccessCounter, len(msgList))
redisMsgList = append(redisMsgList, msgList...) redisMsgList = append(redisMsgList, msgList...)
for _, m := range msgList { for _, m := range msgList {
resp.MsgData = m resp.MsgData = m
@ -37,7 +37,7 @@ func (rpc *rpcChat) GetSuperGroupMsg(context context.Context, req *msg.GetSuperG
} }
} else { } else {
promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList)) prome.PromeAdd(prome.MsgPullFromRedisSuccessCounter, len(redisMsgList))
for _, m := range redisMsgList { for _, m := range redisMsgList {
resp.MsgData = m resp.MsgData = m
} }

View File

@ -6,7 +6,7 @@ import (
"Open_IM/pkg/common/db" "Open_IM/pkg/common/db"
"Open_IM/pkg/common/kafka" "Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log" "Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/proto/msg" "Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"github.com/OpenIMSDK/getcdv3" "github.com/OpenIMSDK/getcdv3"
@ -66,21 +66,21 @@ func (rpc *rpcChat) initPrometheus() {
// Name: "send_msg_failed", // Name: "send_msg_failed",
// Help: "The number of send msg failed", // Help: "The number of send msg failed",
//}) //})
promePkg.NewMsgPullFromRedisSuccessCounter() prome.NewMsgPullFromRedisSuccessCounter()
promePkg.NewMsgPullFromRedisFailedCounter() prome.NewMsgPullFromRedisFailedCounter()
promePkg.NewMsgPullFromMongoSuccessCounter() prome.NewMsgPullFromMongoSuccessCounter()
promePkg.NewMsgPullFromMongoFailedCounter() prome.NewMsgPullFromMongoFailedCounter()
promePkg.NewSingleChatMsgRecvSuccessCounter() prome.NewSingleChatMsgRecvSuccessCounter()
promePkg.NewGroupChatMsgRecvSuccessCounter() prome.NewGroupChatMsgRecvSuccessCounter()
promePkg.NewWorkSuperGroupChatMsgRecvSuccessCounter() prome.NewWorkSuperGroupChatMsgRecvSuccessCounter()
promePkg.NewSingleChatMsgProcessSuccessCounter() prome.NewSingleChatMsgProcessSuccessCounter()
promePkg.NewSingleChatMsgProcessFailedCounter() prome.NewSingleChatMsgProcessFailedCounter()
promePkg.NewGroupChatMsgProcessSuccessCounter() prome.NewGroupChatMsgProcessSuccessCounter()
promePkg.NewGroupChatMsgProcessFailedCounter() prome.NewGroupChatMsgProcessFailedCounter()
promePkg.NewWorkSuperGroupChatMsgProcessSuccessCounter() prome.NewWorkSuperGroupChatMsgProcessSuccessCounter()
promePkg.NewWorkSuperGroupChatMsgProcessFailedCounter() prome.NewWorkSuperGroupChatMsgProcessFailedCounter()
} }
func (rpc *rpcChat) Run() { func (rpc *rpcChat) Run() {
@ -104,11 +104,11 @@ func (rpc *rpcChat) Run() {
grpc.MaxSendMsgSize(sendSize), grpc.MaxSendMsgSize(sendSize),
} }
if config.Config.Prometheus.Enable { if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter() prome.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)
@ -146,9 +146,9 @@ func (rpc *rpcChat) runCh() {
case msg := <-rpc.delMsgCh: case msg := <-rpc.delMsgCh:
log.NewInfo(msg.OperationID, utils.GetSelfFuncName(), "delmsgch recv new: ", msg) log.NewInfo(msg.OperationID, utils.GetSelfFuncName(), "delmsgch recv new: ", msg)
db.DB.DelMsgFromCache(msg.UserID, msg.SeqList, msg.OperationID) db.DB.DelMsgFromCache(msg.UserID, msg.SeqList, msg.OperationID)
unexistSeqList, err := db.DB.DelMsgBySeqList(msg.UserID, msg.SeqList, msg.OperationID) unexistSeqList, err := db.DB.DelMsgBySeqs(msg.UserID, msg.SeqList, msg.OperationID)
if err != nil { if err != nil {
log.NewError(msg.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqList args: ", msg.UserID, msg.SeqList, msg.OperationID, err.Error()) log.NewError(msg.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqs args: ", msg.UserID, msg.SeqList, msg.OperationID, err.Error())
continue continue
} }
if len(unexistSeqList) > 0 { if len(unexistSeqList) > 0 {

View File

@ -23,7 +23,7 @@ import (
"sync" "sync"
"time" "time"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
go_redis "github.com/go-redis/redis/v8" go_redis "github.com/go-redis/redis/v8"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
) )
@ -369,7 +369,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
} }
switch pb.MsgData.SessionType { switch pb.MsgData.SessionType {
case constant.SingleChatType: case constant.SingleChatType:
promePkg.PromeInc(promePkg.SingleChatMsgRecvSuccessCounter) prome.PromeInc(prome.SingleChatMsgRecvSuccessCounter)
// callback // callback
t1 = time.Now() t1 = time.Now()
callbackResp := callbackBeforeSendSingleMsg(pb) callbackResp := callbackBeforeSendSingleMsg(pb)
@ -382,7 +382,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
callbackResp.ErrCode = 201 callbackResp.ErrCode = 201
} }
log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSingleMsg result", "end rpc and return", callbackResp) log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSingleMsg result", "end rpc and return", callbackResp)
promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter) prome.PromeInc(prome.SingleChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0) return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0)
} }
t1 = time.Now() t1 = time.Now()
@ -402,7 +402,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
log.Info(pb.OperationID, "sendMsgToWriter ", " cost time: ", time.Since(t1)) log.Info(pb.OperationID, "sendMsgToWriter ", " cost time: ", time.Since(t1))
if err1 != nil { if err1 != nil {
log.NewError(msgToMQSingle.OperationID, "kafka send msg err :RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String(), err1.Error()) log.NewError(msgToMQSingle.OperationID, "kafka send msg err :RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String(), err1.Error())
promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter) prome.PromeInc(prome.SingleChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
} }
} }
@ -412,7 +412,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
log.Info(pb.OperationID, "sendMsgToWriter ", " cost time: ", time.Since(t1)) log.Info(pb.OperationID, "sendMsgToWriter ", " cost time: ", time.Since(t1))
if err2 != nil { if err2 != nil {
log.NewError(msgToMQSingle.OperationID, "kafka send msg err:SendID", msgToMQSingle.MsgData.SendID, msgToMQSingle.String()) log.NewError(msgToMQSingle.OperationID, "kafka send msg err:SendID", msgToMQSingle.MsgData.SendID, msgToMQSingle.String())
promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter) prome.PromeInc(prome.SingleChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
} }
} }
@ -423,11 +423,11 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSingleMsg resp: ", callbackResp) log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSingleMsg resp: ", callbackResp)
} }
promePkg.PromeInc(promePkg.SingleChatMsgProcessSuccessCounter) prome.PromeInc(prome.SingleChatMsgProcessSuccessCounter)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
case constant.GroupChatType: case constant.GroupChatType:
// callback // callback
promePkg.PromeInc(promePkg.GroupChatMsgRecvSuccessCounter) prome.PromeInc(prome.GroupChatMsgRecvSuccessCounter)
callbackResp := callbackBeforeSendGroupMsg(pb) callbackResp := callbackBeforeSendGroupMsg(pb)
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendGroupMsg resp:", callbackResp) log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendGroupMsg resp:", callbackResp)
@ -437,12 +437,12 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
callbackResp.ErrCode = 201 callbackResp.ErrCode = 201
} }
log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSingleMsg result", "end rpc and return", callbackResp) log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSingleMsg result", "end rpc and return", callbackResp)
promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter) prome.PromeInc(prome.GroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0) return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0)
} }
var memberUserIDList []string var memberUserIDList []string
if flag, errCode, errMsg, memberUserIDList = rpc.messageVerification(ctx, pb); !flag { if flag, errCode, errMsg, memberUserIDList = rpc.messageVerification(ctx, pb); !flag {
promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter) prome.PromeInc(prome.GroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, errCode, errMsg, "", 0) return returnMsg(&replay, pb, errCode, errMsg, "", 0)
} }
log.Debug(pb.OperationID, "GetGroupAllMember userID list", memberUserIDList, "len: ", len(memberUserIDList)) log.Debug(pb.OperationID, "GetGroupAllMember userID list", memberUserIDList, "len: ", len(memberUserIDList))
@ -506,7 +506,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
} }
if !sendTag { if !sendTag {
log.NewWarn(pb.OperationID, "send tag is ", sendTag) log.NewWarn(pb.OperationID, "send tag is ", sendTag)
promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter) prome.PromeInc(prome.GroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
} else { } else {
if pb.MsgData.ContentType == constant.AtText { if pb.MsgData.ContentType == constant.AtText {
@ -571,7 +571,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
}() }()
} }
log.Debug(pb.OperationID, "send msg cost time3 ", time.Since(t1), pb.MsgData.ClientMsgID) log.Debug(pb.OperationID, "send msg cost time3 ", time.Since(t1), pb.MsgData.ClientMsgID)
promePkg.PromeInc(promePkg.GroupChatMsgProcessSuccessCounter) prome.PromeInc(prome.GroupChatMsgProcessSuccessCounter)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
} }
case constant.NotificationChatType: case constant.NotificationChatType:
@ -595,7 +595,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
log.Debug(pb.OperationID, "send msg cost time ", time.Since(t1), pb.MsgData.ClientMsgID) log.Debug(pb.OperationID, "send msg cost time ", time.Since(t1), pb.MsgData.ClientMsgID)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
case constant.SuperGroupChatType: case constant.SuperGroupChatType:
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgRecvSuccessCounter) prome.PromeInc(prome.WorkSuperGroupChatMsgRecvSuccessCounter)
// callback // callback
callbackResp := callbackBeforeSendGroupMsg(pb) callbackResp := callbackBeforeSendGroupMsg(pb)
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
@ -605,12 +605,12 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
if callbackResp.ErrCode == 0 { if callbackResp.ErrCode == 0 {
callbackResp.ErrCode = 201 callbackResp.ErrCode = 201
} }
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter) prome.PromeInc(prome.WorkSuperGroupChatMsgProcessFailedCounter)
log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSuperGroupMsg result", "end rpc and return", callbackResp) log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSuperGroupMsg result", "end rpc and return", callbackResp)
return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0) return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0)
} }
if flag, errCode, errMsg, _ = rpc.messageVerification(ctx, pb); !flag { if flag, errCode, errMsg, _ = rpc.messageVerification(ctx, pb); !flag {
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter) prome.PromeInc(prome.WorkSuperGroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, errCode, errMsg, "", 0) return returnMsg(&replay, pb, errCode, errMsg, "", 0)
} }
msgToMQSingle.MsgData = pb.MsgData msgToMQSingle.MsgData = pb.MsgData
@ -618,7 +618,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
err1 := rpc.sendMsgToWriter(ctx, &msgToMQSingle, msgToMQSingle.MsgData.GroupID, constant.OnlineStatus) err1 := rpc.sendMsgToWriter(ctx, &msgToMQSingle, msgToMQSingle.MsgData.GroupID, constant.OnlineStatus)
if err1 != nil { if err1 != nil {
log.NewError(msgToMQSingle.OperationID, "kafka send msg err:RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String()) log.NewError(msgToMQSingle.OperationID, "kafka send msg err:RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String())
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter) prome.PromeInc(prome.WorkSuperGroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0) return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
} }
// callback // callback
@ -626,7 +626,7 @@ func (rpc *rpcChat) SendMsg(ctx context.Context, pb *pbChat.SendMsgReq) (*pbChat
if callbackResp.ErrCode != 0 { if callbackResp.ErrCode != 0 {
log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSuperGroupMsg resp: ", callbackResp) log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSuperGroupMsg resp: ", callbackResp)
} }
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessSuccessCounter) prome.PromeInc(prome.WorkSuperGroupChatMsgProcessSuccessCounter)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime) return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
default: default:

View File

@ -1 +0,0 @@
package user

View File

@ -3,6 +3,7 @@ package user
import ( import (
"Open_IM/internal/common/convert" "Open_IM/internal/common/convert"
"Open_IM/internal/common/rpc_server" "Open_IM/internal/common/rpc_server"
"Open_IM/internal/common/rpcserver"
chat "Open_IM/internal/rpc/msg" chat "Open_IM/internal/rpc/msg"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
@ -65,7 +66,7 @@ func (s *userServer) Run() {
prome.NewGrpcRequestFailedCounter() prome.NewGrpcRequestFailedCounter()
prome.NewGrpcRequestSuccessCounter() prome.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{ grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme), // grpc.UnaryInterceptor(prome.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor), grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor), grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...) }...)

View File

@ -225,10 +225,6 @@ type config struct {
Addr []string `yaml:"addr"` Addr []string `yaml:"addr"`
Topic string `yaml:"topic"` Topic string `yaml:"topic"`
} }
//Ws2mschatOffline struct {
// Addr []string `yaml:"addr"`
// Topic string `yaml:"topic"`
//}
MsgToMongo struct { MsgToMongo struct {
Addr []string `yaml:"addr"` Addr []string `yaml:"addr"`
Topic string `yaml:"topic"` Topic string `yaml:"topic"`
@ -252,7 +248,6 @@ type config struct {
Secret string `yaml:"secret"` Secret string `yaml:"secret"`
MultiLoginPolicy int `yaml:"multiloginpolicy"` MultiLoginPolicy int `yaml:"multiloginpolicy"`
ChatPersistenceMysql bool `yaml:"chatpersistencemysql"` ChatPersistenceMysql bool `yaml:"chatpersistencemysql"`
ReliableStorage bool `yaml:"reliablestorage"`
MsgCacheTimeout int `yaml:"msgCacheTimeout"` MsgCacheTimeout int `yaml:"msgCacheTimeout"`
GroupMessageHasReadReceiptEnable bool `yaml:"groupMessageHasReadReceiptEnable"` GroupMessageHasReadReceiptEnable bool `yaml:"groupMessageHasReadReceiptEnable"`
SingleMessageHasReadReceiptEnable bool `yaml:"singleMessageHasReadReceiptEnable"` SingleMessageHasReadReceiptEnable bool `yaml:"singleMessageHasReadReceiptEnable"`

View File

@ -21,7 +21,7 @@ const (
conversationExpireTime = time.Second * 60 * 60 * 12 conversationExpireTime = time.Second * 60 * 60 * 12
) )
// args fn will exec when no data in cache // arg fn will exec when no data in cache
type ConversationCache interface { type ConversationCache interface {
// get user's conversationIDs from cache // get user's conversationIDs from cache
GetUserConversationIDs(ctx context.Context, userID string, fn func(ctx context.Context, userID string) ([]string, error)) ([]string, error) GetUserConversationIDs(ctx context.Context, userID string, fn func(ctx context.Context, userID string) ([]string, error)) ([]string, error)
@ -96,7 +96,7 @@ func (c *ConversationRedis) GetUserConversationIDs(ctx context.Context, ownerUse
// return nil, utils.Wrap(err, "") // return nil, utils.Wrap(err, "")
//} //}
//return conversationIDs, nil //return conversationIDs, nil
return GetCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), time.Second*30*60, func(ctx context.Context) ([]string, error) { return GetCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), conversationExpireTime, func(ctx context.Context) ([]string, error) {
return f(ownerUserID) return f(ownerUserID)
}) })
} }
@ -122,7 +122,7 @@ func (c *ConversationRedis) GetUserConversationIDs1(ctx context.Context, ownerUs
// return nil, utils.Wrap(err, "") // return nil, utils.Wrap(err, "")
//} //}
//return conversationIDs, nil //return conversationIDs, nil
return GetCache1[[]string](c.rcClient, c.getConversationIDsKey(ownerUserID), time.Second*30*60, fn) return GetCache1[[]string](c.rcClient, c.getConversationIDsKey(ownerUserID), conversationExpireTime, fn)
} }
//func GetCache1[T any](rcClient *rockscache.Client, key string, expire time.Duration, fn func() (any, error)) (T, error) { //func GetCache1[T any](rcClient *rockscache.Client, key string, expire time.Duration, fn func() (any, error)) (T, error) {

View File

@ -3,10 +3,9 @@ package cache
import ( import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
log2 "Open_IM/pkg/common/log"
pbChat "Open_IM/pkg/proto/msg" pbChat "Open_IM/pkg/proto/msg"
pbRtc "Open_IM/pkg/proto/rtc" pbRtc "Open_IM/pkg/proto/rtc"
pbCommon "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/proto/sdkws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
"errors" "errors"
@ -39,13 +38,52 @@ const (
) )
type Cache interface { type Cache interface {
IncrUserSeq(uid string) (uint64, error) IncrUserSeq(ctx context.Context, userID string) (uint64, error)
GetUserMaxSeq(uid string) (uint64, error) GetUserMaxSeq(ctx context.Context, userID string) (uint64, error)
SetUserMaxSeq(uid string, maxSeq uint64) error SetUserMaxSeq(ctx context.Context, userID string, maxSeq uint64) error
SetUserMinSeq(uid string, minSeq uint32) (err error) SetUserMinSeq(ctx context.Context, userID string, minSeq uint64) (err error)
GetUserMinSeq(uid string) (uint64, error) GetUserMinSeq(ctx context.Context, userID string) (uint64, error)
SetGroupUserMinSeq(groupID, userID string, minSeq uint64) (err error) SetGroupUserMinSeq(ctx context.Context, groupID, userID string, minSeq uint64) (err error)
GetGroupUserMinSeq(groupID, userID string) (uint64, error) GetGroupUserMinSeq(ctx context.Context, groupID, userID string) (uint64, error)
GetGroupMaxSeq(ctx context.Context, groupID string) (uint64, error)
IncrGroupMaxSeq(ctx context.Context, groupID string) (uint64, error)
SetGroupMaxSeq(ctx context.Context, groupID string, maxSeq uint64) error
SetGroupMinSeq(ctx context.Context, groupID string, minSeq uint32) error
AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error
GetTokenMapByUidPid(ctx context.Context, userID, platformID string) (map[string]int, error)
SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
GetMessageListBySeq(ctx context.Context, userID string, seqList []uint32) (seqMsg []*sdkws.MsgData, failedSeqList []uint32, err error)
SetMessageToCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) (int, error)
DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) error
CleanUpOneUserAllMsgFromRedis(ctx context.Context, userID string) error
HandleSignalInfo(ctx context.Context, msg *sdkws.MsgData, pushToUserID string) (isSend bool, err error)
GetSignalInfoFromCacheByClientMsgID(ctx context.Context, clientMsgID string) (invitationInfo *pbRtc.SignalInviteReq, err error)
GetAvailableSignalInvitationInfo(ctx context.Context, userID string) (invitationInfo *pbRtc.SignalInviteReq, err error)
DelUserSignalList(ctx context.Context, userID string) error
DelMsgFromCache(ctx context.Context, userID string, seqList []uint32) error
SetGetuiToken(ctx context.Context, token string, expireTime int64) error
GetGetuiToken(ctx context.Context) (string, error)
SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error
GetGetuiTaskID(ctx context.Context) (string, error)
SetSendMsgStatus(ctx context.Context, status int32) error
GetSendMsgStatus(ctx context.Context) (int, error)
SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error)
GetFcmToken(ctx context.Context, account string, platformID int) (string, error)
DelFcmToken(ctx context.Context, account string, platformID int) error
IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error
GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
JudgeMessageReactionEXISTS(ctx context.Context, clientMsgID string, sessionType int32) (bool, error)
GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error)
DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error
SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error)
GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error)
SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error
LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error
} }
// native redis operate // native redis operate
@ -54,7 +92,7 @@ type RedisClient struct {
rdb redis.UniversalClient rdb redis.UniversalClient
} }
func (r *RedisClient) InitRedis() { func (r *RedisClient) InitRedis() error {
var rdb redis.UniversalClient var rdb redis.UniversalClient
var err error var err error
ctx := context.Background() ctx := context.Background()
@ -67,8 +105,8 @@ func (r *RedisClient) InitRedis() {
}) })
_, err = rdb.Ping(ctx).Result() _, err = rdb.Ping(ctx).Result()
if err != nil { if err != nil {
fmt.Println("redis cluster failed address ", config.Config.Redis.DBAddress) fmt.Println("redis cluster failed address ", config.Config.Redis.DBAddress, config.Config.Redis.DBUserName, config.Config.Redis.DBPassWord)
panic(err.Error() + " redis cluster " + config.Config.Redis.DBUserName + config.Config.Redis.DBPassWord) return err
} }
} else { } else {
rdb = redis.NewClient(&redis.Options{ rdb = redis.NewClient(&redis.Options{
@ -80,10 +118,12 @@ func (r *RedisClient) InitRedis() {
}) })
_, err = rdb.Ping(ctx).Result() _, err = rdb.Ping(ctx).Result()
if err != nil { if err != nil {
panic(err.Error() + " redis " + config.Config.Redis.DBAddress[0] + config.Config.Redis.DBUserName + config.Config.Redis.DBPassWord) fmt.Println(" redis " + config.Config.Redis.DBAddress[0] + config.Config.Redis.DBUserName + config.Config.Redis.DBPassWord)
return err
} }
} }
r.rdb = rdb r.rdb = rdb
return nil
} }
func (r *RedisClient) GetClient() redis.UniversalClient { func (r *RedisClient) GetClient() redis.UniversalClient {
@ -95,80 +135,78 @@ func NewRedisClient(rdb redis.UniversalClient) *RedisClient {
} }
// Perform seq auto-increment operation of user messages // Perform seq auto-increment operation of user messages
func (r *RedisClient) IncrUserSeq(uid string) (uint64, error) { func (r *RedisClient) IncrUserSeq(ctx context.Context, uid string) (uint64, error) {
key := userIncrSeq + uid key := userIncrSeq + uid
seq, err := r.rdb.Incr(context.Background(), key).Result() seq, err := r.rdb.Incr(context.Background(), key).Result()
return uint64(seq), err return uint64(seq), err
} }
// Get the largest Seq // Get the largest Seq
func (r *RedisClient) GetUserMaxSeq(uid string) (uint64, error) { func (r *RedisClient) GetUserMaxSeq(ctx context.Context, uid string) (uint64, error) {
key := userIncrSeq + uid key := userIncrSeq + uid
seq, err := r.rdb.Get(context.Background(), key).Result() seq, err := r.rdb.Get(context.Background(), key).Result()
return uint64(utils.StringToInt(seq)), err return uint64(utils.StringToInt(seq)), err
} }
// set the largest Seq // set the largest Seq
func (r *RedisClient) SetUserMaxSeq(uid string, maxSeq uint64) error { func (r *RedisClient) SetUserMaxSeq(ctx context.Context, uid string, maxSeq uint64) error {
key := userIncrSeq + uid key := userIncrSeq + uid
return r.rdb.Set(context.Background(), key, maxSeq, 0).Err() return r.rdb.Set(context.Background(), key, maxSeq, 0).Err()
} }
// Set the user's minimum seq // Set the user's minimum seq
func (r *RedisClient) SetUserMinSeq(uid string, minSeq uint32) (err error) { func (r *RedisClient) SetUserMinSeq(ctx context.Context, uid string, minSeq uint64) (err error) {
key := userMinSeq + uid key := userMinSeq + uid
return r.rdb.Set(context.Background(), key, minSeq, 0).Err() return r.rdb.Set(context.Background(), key, minSeq, 0).Err()
} }
// Get the smallest Seq // Get the smallest Seq
func (r *RedisClient) GetUserMinSeq(uid string) (uint64, error) { func (r *RedisClient) GetUserMinSeq(ctx context.Context, uid string) (uint64, error) {
key := userMinSeq + uid key := userMinSeq + uid
seq, err := r.rdb.Get(context.Background(), key).Result() seq, err := r.rdb.Get(context.Background(), key).Result()
return uint64(utils.StringToInt(seq)), err return uint64(utils.StringToInt(seq)), err
} }
func (r *RedisClient) SetGroupUserMinSeq(groupID, userID string, minSeq uint64) (err error) { func (r *RedisClient) SetGroupUserMinSeq(ctx context.Context, groupID, userID string, minSeq uint64) (err error) {
key := groupUserMinSeq + "g:" + groupID + "u:" + userID key := groupUserMinSeq + "g:" + groupID + "u:" + userID
return r.rdb.Set(context.Background(), key, minSeq, 0).Err() return r.rdb.Set(context.Background(), key, minSeq, 0).Err()
} }
func (r *RedisClient) GetGroupUserMinSeq(groupID, userID string) (uint64, error) { func (r *RedisClient) GetGroupUserMinSeq(ctx context.Context, groupID, userID string) (uint64, error) {
key := groupUserMinSeq + "g:" + groupID + "u:" + userID key := groupUserMinSeq + "g:" + groupID + "u:" + userID
seq, err := r.rdb.Get(context.Background(), key).Result() seq, err := r.rdb.Get(context.Background(), key).Result()
return uint64(utils.StringToInt(seq)), err return uint64(utils.StringToInt(seq)), err
} }
func (r *RedisClient) GetGroupMaxSeq(groupID string) (uint64, error) { func (r *RedisClient) GetGroupMaxSeq(ctx context.Context, groupID string) (uint64, error) {
key := groupMaxSeq + groupID key := groupMaxSeq + groupID
seq, err := r.rdb.Get(context.Background(), key).Result() seq, err := r.rdb.Get(context.Background(), key).Result()
return uint64(utils.StringToInt(seq)), err return uint64(utils.StringToInt(seq)), err
} }
func (r *RedisClient) IncrGroupMaxSeq(groupID string) (uint64, error) { func (r *RedisClient) IncrGroupMaxSeq(ctx context.Context, groupID string) (uint64, error) {
key := groupMaxSeq + groupID key := groupMaxSeq + groupID
seq, err := r.rdb.Incr(context.Background(), key).Result() seq, err := r.rdb.Incr(context.Background(), key).Result()
return uint64(seq), err return uint64(seq), err
} }
func (r *RedisClient) SetGroupMaxSeq(groupID string, maxSeq uint64) error { func (r *RedisClient) SetGroupMaxSeq(ctx context.Context, groupID string, maxSeq uint64) error {
key := groupMaxSeq + groupID key := groupMaxSeq + groupID
return r.rdb.Set(context.Background(), key, maxSeq, 0).Err() return r.rdb.Set(context.Background(), key, maxSeq, 0).Err()
} }
func (r *RedisClient) SetGroupMinSeq(groupID string, minSeq uint32) error { func (r *RedisClient) SetGroupMinSeq(ctx context.Context, groupID string, minSeq uint32) error {
key := groupMinSeq + groupID key := groupMinSeq + groupID
return r.rdb.Set(context.Background(), key, minSeq, 0).Err() return r.rdb.Set(context.Background(), key, minSeq, 0).Err()
} }
// Store userid and platform class to redis // Store userid and platform class to redis
func (r *RedisClient) AddTokenFlag(userID string, platformID int, token string, flag int) error { func (r *RedisClient) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID) key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
log2.NewDebug("", "add token key is ", key)
return r.rdb.HSet(context.Background(), key, token, flag).Err() return r.rdb.HSet(context.Background(), key, token, flag).Err()
} }
func (r *RedisClient) GetTokenMapByUidPid(userID, platformID string) (map[string]int, error) { func (r *RedisClient) GetTokenMapByUidPid(ctx context.Context, userID, platformID string) (map[string]int, error) {
key := uidPidToken + userID + ":" + platformID key := uidPidToken + userID + ":" + platformID
log2.NewDebug("", "get token key is ", key)
m, err := r.rdb.HGetAll(context.Background(), key).Result() m, err := r.rdb.HGetAll(context.Background(), key).Result()
mm := make(map[string]int) mm := make(map[string]int)
for k, v := range m { for k, v := range m {
@ -176,7 +214,7 @@ func (r *RedisClient) GetTokenMapByUidPid(userID, platformID string) (map[string
} }
return mm, err return mm, err
} }
func (r *RedisClient) SetTokenMapByUidPid(userID string, platformID int, m map[string]int) error { func (r *RedisClient) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error {
key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID) key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
mm := make(map[string]interface{}) mm := make(map[string]interface{})
for k, v := range m { for k, v := range m {
@ -185,12 +223,12 @@ func (r *RedisClient) SetTokenMapByUidPid(userID string, platformID int, m map[s
return r.rdb.HSet(context.Background(), key, mm).Err() return r.rdb.HSet(context.Background(), key, mm).Err()
} }
func (r *RedisClient) DeleteTokenByUidPid(userID string, platformID int, fields []string) error { func (r *RedisClient) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error {
key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID) key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
return r.rdb.HDel(context.Background(), key, fields...).Err() return r.rdb.HDel(context.Background(), key, fields...).Err()
} }
func (r *RedisClient) GetMessageListBySeq(userID string, seqList []uint32, operationID string) (seqMsg []*pbCommon.MsgData, failedSeqList []uint32, errResult error) { func (r *RedisClient) GetMessageListBySeq(ctx context.Context, userID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, failedSeqList []uint32, errResult error) {
for _, v := range seqList { for _, v := range seqList {
//MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1 //MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1
key := messageCache + userID + "_" + strconv.Itoa(int(v)) key := messageCache + userID + "_" + strconv.Itoa(int(v))
@ -198,16 +236,13 @@ func (r *RedisClient) GetMessageListBySeq(userID string, seqList []uint32, opera
if err != nil { if err != nil {
errResult = err errResult = err
failedSeqList = append(failedSeqList, v) failedSeqList = append(failedSeqList, v)
log2.Debug(operationID, "redis get message error: ", err.Error(), v)
} else { } else {
msg := pbCommon.MsgData{} msg := sdkws.MsgData{}
err = jsonpb.UnmarshalString(result, &msg) err = jsonpb.UnmarshalString(result, &msg)
if err != nil { if err != nil {
errResult = err errResult = err
failedSeqList = append(failedSeqList, v) failedSeqList = append(failedSeqList, v)
log2.NewWarn(operationID, "Unmarshal err ", result, err.Error())
} else { } else {
log2.NewDebug(operationID, "redis get msg is ", msg.String())
seqMsg = append(seqMsg, &msg) seqMsg = append(seqMsg, &msg)
} }
@ -216,48 +251,40 @@ func (r *RedisClient) GetMessageListBySeq(userID string, seqList []uint32, opera
return seqMsg, failedSeqList, errResult return seqMsg, failedSeqList, errResult
} }
func (r *RedisClient) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) (error, int) { func (r *RedisClient) SetMessageToCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ, uid string) (int, error) {
ctx := context.Background()
pipe := r.rdb.Pipeline() pipe := r.rdb.Pipeline()
var failedList []pbChat.MsgDataToMQ var failedList []pbChat.MsgDataToMQ
for _, msg := range msgList { for _, msg := range msgList {
key := messageCache + uid + "_" + strconv.Itoa(int(msg.MsgData.Seq)) key := messageCache + uid + "_" + strconv.Itoa(int(msg.MsgData.Seq))
s, err := utils.Pb2String(msg.MsgData) s, err := utils.Pb2String(msg.MsgData)
if err != nil { if err != nil {
log2.NewWarn(operationID, utils.GetSelfFuncName(), "Pb2String failed", msg.MsgData.String(), uid, err.Error())
continue continue
} }
log2.NewDebug(operationID, "convert string is ", s)
err = pipe.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err() err = pipe.Set(ctx, key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err()
//err = r.rdb.HMSet(context.Background(), "12", map[string]interface{}{"1": 2, "343": false}).Err() //err = r.rdb.HMSet(context.Background(), "12", map[string]interface{}{"1": 2, "343": false}).Err()
if err != nil { if err != nil {
log2.NewWarn(operationID, utils.GetSelfFuncName(), "redis failed", "args:", key, *msg, uid, s, err.Error())
failedList = append(failedList, *msg) failedList = append(failedList, *msg)
} }
} }
if len(failedList) != 0 { if len(failedList) != 0 {
return errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList, operationID)), len(failedList) return len(failedList), errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList))
} }
_, err := pipe.Exec(ctx) _, err := pipe.Exec(ctx)
return err, 0 return 0, err
} }
func (r *RedisClient) DeleteMessageFromCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) error { func (r *RedisClient) DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) error {
ctx := context.Background()
for _, msg := range msgList { for _, msg := range msgList {
key := messageCache + uid + "_" + strconv.Itoa(int(msg.MsgData.Seq)) key := messageCache + userID + "_" + strconv.Itoa(int(msg.MsgData.Seq))
err := r.rdb.Del(ctx, key).Err() err := r.rdb.Del(ctx, key).Err()
if err != nil { if err != nil {
log2.NewWarn(operationID, utils.GetSelfFuncName(), "redis failed", "args:", key, uid, err.Error(), msgList)
} }
} }
return nil return nil
} }
func (r *RedisClient) CleanUpOneUserAllMsgFromRedis(userID string, operationID string) error { func (r *RedisClient) CleanUpOneUserAllMsgFromRedis(ctx context.Context, userID string) error {
ctx := context.Background()
key := messageCache + userID + "_" + "*" key := messageCache + userID + "_" + "*"
vals, err := r.rdb.Keys(ctx, key).Result() vals, err := r.rdb.Keys(ctx, key).Result()
log2.Debug(operationID, "vals: ", vals)
if err == redis.Nil { if err == redis.Nil {
return nil return nil
} }
@ -270,7 +297,7 @@ func (r *RedisClient) CleanUpOneUserAllMsgFromRedis(userID string, operationID s
return nil return nil
} }
func (r *RedisClient) HandleSignalInfo(operationID string, msg *pbCommon.MsgData, pushToUserID string) (isSend bool, err error) { func (r *RedisClient) HandleSignalInfo(ctx context.Context, operationID string, msg *sdkws.MsgData, pushToUserID string) (isSend bool, err error) {
req := &pbRtc.SignalReq{} req := &pbRtc.SignalReq{}
if err := proto.Unmarshal(msg.Content, req); err != nil { if err := proto.Unmarshal(msg.Content, req); err != nil {
return false, err return false, err
@ -294,9 +321,7 @@ func (r *RedisClient) HandleSignalInfo(operationID string, msg *pbCommon.MsgData
return false, nil return false, nil
} }
if isInviteSignal { if isInviteSignal {
log2.NewDebug(operationID, utils.GetSelfFuncName(), "invite userID list:", inviteeUserIDList)
for _, userID := range inviteeUserIDList { for _, userID := range inviteeUserIDList {
log2.NewInfo(operationID, utils.GetSelfFuncName(), "invite userID:", userID)
timeout, err := strconv.Atoi(config.Config.Rtc.SignalTimeout) timeout, err := strconv.Atoi(config.Config.Rtc.SignalTimeout)
if err != nil { if err != nil {
return false, err return false, err
@ -320,7 +345,7 @@ func (r *RedisClient) HandleSignalInfo(operationID string, msg *pbCommon.MsgData
return true, nil return true, nil
} }
func (r *RedisClient) GetSignalInfoFromCacheByClientMsgID(clientMsgID string) (invitationInfo *pbRtc.SignalInviteReq, err error) { func (r *RedisClient) GetSignalInfoFromCacheByClientMsgID(ctx context.Context, clientMsgID string) (invitationInfo *pbRtc.SignalInviteReq, err error) {
key := signalCache + clientMsgID key := signalCache + clientMsgID
invitationInfo = &pbRtc.SignalInviteReq{} invitationInfo = &pbRtc.SignalInviteReq{}
bytes, err := r.rdb.Get(context.Background(), key).Bytes() bytes, err := r.rdb.Get(context.Background(), key).Bytes()
@ -342,7 +367,7 @@ func (r *RedisClient) GetSignalInfoFromCacheByClientMsgID(clientMsgID string) (i
return invitationInfo, err return invitationInfo, err
} }
func (r *RedisClient) GetAvailableSignalInvitationInfo(userID string) (invitationInfo *pbRtc.SignalInviteReq, err error) { func (r *RedisClient) GetAvailableSignalInvitationInfo(ctx context.Context, userID string) (invitationInfo *pbRtc.SignalInviteReq, err error) {
keyList := signalListCache + userID keyList := signalListCache + userID
result := r.rdb.LPop(context.Background(), keyList) result := r.rdb.LPop(context.Background(), keyList)
if err = result.Err(); err != nil { if err = result.Err(); err != nil {
@ -352,76 +377,70 @@ func (r *RedisClient) GetAvailableSignalInvitationInfo(userID string) (invitatio
if err != nil { if err != nil {
return nil, utils.Wrap(err, "GetAvailableSignalInvitationInfo failed") return nil, utils.Wrap(err, "GetAvailableSignalInvitationInfo failed")
} }
log2.NewDebug("", utils.GetSelfFuncName(), result, result.String()) invitationInfo, err = r.GetSignalInfoFromCacheByClientMsgID(ctx, key)
invitationInfo, err = r.GetSignalInfoFromCacheByClientMsgID(key)
if err != nil { if err != nil {
return nil, utils.Wrap(err, "GetSignalInfoFromCacheByClientMsgID") return nil, utils.Wrap(err, "GetSignalInfoFromCacheByClientMsgID")
} }
err = r.DelUserSignalList(userID) err = r.DelUserSignalList(ctx, userID)
if err != nil { if err != nil {
return nil, utils.Wrap(err, "GetSignalInfoFromCacheByClientMsgID") return nil, utils.Wrap(err, "GetSignalInfoFromCacheByClientMsgID")
} }
return invitationInfo, nil return invitationInfo, nil
} }
func (r *RedisClient) DelUserSignalList(userID string) error { func (r *RedisClient) DelUserSignalList(ctx context.Context, userID string) error {
keyList := signalListCache + userID keyList := signalListCache + userID
err := r.rdb.Del(context.Background(), keyList).Err() err := r.rdb.Del(context.Background(), keyList).Err()
return err return err
} }
func (r *RedisClient) DelMsgFromCache(uid string, seqList []uint32, operationID string) { func (r *RedisClient) DelMsgFromCache(ctx context.Context, uid string, seqList []uint32, operationID string) {
for _, seq := range seqList { for _, seq := range seqList {
key := messageCache + uid + "_" + strconv.Itoa(int(seq)) key := messageCache + uid + "_" + strconv.Itoa(int(seq))
result, err := r.rdb.Get(context.Background(), key).Result() result, err := r.rdb.Get(context.Background(), key).Result()
if err != nil { if err != nil {
if err == redis.Nil { if err == redis.Nil {
log2.NewDebug(operationID, utils.GetSelfFuncName(), err.Error(), "redis nil")
} else { } else {
log2.NewError(operationID, utils.GetSelfFuncName(), err.Error(), key)
} }
continue continue
} }
var msg pbCommon.MsgData var msg sdkws.MsgData
if err := utils.String2Pb(result, &msg); err != nil { if err := utils.String2Pb(result, &msg); err != nil {
log2.Error(operationID, utils.GetSelfFuncName(), "String2Pb failed", msg, result, key, err.Error())
continue continue
} }
msg.Status = constant.MsgDeleted msg.Status = constant.MsgDeleted
s, err := utils.Pb2String(&msg) s, err := utils.Pb2String(&msg)
if err != nil { if err != nil {
log2.Error(operationID, utils.GetSelfFuncName(), "Pb2String failed", msg, err.Error())
continue continue
} }
if err := r.rdb.Set(context.Background(), key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil { if err := r.rdb.Set(context.Background(), key, s, time.Duration(config.Config.MsgCacheTimeout)*time.Second).Err(); err != nil {
log2.Error(operationID, utils.GetSelfFuncName(), "Set failed", err.Error())
} }
} }
} }
func (r *RedisClient) SetGetuiToken(token string, expireTime int64) error { func (r *RedisClient) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
return r.rdb.Set(context.Background(), getuiToken, token, time.Duration(expireTime)*time.Second).Err() return r.rdb.Set(context.Background(), getuiToken, token, time.Duration(expireTime)*time.Second).Err()
} }
func (r *RedisClient) GetGetuiToken() (string, error) { func (r *RedisClient) GetGetuiToken(ctx context.Context) (string, error) {
result, err := r.rdb.Get(context.Background(), getuiToken).Result() result, err := r.rdb.Get(context.Background(), getuiToken).Result()
return result, err return result, err
} }
func (r *RedisClient) SetGetuiTaskID(taskID string, expireTime int64) error { func (r *RedisClient) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
return r.rdb.Set(context.Background(), getuiTaskID, taskID, time.Duration(expireTime)*time.Second).Err() return r.rdb.Set(context.Background(), getuiTaskID, taskID, time.Duration(expireTime)*time.Second).Err()
} }
func (r *RedisClient) GetGetuiTaskID() (string, error) { func (r *RedisClient) GetGetuiTaskID(ctx context.Context) (string, error) {
result, err := r.rdb.Get(context.Background(), getuiTaskID).Result() result, err := r.rdb.Get(context.Background(), getuiTaskID).Result()
return result, err return result, err
} }
func (r *RedisClient) SetSendMsgStatus(status int32, operationID string) error { func (r *RedisClient) SetSendMsgStatus(ctx context.Context, status int32, operationID string) error {
return r.rdb.Set(context.Background(), sendMsgFailedFlag+operationID, status, time.Hour*24).Err() return r.rdb.Set(context.Background(), sendMsgFailedFlag+operationID, status, time.Hour*24).Err()
} }
func (r *RedisClient) GetSendMsgStatus(operationID string) (int, error) { func (r *RedisClient) GetSendMsgStatus(ctx context.Context, operationID string) (int, error) {
result, err := r.rdb.Get(context.Background(), sendMsgFailedFlag+operationID).Result() result, err := r.rdb.Get(context.Background(), sendMsgFailedFlag+operationID).Result()
if err != nil { if err != nil {
return 0, err return 0, err
@ -430,75 +449,71 @@ func (r *RedisClient) GetSendMsgStatus(operationID string) (int, error) {
return status, err return status, err
} }
func (r *RedisClient) SetFcmToken(account string, platformID int, fcmToken string, expireTime int64) (err error) { func (r *RedisClient) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
key := FcmToken + account + ":" + strconv.Itoa(platformID) key := FcmToken + account + ":" + strconv.Itoa(platformID)
return r.rdb.Set(context.Background(), key, fcmToken, time.Duration(expireTime)*time.Second).Err() return r.rdb.Set(context.Background(), key, fcmToken, time.Duration(expireTime)*time.Second).Err()
} }
func (r *RedisClient) GetFcmToken(account string, platformID int) (string, error) { func (r *RedisClient) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
key := FcmToken + account + ":" + strconv.Itoa(platformID) key := FcmToken + account + ":" + strconv.Itoa(platformID)
return r.rdb.Get(context.Background(), key).Result() return r.rdb.Get(context.Background(), key).Result()
} }
func (r *RedisClient) DelFcmToken(account string, platformID int) error { func (r *RedisClient) DelFcmToken(ctx context.Context, account string, platformID int) error {
key := FcmToken + account + ":" + strconv.Itoa(platformID) key := FcmToken + account + ":" + strconv.Itoa(platformID)
return r.rdb.Del(context.Background(), key).Err() return r.rdb.Del(context.Background(), key).Err()
} }
func (r *RedisClient) IncrUserBadgeUnreadCountSum(uid string) (int, error) { func (r *RedisClient) IncrUserBadgeUnreadCountSum(ctx context.Context, uid string) (int, error) {
key := userBadgeUnreadCountSum + uid key := userBadgeUnreadCountSum + uid
seq, err := r.rdb.Incr(context.Background(), key).Result() seq, err := r.rdb.Incr(context.Background(), key).Result()
return int(seq), err return int(seq), err
} }
func (r *RedisClient) SetUserBadgeUnreadCountSum(uid string, value int) error { func (r *RedisClient) SetUserBadgeUnreadCountSum(ctx context.Context, uid string, value int) error {
key := userBadgeUnreadCountSum + uid key := userBadgeUnreadCountSum + uid
return r.rdb.Set(context.Background(), key, value, 0).Err() return r.rdb.Set(context.Background(), key, value, 0).Err()
} }
func (r *RedisClient) GetUserBadgeUnreadCountSum(uid string) (int, error) { func (r *RedisClient) GetUserBadgeUnreadCountSum(ctx context.Context, uid string) (int, error) {
key := userBadgeUnreadCountSum + uid key := userBadgeUnreadCountSum + uid
seq, err := r.rdb.Get(context.Background(), key).Result() seq, err := r.rdb.Get(context.Background(), key).Result()
return utils.StringToInt(seq), err return utils.StringToInt(seq), err
} }
func (r *RedisClient) JudgeMessageReactionEXISTS(clientMsgID string, sessionType int32) (bool, error) { func (r *RedisClient) JudgeMessageReactionEXISTS(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) {
key := getMessageReactionExPrefix(clientMsgID, sessionType) key := r.getMessageReactionExPrefix(clientMsgID, sessionType)
n, err := r.rdb.Exists(context.Background(), key).Result() n, err := r.rdb.Exists(context.Background(), key).Result()
if n > 0 { return n > 0, err
return true, err
} else {
return false, err
}
} }
func (r *RedisClient) GetOneMessageAllReactionList(clientMsgID string, sessionType int32) (map[string]string, error) { func (r *RedisClient) GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) {
key := getMessageReactionExPrefix(clientMsgID, sessionType) key := r.getMessageReactionExPrefix(clientMsgID, sessionType)
return r.rdb.HGetAll(context.Background(), key).Result() return r.rdb.HGetAll(context.Background(), key).Result()
} }
func (r *RedisClient) DeleteOneMessageKey(clientMsgID string, sessionType int32, subKey string) error { func (r *RedisClient) DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error {
key := getMessageReactionExPrefix(clientMsgID, sessionType) key := r.getMessageReactionExPrefix(clientMsgID, sessionType)
return r.rdb.HDel(context.Background(), key, subKey).Err() return r.rdb.HDel(context.Background(), key, subKey).Err()
} }
func (r *RedisClient) SetMessageReactionExpire(clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) { func (r *RedisClient) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) {
key := getMessageReactionExPrefix(clientMsgID, sessionType) key := r.getMessageReactionExPrefix(clientMsgID, sessionType)
return r.rdb.Expire(context.Background(), key, expiration).Result() return r.rdb.Expire(context.Background(), key, expiration).Result()
} }
func (r *RedisClient) GetMessageTypeKeyValue(clientMsgID string, sessionType int32, typeKey string) (string, error) { func (r *RedisClient) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) {
key := getMessageReactionExPrefix(clientMsgID, sessionType) key := r.getMessageReactionExPrefix(clientMsgID, sessionType)
result, err := r.rdb.HGet(context.Background(), key, typeKey).Result() result, err := r.rdb.HGet(context.Background(), key, typeKey).Result()
return result, err return result, err
} }
func (r *RedisClient) SetMessageTypeKeyValue(clientMsgID string, sessionType int32, typeKey, value string) error { func (r *RedisClient) SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error {
key := getMessageReactionExPrefix(clientMsgID, sessionType) key := r.getMessageReactionExPrefix(clientMsgID, sessionType)
return r.rdb.HSet(context.Background(), key, typeKey, value).Err() return r.rdb.HSet(context.Background(), key, typeKey, value).Err()
} }
func (r *RedisClient) LockMessageTypeKey(clientMsgID string, TypeKey string) error { func (r *RedisClient) LockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
return r.rdb.SetNX(context.Background(), key, 1, time.Minute).Err() return r.rdb.SetNX(context.Background(), key, 1, time.Minute).Err()
} }
func (r *RedisClient) UnLockMessageTypeKey(clientMsgID string, TypeKey string) error { func (r *RedisClient) UnLockMessageTypeKey(ctx context.Context, clientMsgID string, TypeKey string) error {
key := exTypeKeyLocker + clientMsgID + "_" + TypeKey key := exTypeKeyLocker + clientMsgID + "_" + TypeKey
return r.rdb.Del(context.Background(), key).Err() return r.rdb.Del(context.Background(), key).Err()

View File

@ -5,7 +5,7 @@ import (
"Open_IM/pkg/common/tokenverify" "Open_IM/pkg/common/tokenverify"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
go_redis "github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
"github.com/golang-jwt/jwt/v4" "github.com/golang-jwt/jwt/v4"
) )
@ -34,7 +34,7 @@ func NewTokenRedis(redisClient *RedisClient, accessSecret string, accessExpire i
func (t *TokenRedis) GetTokensWithoutError(ctx context.Context, userID, platform string) (map[string]int, error) { func (t *TokenRedis) GetTokensWithoutError(ctx context.Context, userID, platform string) (map[string]int, error) {
key := uidPidToken + userID + ":" + platform key := uidPidToken + userID + ":" + platform
m, err := t.RedisClient.GetClient().HGetAll(context.Background(), key).Result() m, err := t.RedisClient.GetClient().HGetAll(context.Background(), key).Result()
if err != nil && err == go_redis.Nil { if err != nil && err == redis.Nil {
return nil, nil return nil, nil
} }
mm := make(map[string]int) mm := make(map[string]int)

View File

@ -0,0 +1,99 @@
package controller
import (
unRelationTb "Open_IM/pkg/common/db/table/unrelation"
"Open_IM/pkg/proto/sdkws"
"context"
"github.com/go-redis/redis/v8"
"go.mongodb.org/mongo-driver/mongo"
)
type ExtendMsgInterface interface {
CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error
GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error)
GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error)
InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error
InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error)
}
type ExtendMsgController struct {
database ExtendMsgDatabase
}
func NewExtendMsgController(mgo *mongo.Client, rdb redis.UniversalClient) *ExtendMsgController {
return &ExtendMsgController{}
}
func (e *ExtendMsgController) CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error {
return e.database.CreateExtendMsgSet(ctx, set)
}
func (e *ExtendMsgController) GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) {
return e.GetAllExtendMsgSet(ctx, ID, opts)
}
func (e *ExtendMsgController) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) {
return e.GetExtendMsgSet(ctx, sourceID, sessionType, maxMsgUpdateTime)
}
func (e *ExtendMsgController) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error {
return e.InsertExtendMsg(ctx, sourceID, sessionType, msg)
}
func (e *ExtendMsgController) InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error {
return e.InsertOrUpdateReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList)
}
func (e *ExtendMsgController) DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error {
return e.DeleteReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList)
}
func (e *ExtendMsgController) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) {
return e.GetExtendMsg(ctx, sourceID, sessionType, clientMsgID, maxMsgUpdateTime)
}
type ExtendMsgDatabaseInterface interface {
CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error
GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error)
GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error)
InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error
InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error)
}
// ExtendMsgDatabase implements ExtendMsgDatabaseInterface on top of the
// unrelational (mongo) extension-set model.
type ExtendMsgDatabase struct {
	// model is the underlying storage accessor every method delegates to.
	model unRelationTb.ExtendMsgSetModelInterface
}

// NewExtendMsgDatabase constructs an ExtendMsgDatabase.
// NOTE(review): model is never initialized here, so every method call on the
// returned value will dereference a nil interface — confirm the intended wiring.
func NewExtendMsgDatabase() ExtendMsgDatabaseInterface {
	return &ExtendMsgDatabase{}
}
// CreateExtendMsgSet creates a new extension-set document via the model layer.
func (e *ExtendMsgDatabase) CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error {
	return e.model.CreateExtendMsgSet(ctx, set)
}

// GetAllExtendMsgSet lists every extension set for sourceID, filtered by opts.
func (e *ExtendMsgDatabase) GetAllExtendMsgSet(ctx context.Context, sourceID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) {
	return e.model.GetAllExtendMsgSet(ctx, sourceID, opts)
}

// GetExtendMsgSet fetches one extension set no newer than maxMsgUpdateTime.
func (e *ExtendMsgDatabase) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) {
	return e.model.GetExtendMsgSet(ctx, sourceID, sessionType, maxMsgUpdateTime)
}

// InsertExtendMsg appends a single extended message into a conversation's set.
func (e *ExtendMsgDatabase) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error {
	return e.model.InsertExtendMsg(ctx, sourceID, sessionType, msg)
}
// InsertOrUpdateReactionExtendMsgSet upserts reaction key/values for one message.
// Bug fix: the original called e.InsertOrUpdateReactionExtendMsgSet (itself),
// causing infinite recursion; every sibling method delegates to e.model, so do the same.
func (e *ExtendMsgDatabase) InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error {
	return e.model.InsertOrUpdateReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList)
}
// DeleteReactionExtendMsgSet removes reaction key/values for one message.
// Bug fix: the original called e.DeleteReactionExtendMsgSet (itself), causing
// infinite recursion; delegate to e.model like every sibling method.
func (e *ExtendMsgDatabase) DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error {
	return e.model.DeleteReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, reactionExtensionList)
}
// GetExtendMsg fetches one extended message by client message ID.
// Bug fix: the original called e.GetExtendMsg (itself), causing infinite
// recursion; delegate to e.model like every sibling method.
func (e *ExtendMsgDatabase) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) {
	return e.model.GetExtendMsg(ctx, sourceID, sessionType, clientMsgID, maxMsgUpdateTime)
}

View File

@ -5,7 +5,7 @@ import (
"Open_IM/pkg/common/db/cache" "Open_IM/pkg/common/db/cache"
"Open_IM/pkg/common/db/relation" "Open_IM/pkg/common/db/relation"
relationTb "Open_IM/pkg/common/db/table/relation" relationTb "Open_IM/pkg/common/db/table/relation"
unrelationTb "Open_IM/pkg/common/db/table/unrelation" unRelationTb "Open_IM/pkg/common/db/table/unrelation"
"Open_IM/pkg/common/db/unrelation" "Open_IM/pkg/common/db/unrelation"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
@ -47,8 +47,8 @@ type GroupInterface interface {
TakeGroupRequest(ctx context.Context, groupID string, userID string) (*relationTb.GroupRequestModel, error) TakeGroupRequest(ctx context.Context, groupID string, userID string) (*relationTb.GroupRequestModel, error)
PageGroupRequestUser(ctx context.Context, userID string, pageNumber, showNumber int32) (uint32, []*relationTb.GroupRequestModel, error) PageGroupRequestUser(ctx context.Context, userID string, pageNumber, showNumber int32) (uint32, []*relationTb.GroupRequestModel, error)
// SuperGroup // SuperGroup
FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unRelationTb.SuperGroupModel, error)
FindJoinSuperGroup(ctx context.Context, userID string) (superGroup *unrelationTb.UserToSuperGroupModel, err error) FindJoinSuperGroup(ctx context.Context, userID string) (superGroup *unRelationTb.UserToSuperGroupModel, err error)
CreateSuperGroup(ctx context.Context, groupID string, initMemberIDList []string) error CreateSuperGroup(ctx context.Context, groupID string, initMemberIDList []string) error
DeleteSuperGroup(ctx context.Context, groupID string) error DeleteSuperGroup(ctx context.Context, groupID string) error
DeleteSuperGroupMember(ctx context.Context, groupID string, userIDs []string) error DeleteSuperGroupMember(ctx context.Context, groupID string, userIDs []string) error
@ -153,11 +153,11 @@ func (g *GroupController) PageGroupRequestUser(ctx context.Context, userID strin
return g.database.PageGroupRequestUser(ctx, userID, pageNumber, showNumber) return g.database.PageGroupRequestUser(ctx, userID, pageNumber, showNumber)
} }
func (g *GroupController) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) { func (g *GroupController) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unRelationTb.SuperGroupModel, error) {
return g.database.FindSuperGroup(ctx, groupIDs) return g.database.FindSuperGroup(ctx, groupIDs)
} }
func (g *GroupController) FindJoinSuperGroup(ctx context.Context, userID string) (*unrelationTb.UserToSuperGroupModel, error) { func (g *GroupController) FindJoinSuperGroup(ctx context.Context, userID string) (*unRelationTb.UserToSuperGroupModel, error) {
return g.database.FindJoinSuperGroup(ctx, userID) return g.database.FindJoinSuperGroup(ctx, userID)
} }
@ -203,8 +203,8 @@ type GroupDataBaseInterface interface {
TakeGroupRequest(ctx context.Context, groupID string, userID string) (*relationTb.GroupRequestModel, error) TakeGroupRequest(ctx context.Context, groupID string, userID string) (*relationTb.GroupRequestModel, error)
PageGroupRequestUser(ctx context.Context, userID string, pageNumber, showNumber int32) (uint32, []*relationTb.GroupRequestModel, error) PageGroupRequestUser(ctx context.Context, userID string, pageNumber, showNumber int32) (uint32, []*relationTb.GroupRequestModel, error)
// SuperGroup // SuperGroup
FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unRelationTb.SuperGroupModel, error)
FindJoinSuperGroup(ctx context.Context, userID string) (*unrelationTb.UserToSuperGroupModel, error) FindJoinSuperGroup(ctx context.Context, userID string) (*unRelationTb.UserToSuperGroupModel, error)
CreateSuperGroup(ctx context.Context, groupID string, initMemberIDList []string) error CreateSuperGroup(ctx context.Context, groupID string, initMemberIDList []string) error
DeleteSuperGroup(ctx context.Context, groupID string) error DeleteSuperGroup(ctx context.Context, groupID string) error
DeleteSuperGroupMember(ctx context.Context, groupID string, userIDs []string) error DeleteSuperGroupMember(ctx context.Context, groupID string, userIDs []string) error
@ -467,11 +467,11 @@ func (g *GroupDataBase) PageGroupRequestUser(ctx context.Context, userID string,
return g.groupRequestDB.Page(ctx, userID, pageNumber, showNumber) return g.groupRequestDB.Page(ctx, userID, pageNumber, showNumber)
} }
func (g *GroupDataBase) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*unrelationTb.SuperGroupModel, error) { func (g *GroupDataBase) FindSuperGroup(ctx context.Context, groupIDs []string) ([]*table.SuperGroupModel, error) {
return g.mongoDB.FindSuperGroup(ctx, groupIDs) return g.mongoDB.FindSuperGroup(ctx, groupIDs)
} }
func (g *GroupDataBase) FindJoinSuperGroup(ctx context.Context, userID string) (*unrelationTb.UserToSuperGroupModel, error) { func (g *GroupDataBase) FindJoinSuperGroup(ctx context.Context, userID string) (*table.UserToSuperGroupModel, error) {
return g.mongoDB.GetSuperGroupByUserID(ctx, userID) return g.mongoDB.GetSuperGroupByUserID(ctx, userID)
} }

View File

@ -1,76 +1,554 @@
package controller package controller
import ( import (
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db/cache"
unRelationTb "Open_IM/pkg/common/db/table/unrelation"
"Open_IM/pkg/common/db/unrelation"
"Open_IM/pkg/common/log"
"Open_IM/pkg/common/prome"
"Open_IM/pkg/common/tracelog"
"github.com/gogo/protobuf/sortkeys"
"sync"
//"Open_IM/pkg/common/log"
pbMsg "Open_IM/pkg/proto/msg" pbMsg "Open_IM/pkg/proto/msg"
"Open_IM/pkg/proto/sdkws" "Open_IM/pkg/proto/sdkws"
"Open_IM/pkg/utils"
"context" "context"
"encoding/json" "errors"
"github.com/go-redis/redis/v8"
"go.mongodb.org/mongo-driver/mongo"
"github.com/golang/protobuf/proto"
) )
type MsgInterface interface { type MsgInterface interface {
BatchInsertChat2DB(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error // 批量插入消息到db
BatchInsertChat2Cache(ctx context.Context, insertID string, msgList []*pbMsg.MsgDataToMQ) (error, uint64) BatchInsertChat2DB(ctx context.Context, ID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error
// 刪除redis中消息缓存
DelMsgBySeqList(ctx context.Context, userID string, seqList []uint32) (totalUnExistSeqList []uint32, err error) DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ) error
// logic delete // incrSeq然后批量插入缓存
DelMsgLogic(ctx context.Context, userID string, seqList []uint32) error BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error)
DelMsgBySeqListInOneDoc(ctx context.Context, docID string, seqList []uint32) (unExistSeqList []uint32, err error) // 删除消息 返回不存在的seqList
ReplaceMsgToBlankByIndex(docID string, index int) (replaceMaxSeq uint32, err error) DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error)
ReplaceMsgByIndex(ctx context.Context, suffixUserID string, msg *sdkws.MsgData, seqIndex int) error // 获取群ID或者UserID最新一条在db里面的消息
// 获取群ID或者UserID最新一条在mongo里面的消息 GetNewestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error)
GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) // 获取群ID或者UserID最老一条在db里面的消息
// 获取群ID或者UserID最老一条在mongo里面的消息 GetOldestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error)
GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) // 通过seqList获取db中写扩散消息
GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error)
GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) // 通过seqList获取大群在db里面的消息
GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error)
GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, unExistSeqList []uint32, err error) // 删除用户所有消息/cache/db然后重置seq
SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error CleanUpUserMsgFromMongo(ctx context.Context, userID string) error
// 删除大群消息重置群成员最小群seq, remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除 redis cache)
CleanUpUserMsgFromMongo(userID string, operationID string) error DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userID string, remainTime int64) error
// 删除用户消息重置最小seq remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除redis cache)
DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error
} }
func NewMsgController() MsgDatabaseInterface { func NewMsgController(mgo *mongo.Client, rdb redis.UniversalClient) MsgInterface {
return MsgController return &MsgController{}
} }
type MsgController struct { type MsgController struct {
database MsgDatabase
}
// BatchInsertChat2DB batch-inserts messages for ID into the db store, continuing from currentMaxSeq.
func (m *MsgController) BatchInsertChat2DB(ctx context.Context, ID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error {
	return m.database.BatchInsertChat2DB(ctx, ID, msgList, currentMaxSeq)
}

// DeleteMessageFromCache removes the given messages from the redis cache for userID.
func (m *MsgController) DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ) error {
	return m.database.DeleteMessageFromCache(ctx, userID, msgList)
}

// BatchInsertChat2Cache assigns seqs and batch-inserts messages into the cache,
// returning the max seq prior to insertion.
func (m *MsgController) BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error) {
	return m.database.BatchInsertChat2Cache(ctx, sourceID, msgList)
}

// DelMsgBySeqs deletes the given seqs for userID and returns the seqs that did not exist.
func (m *MsgController) DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error) {
	return m.database.DelMsgBySeqs(ctx, userID, seqs)
}

// GetNewestMsg returns the most recent stored message for the user or group ID.
func (m *MsgController) GetNewestMsg(ctx context.Context, ID string) (msg *sdkws.MsgData, err error) {
	return m.database.GetNewestMsg(ctx, ID)
}

// GetOldestMsg returns the oldest stored message for the user or group ID.
func (m *MsgController) GetOldestMsg(ctx context.Context, ID string) (msg *sdkws.MsgData, err error) {
	return m.database.GetOldestMsg(ctx, ID)
}

// GetMsgBySeqs fetches write-diffusion (single-chat) messages for userID by seq.
func (m *MsgController) GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) {
	return m.database.GetMsgBySeqs(ctx, userID, seqs)
}

// GetSuperGroupMsgBySeqs fetches read-diffusion (super-group) messages by seq.
func (m *MsgController) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) {
	return m.database.GetSuperGroupMsgBySeqs(ctx, groupID, seqs)
}

// CleanUpUserMsgFromMongo deletes all of a user's messages from mongo and resets the min seq.
func (m *MsgController) CleanUpUserMsgFromMongo(ctx context.Context, userID string) error {
	return m.database.CleanUpUserMsgFromMongo(ctx, userID)
}
// DeleteUserSuperGroupMsgsAndSetMinSeq deletes a user's super-group messages older
// than remainTime seconds (0 = all) and raises the member's min seq accordingly.
// Bug fix: the original delegated to DeleteUserMsgsAndSetMinSeq, silently dropping
// groupID and operating on the user's single-chat messages instead. Route to the
// super-group variant; the database implementation takes a slice of member IDs,
// so wrap the single userID.
func (m *MsgController) DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userID string, remainTime int64) error {
	return m.database.DeleteUserSuperGroupMsgsAndSetMinSeq(ctx, groupID, []string{userID}, remainTime)
}
// DeleteUserMsgsAndSetMinSeq deletes a user's messages older than remainTime
// seconds (0 = all) and resets the user's minimum seq (does not touch the redis cache).
func (m *MsgController) DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error {
	return m.database.DeleteUserMsgsAndSetMinSeq(ctx, userID, remainTime)
}
type MsgDatabaseInterface interface { type MsgDatabaseInterface interface {
BatchInsertChat2DB(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error // 批量插入消息
BatchInsertChat2Cache(ctx context.Context, insertID string, msgList []*pbMsg.MsgDataToMQ) (error, uint64) BatchInsertChat2DB(ctx context.Context, ID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error
// 刪除redis中消息缓存
DelMsgBySeqList(ctx context.Context, userID string, seqList []uint32) (totalUnExistSeqList []uint32, err error) DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ) error
// logic delete // incrSeq然后批量插入缓存
DelMsgLogic(ctx context.Context, userID string, seqList []uint32) error BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error)
DelMsgBySeqListInOneDoc(ctx context.Context, docID string, seqList []uint32) (unExistSeqList []uint32, err error) // 删除消息 返回不存在的seqList
ReplaceMsgToBlankByIndex(docID string, index int) (replaceMaxSeq uint32, err error) DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error)
ReplaceMsgByIndex(ctx context.Context, suffixUserID string, msg *sdkws.MsgData, seqIndex int) error
// 获取群ID或者UserID最新一条在mongo里面的消息 // 获取群ID或者UserID最新一条在mongo里面的消息
GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) GetNewestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error)
// 获取群ID或者UserID最老一条在mongo里面的消息 // 获取群ID或者UserID最老一条在mongo里面的消息
GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) GetOldestMsg(ctx context.Context, sourceID string) (msg *sdkws.MsgData, err error)
// 通过seqList获取mongo中写扩散消息
GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error)
GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) // 通过seqList获取大群在 mongo里面的消息
GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, unExistSeqList []uint32, err error) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error)
SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error
// 删除用户所有消息/redis/mongo然后重置seq // 删除用户所有消息/redis/mongo然后重置seq
CleanUpUserMsgFromMongo(userID string, operationID string) error CleanUpUserMsgFromMongo(ctx context.Context, userID string) error
} // 删除大群消息重置群成员最小群seq, remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除 redis cache)
DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userID string, remainTime int64) error
func NewMsgDatabase() MsgDatabaseInterface { // 删除用户消息重置最小seq remainTime为消息保留的时间单位秒,超时消息删除, 传0删除所有消息(此方法不删除redis cache)
return MsgDatabase DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error
} }
type MsgDatabase struct { type MsgDatabase struct {
msgModel unRelationTb.MsgDocModelInterface
msgCache cache.Cache
msg unRelationTb.MsgDocModel
} }
func (m *MsgDatabase) BatchInsertChat2DB(ctx context.Context, userID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error { func NewMsgDatabase(mgo *mongo.Client, rdb redis.UniversalClient) MsgDatabaseInterface {
return &MsgDatabase{}
} }
func (m *MsgDatabase) CleanUpUserMsgFromMongo(userID string, operationID string) error { func (db *MsgDatabase) BatchInsertChat2DB(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq uint64) error {
//newTime := utils.GetCurrentTimestampByMill()
if len(msgList) > db.msg.GetSingleGocMsgNum() {
return errors.New("too large")
}
var remain uint64
blk0 := uint64(db.msg.GetSingleGocMsgNum() - 1)
//currentMaxSeq 4998
if currentMaxSeq < uint64(db.msg.GetSingleGocMsgNum()) {
remain = blk0 - currentMaxSeq //1
} else {
excludeBlk0 := currentMaxSeq - blk0 //=1
//(5000-1)%5000 == 4999
remain = (uint64(db.msg.GetSingleGocMsgNum()) - (excludeBlk0 % uint64(db.msg.GetSingleGocMsgNum()))) % uint64(db.msg.GetSingleGocMsgNum())
}
//remain=1
insertCounter := uint64(0)
msgsToMongo := make([]unRelationTb.MsgInfoModel, 0)
msgsToMongoNext := make([]unRelationTb.MsgInfoModel, 0)
docID := ""
docIDNext := ""
var err error
for _, m := range msgList {
//log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
currentMaxSeq++
sMsg := unRelationTb.MsgInfoModel{}
sMsg.SendTime = m.MsgData.SendTime
m.MsgData.Seq = uint32(currentMaxSeq)
if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
return utils.Wrap(err, "")
}
if insertCounter < remain {
msgsToMongo = append(msgsToMongo, sMsg)
insertCounter++
docID = db.msg.GetDocID(sourceID, uint32(currentMaxSeq))
//log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID)
} else {
msgsToMongoNext = append(msgsToMongoNext, sMsg)
docIDNext = db.msg.GetDocID(sourceID, uint32(currentMaxSeq))
//log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID)
}
}
if docID != "" {
//filter := bson.M{"uid": seqUid}
//log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo, "userID: ", userID)
//err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgsToMongo}}}).Err()
err = db.msgModel.PushMsgsToDoc(ctx, docID, msgsToMongo)
if err != nil {
if err == mongo.ErrNoDocuments {
doc := &unRelationTb.MsgDocModel{}
doc.DocID = docID
doc.Msg = msgsToMongo
if err = db.msgModel.Create(ctx, doc); err != nil {
prome.PromeInc(prome.MsgInsertMongoFailedCounter)
//log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
return utils.Wrap(err, "")
}
prome.PromeInc(prome.MsgInsertMongoSuccessCounter)
} else {
prome.PromeInc(prome.MsgInsertMongoFailedCounter)
//log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
return utils.Wrap(err, "")
}
} else {
prome.PromeInc(prome.MsgInsertMongoSuccessCounter)
}
}
if docIDNext != "" {
nextDoc := &unRelationTb.MsgDocModel{}
nextDoc.DocID = docIDNext
nextDoc.Msg = msgsToMongoNext
//log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext, "userID: ", userID)
if err = db.msgModel.Create(ctx, nextDoc); err != nil {
prome.PromeInc(prome.MsgInsertMongoFailedCounter)
//log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
return utils.Wrap(err, "")
}
prome.PromeInc(prome.MsgInsertMongoSuccessCounter)
}
//log.Debug(operationID, "batch mgo cost time ", mongo2.getCurrentTimestampByMill()-newTime, userID, len(msgList))
return nil
}
// DeleteMessageFromCache removes the given messages from the redis cache for userID.
func (db *MsgDatabase) DeleteMessageFromCache(ctx context.Context, userID string, msgs []*pbMsg.MsgDataToMQ) error {
	return db.msgCache.DeleteMessageFromCache(ctx, userID, msgs)
}
// BatchInsertChat2Cache allocates consecutive seqs for msgList (continuing from the
// source's current max seq in redis), writes the messages into the cache, then
// persists the new max seq. Returns the max seq value *before* this batch was
// assigned, so callers know where the batch starts.
func (db *MsgDatabase) BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) (uint64, error) {
	//newTime := utils.GetCurrentTimestampByMill()
	lenList := len(msgList)
	// A batch may not span more than one mongo doc's worth of messages.
	if lenList > db.msg.GetSingleGocMsgNum() {
		return 0, errors.New("too large")
	}
	if lenList < 1 {
		return 0, errors.New("too short as 0")
	}
	// judge sessionType to get seq: super groups track one seq per group,
	// everything else tracks a per-user seq.
	var currentMaxSeq uint64
	var err error
	if msgList[0].MsgData.SessionType == constant.SuperGroupChatType {
		currentMaxSeq, err = db.msgCache.GetGroupMaxSeq(ctx, sourceID)
		//log.Debug(operationID, "constant.SuperGroupChatType lastMaxSeq before add ", currentMaxSeq, "userID ", sourceID, err)
	} else {
		currentMaxSeq, err = db.msgCache.GetUserMaxSeq(ctx, sourceID)
		//log.Debug(operationID, "constant.SingleChatType lastMaxSeq before add ", currentMaxSeq, "userID ", sourceID, err)
	}
	// redis.Nil just means no seq recorded yet; start from zero.
	if err != nil && err != redis.Nil {
		prome.PromeInc(prome.SeqGetFailedCounter)
		return 0, utils.Wrap(err, "")
	}
	prome.PromeInc(prome.SeqGetSuccessCounter)
	lastMaxSeq := currentMaxSeq
	// Stamp each message with its newly allocated seq.
	for _, m := range msgList {
		currentMaxSeq++
		m.MsgData.Seq = uint32(currentMaxSeq)
		//log.Debug(operationID, "cache msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", sourceID, "seq: ", currentMaxSeq)
	}
	//log.Debug(operationID, "SetMessageToCache ", sourceID, len(msgList))
	// Cache write is best-effort: failures are counted but do not abort the batch.
	failedNum, err := db.msgCache.SetMessageToCache(ctx, sourceID, msgList)
	if err != nil {
		prome.PromeAdd(prome.MsgInsertRedisFailedCounter, failedNum)
		//log.Error(operationID, "setMessageToCache failed, continue ", err.Error(), len(msgList), sourceID)
	} else {
		prome.PromeInc(prome.MsgInsertRedisSuccessCounter)
	}
	//log.Debug(operationID, "batch to redis cost time ", mongo2.getCurrentTimestampByMill()-newTime, sourceID, len(msgList))
	// Persist the advanced max seq; its error (if any) is what the caller sees.
	if msgList[0].MsgData.SessionType == constant.SuperGroupChatType {
		err = db.msgCache.SetGroupMaxSeq(ctx, sourceID, currentMaxSeq)
	} else {
		err = db.msgCache.SetUserMaxSeq(ctx, sourceID, currentMaxSeq)
	}
	if err != nil {
		prome.PromeInc(prome.SeqSetFailedCounter)
	} else {
		prome.PromeInc(prome.SeqSetSuccessCounter)
	}
	return lastMaxSeq, utils.Wrap(err, "")
}
// DelMsgBySeqs marks the given seqs deleted for userID, fanning out one goroutine
// per mongo doc, and returns the seqs that were not found. Per-doc errors are
// best-effort (a failed doc simply contributes no seqs), matching the original
// behavior.
// Bug fix: the original never called wg.Wait(), so the function returned before
// the goroutines finished — the result slice was raced on and usually empty.
func (db *MsgDatabase) DelMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (totalUnExistSeqs []uint32, err error) {
	sortkeys.Uint32s(seqs)
	// Group the requested seqs by the mongo doc that holds them.
	docIDSeqsMap := db.msg.GetDocIDSeqsMap(userID, seqs)
	lock := sync.Mutex{}
	var wg sync.WaitGroup
	wg.Add(len(docIDSeqsMap))
	for k, v := range docIDSeqsMap {
		go func(docID string, seqs []uint32) {
			defer wg.Done()
			unExistSeqList, err := db.DelMsgBySeqsInOneDoc(ctx, docID, seqs)
			if err != nil {
				return
			}
			lock.Lock()
			totalUnExistSeqs = append(totalUnExistSeqs, unExistSeqList...)
			lock.Unlock()
		}(k, v)
	}
	// Wait for every doc worker before reading the accumulated result.
	wg.Wait()
	return totalUnExistSeqs, nil
}
// DelMsgBySeqsInOneDoc marks the given seqs as deleted within a single mongo doc
// and returns the seqs that were not present in that doc.
func (db *MsgDatabase) DelMsgBySeqsInOneDoc(ctx context.Context, docID string, seqs []uint32) (unExistSeqs []uint32, err error) {
	// Locate the matching messages and their positions inside the doc.
	seqMsgs, indexes, unExistSeqs, err := db.GetMsgAndIndexBySeqsInOneDoc(ctx, docID, seqs)
	if err != nil {
		return nil, err
	}
	// Logical delete: flip each found message's status in place.
	for i, v := range seqMsgs {
		if err = db.msgModel.UpdateMsgStatusByIndexInOneDoc(ctx, docID, v, indexes[i], constant.MsgDeleted); err != nil {
			return nil, err
		}
	}
	return unExistSeqs, nil
}
// GetMsgAndIndexBySeqsInOneDoc scans one mongo doc for the requested seqs and
// returns the decoded messages, their indexes within the doc, and the seqs that
// were not found.
func (db *MsgDatabase) GetMsgAndIndexBySeqsInOneDoc(ctx context.Context, docID string, seqs []uint32) (seqMsgs []*sdkws.MsgData, indexes []int, unExistSeqs []uint32, err error) {
	doc, err := db.msgModel.FindOneByDocID(ctx, docID)
	if err != nil {
		return nil, nil, nil, err
	}
	// singleCount tracks how many requested seqs have been found so the scan
	// can stop early once all are located.
	singleCount := 0
	var hasSeqList []uint32
	for i := 0; i < len(doc.Msg); i++ {
		msgPb, err := db.unmarshalMsg(&doc.Msg[i])
		if err != nil {
			return nil, nil, nil, err
		}
		if utils.Contain(msgPb.Seq, seqs) {
			indexes = append(indexes, i)
			seqMsgs = append(seqMsgs, msgPb)
			hasSeqList = append(hasSeqList, msgPb.Seq)
			singleCount++
			if singleCount == len(seqs) {
				break
			}
		}
	}
	// Anything requested but not found goes into unExistSeqs.
	for _, i := range seqs {
		if utils.Contain(i, hasSeqList) {
			continue
		}
		unExistSeqs = append(unExistSeqs, i)
	}
	return seqMsgs, indexes, unExistSeqs, nil
}
// GetNewestMsg returns the most recent message stored for the given user or
// group ID, decoded from its serialized form.
func (db *MsgDatabase) GetNewestMsg(ctx context.Context, sourceID string) (msgPb *sdkws.MsgData, err error) {
	newest, err := db.msgModel.GetNewestMsg(ctx, sourceID)
	if err != nil {
		return nil, err
	}
	return db.unmarshalMsg(newest)
}
// GetOldestMsg returns the oldest message stored for the given user or group
// ID, decoded from its serialized form.
func (db *MsgDatabase) GetOldestMsg(ctx context.Context, sourceID string) (msgPb *sdkws.MsgData, err error) {
	oldest, err := db.msgModel.GetOldestMsg(ctx, sourceID)
	if err != nil {
		return nil, err
	}
	return db.unmarshalMsg(oldest)
}
// unmarshalMsg decodes the protobuf payload of a stored message record into an
// sdkws.MsgData, wrapping any decode error.
func (db *MsgDatabase) unmarshalMsg(msgInfo *unRelationTb.MsgInfoModel) (msgPb *sdkws.MsgData, err error) {
	var data sdkws.MsgData
	if err := proto.Unmarshal(msgInfo.Msg, &data); err != nil {
		return nil, utils.Wrap(err, "")
	}
	return &data, nil
}
// getMsgBySeqs fetches messages for sourceID by seq across however many mongo
// docs they span. Seqs that cannot be found are filled with generated
// "exception" placeholder messages, whose shape depends on diffusionType
// (write-diffusion for single chat, read-diffusion for super groups).
func (db *MsgDatabase) getMsgBySeqs(ctx context.Context, sourceID string, seqs []uint32, diffusionType int) (seqMsg []*sdkws.MsgData, err error) {
	var hasSeqs []uint32
	singleCount := 0
	// Group the requested seqs by the mongo doc that holds them.
	m := db.msg.GetDocIDSeqsMap(sourceID, seqs)
	for docID, value := range m {
		doc, err := db.msgModel.FindOneByDocID(ctx, docID)
		if err != nil {
			// A missing doc is tolerated; its seqs become exception messages below.
			//log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error())
			continue
		}
		singleCount = 0
		for i := 0; i < len(doc.Msg); i++ {
			msgPb, err := db.unmarshalMsg(&doc.Msg[i])
			if err != nil {
				//log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error())
				return nil, err
			}
			if utils.Contain(msgPb.Seq, value) {
				seqMsg = append(seqMsg, msgPb)
				hasSeqs = append(hasSeqs, msgPb.Seq)
				singleCount++
				// Stop scanning this doc once all its requested seqs are found.
				if singleCount == len(value) {
					break
				}
			}
		}
	}
	// Backfill placeholders for every seq that was requested but not found.
	if len(hasSeqs) != len(seqs) {
		var diff []uint32
		var exceptionMsg []*sdkws.MsgData
		diff = utils.Difference(hasSeqs, seqs)
		if diffusionType == constant.WriteDiffusion {
			exceptionMsg = db.msg.GenExceptionMessageBySeqs(diff)
		} else if diffusionType == constant.ReadDiffusion {
			exceptionMsg = db.msg.GenExceptionSuperGroupMessageBySeqs(diff, sourceID)
		}
		seqMsg = append(seqMsg, exceptionMsg...)
	}
	return seqMsg, nil
}
// GetMsgBySeqs fetches write-diffusion (single-chat) messages for userID by seq.
func (db *MsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) {
	return db.getMsgBySeqs(ctx, userID, seqs, constant.WriteDiffusion)
}

// GetSuperGroupMsgBySeqs fetches read-diffusion (super-group) messages by seq.
func (db *MsgDatabase) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []uint32) (seqMsg []*sdkws.MsgData, err error) {
	return db.getMsgBySeqs(ctx, groupID, seqs, constant.ReadDiffusion)
}
// CleanUpUserMsgFromMongo deletes every mongo doc holding userID's messages
// (up to the user's current max seq) and then raises the user's min seq to the
// max seq. A user with no recorded seq, or no docs, is treated as already clean.
func (db *MsgDatabase) CleanUpUserMsgFromMongo(ctx context.Context, userID string) error {
	maxSeq, err := db.msgCache.GetUserMaxSeq(ctx, userID)
	// redis.Nil: no seq recorded for this user, nothing to clean.
	if err == redis.Nil {
		return nil
	}
	if err != nil {
		return err
	}
	docIDs := db.msg.GetSeqDocIDList(userID, uint32(maxSeq))
	err = db.msgModel.Delete(ctx, docIDs)
	// Docs already gone counts as success.
	if err == mongo.ErrNoDocuments {
		return nil
	}
	if err != nil {
		return err
	}
	// NOTE(review): min seq is set to maxSeq (not maxSeq+1) — confirm whether the
	// message at maxSeq itself is meant to remain visible.
	err = db.msgCache.SetUserMinSeq(ctx, userID, maxSeq)
	return utils.Wrap(err, "")
}
// DeleteUserSuperGroupMsgsAndSetMinSeq deletes super-group messages older than
// remainTime seconds (0 = all) for groupID, then raises each listed member's
// per-group min seq to at least the new minimum. Redis errors per member are
// best-effort: the loop continues past them.
func (db *MsgDatabase) DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userIDs []string, remainTime int64) error {
	var delStruct delMsgRecursionStruct
	minSeq, err := db.deleteMsgRecursion(ctx, groupID, unRelationTb.OldestList, &delStruct, remainTime)
	// NOTE(review): the deletion error is deliberately swallowed (only the old log
	// call remains, commented out) — confirm this best-effort behavior is intended.
	if err != nil {
		//log.NewError(operationID, utils.GetSelfFuncName(), groupID, "deleteMsg failed")
	}
	// minSeq == 0 means nothing was deleted; leave member seqs untouched.
	if minSeq == 0 {
		return nil
	}
	//log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDList:", delStruct, "minSeq", minSeq)
	for _, userID := range userIDs {
		userMinSeq, err := db.msgCache.GetGroupUserMinSeq(ctx, groupID, userID)
		if err != nil && err != redis.Nil {
			//log.NewError(operationID, utils.GetSelfFuncName(), "GetGroupUserMinSeq failed", groupID, userID, err.Error())
			continue
		}
		// Never lower a member's min seq; only raise it to the deletion floor.
		if userMinSeq > uint64(minSeq) {
			err = db.msgCache.SetGroupUserMinSeq(ctx, groupID, userID, userMinSeq)
		} else {
			err = db.msgCache.SetGroupUserMinSeq(ctx, groupID, userID, uint64(minSeq))
		}
		// NOTE(review): per-member set errors are also swallowed — confirm intended.
		if err != nil {
			//log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userID, userMinSeq, minSeq)
		}
	}
	return nil
}
// DeleteUserMsgsAndSetMinSeq deletes a user's messages older than remainTime
// seconds (0 = all) and, when anything was removed, resets the user's minimum
// seq. The redis cache is not touched here.
func (db *MsgDatabase) DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error {
	delState := delMsgRecursionStruct{}
	minSeq, err := db.deleteMsgRecursion(ctx, userID, unRelationTb.OldestList, &delState, remainTime)
	if err != nil {
		return utils.Wrap(err, "")
	}
	// A zero min seq means nothing was deleted; no seq update needed.
	if minSeq == 0 {
		return nil
	}
	return db.msgCache.SetUserMinSeq(ctx, userID, uint64(minSeq))
}
// delMsgRecursionStruct accumulates state across deleteMsgRecursion calls:
// the min seq to set once deletion finishes and the doc IDs queued for
// physical removal.
type delMsgRecursionStruct struct {
	minSeq       uint32   // highest seq among fully deleted docs
	delDocIDList []string // mongo doc IDs pending physical deletion
}

// getSetMinSeq returns the accumulated minimum seq.
func (d *delMsgRecursionStruct) getSetMinSeq() uint32 {
	return d.minSeq
}
// index 0....19(del) 20...69
// seq 70
// set minSeq 21
// recursion 删除list并且返回设置的最小seq
func (db *MsgDatabase) deleteMsgRecursion(ctx context.Context, sourceID string, index int64, delStruct *delMsgRecursionStruct, remainTime int64) (uint32, error) {
// find from oldest list
msgs, err := db.msgModel.GetMsgsByIndex(ctx, sourceID, index)
if err != nil || msgs.DocID == "" {
if err != nil {
if err == unrelation.ErrMsgListNotExist {
//log.NewInfo(operationID, utils.GetSelfFuncName(), "ID:", sourceID, "index:", index, err.Error())
} else {
//log.NewError(operationID, utils.GetSelfFuncName(), "GetUserMsgListByIndex failed", err.Error(), index, ID)
}
}
// 获取报错或者获取不到了物理删除并且返回seq delMongoMsgsPhysical(delStruct.delDocIDList)
err = db.msgModel.Delete(ctx, delStruct.delDocIDList)
if err != nil {
return 0, err
}
return delStruct.getSetMinSeq() + 1, nil
}
//log.NewDebug(operationID, "ID:", sourceID, "index:", index, "uid:", msgs.UID, "len:", len(msgs.Msg))
if len(msgs.Msg) > db.msg.GetSingleGocMsgNum() {
log.NewWarn(tracelog.GetOperationID(ctx), utils.GetSelfFuncName(), "msgs too large:", len(msgs.Msg), "docID:", msgs.DocID)
}
if msgs.Msg[len(msgs.Msg)-1].SendTime+(remainTime*1000) < utils.GetCurrentTimestampByMill() && msgs.IsFull() {
delStruct.delDocIDList = append(delStruct.delDocIDList, msgs.DocID)
lastMsgPb := &sdkws.MsgData{}
err = proto.Unmarshal(msgs.Msg[len(msgs.Msg)-1].Msg, lastMsgPb)
if err != nil {
//log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
return 0, utils.Wrap(err, "proto.Unmarshal failed")
}
delStruct.minSeq = lastMsgPb.Seq
} else {
var hasMarkDelFlag bool
for _, msg := range msgs.Msg {
msgPb := &sdkws.MsgData{}
err = proto.Unmarshal(msg.Msg, msgPb)
if err != nil {
//log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
return 0, utils.Wrap(err, "proto.Unmarshal failed")
}
if utils.GetCurrentTimestampByMill() > msg.SendTime+(remainTime*1000) {
msgPb.Status = constant.MsgDeleted
bytes, _ := proto.Marshal(msgPb)
msg.Msg = bytes
msg.SendTime = 0
hasMarkDelFlag = true
} else {
if err := db.msgModel.Delete(ctx, delStruct.delDocIDList); err != nil {
return 0, err
}
if hasMarkDelFlag {
if err := db.msgModel.UpdateOneDoc(ctx, msgs); err != nil {
return delStruct.getSetMinSeq(), utils.Wrap(err, "")
}
}
return msgPb.Seq + 1, nil
}
}
}
//log.NewDebug(operationID, sourceID, "continue to", delStruct)
// 继续递归 index+1
seq, err := db.deleteMsgRecursion(ctx, sourceID, index+1, delStruct, remainTime)
return seq, utils.Wrap(err, "deleteMsg failed")
} }

View File

@ -23,6 +23,8 @@ type UserInterface interface {
Page(ctx context.Context, pageNumber, showNumber int32) (users []*relationTb.UserModel, count int64, err error) Page(ctx context.Context, pageNumber, showNumber int32) (users []*relationTb.UserModel, count int64, err error)
//只要有一个存在就为true //只要有一个存在就为true
IsExist(ctx context.Context, userIDs []string) (exist bool, err error) IsExist(ctx context.Context, userIDs []string) (exist bool, err error)
//获取所有用户ID
GetAllUserID(ctx context.Context) ([]string, error)
} }
type UserController struct { type UserController struct {
@ -55,6 +57,11 @@ func (u *UserController) Page(ctx context.Context, pageNumber, showNumber int32)
func (u *UserController) IsExist(ctx context.Context, userIDs []string) (exist bool, err error) { func (u *UserController) IsExist(ctx context.Context, userIDs []string) (exist bool, err error) {
return u.database.IsExist(ctx, userIDs) return u.database.IsExist(ctx, userIDs)
} }
func (u *UserController) GetAllUserID(ctx context.Context) ([]string, error) {
return u.database.GetAllUserID(ctx)
}
func NewUserController(db *gorm.DB) *UserController { func NewUserController(db *gorm.DB) *UserController {
controller := &UserController{database: newUserDatabase(db)} controller := &UserController{database: newUserDatabase(db)}
return controller return controller
@ -75,6 +82,8 @@ type UserDatabaseInterface interface {
Page(ctx context.Context, pageNumber, showNumber int32) (users []*relationTb.UserModel, count int64, err error) Page(ctx context.Context, pageNumber, showNumber int32) (users []*relationTb.UserModel, count int64, err error)
//只要有一个存在就为true //只要有一个存在就为true
IsExist(ctx context.Context, userIDs []string) (exist bool, err error) IsExist(ctx context.Context, userIDs []string) (exist bool, err error)
//获取所有用户ID
GetAllUserID(ctx context.Context) ([]string, error)
} }
type UserDatabase struct { type UserDatabase struct {
@ -138,3 +147,7 @@ func (u *UserDatabase) IsExist(ctx context.Context, userIDs []string) (exist boo
} }
return false, nil return false, nil
} }
func (u *UserDatabase) GetAllUserID(ctx context.Context) ([]string, error) {
return u.user.GetAllUserID(ctx)
}

View File

@ -100,3 +100,10 @@ func (u *UserGorm) Page(ctx context.Context, pageNumber, showNumber int32, tx ..
err = utils.Wrap(getDBConn(u.DB, tx).Limit(int(showNumber)).Offset(int(pageNumber*showNumber)).Find(&users).Error, "") err = utils.Wrap(getDBConn(u.DB, tx).Limit(int(showNumber)).Offset(int(pageNumber*showNumber)).Find(&users).Error, "")
return return
} }
// 获取所有用户ID
func (u *UserGorm) GetAllUserID(ctx context.Context) ([]string, error) {
var userIDs []string
err := u.DB.Pluck("user_id", &userIDs).Error
return userIDs, err
}

View File

@ -1,7 +1,7 @@
package unrelation package unrelation
import ( import (
common "Open_IM/pkg/proto/sdkws" "Open_IM/pkg/proto/sdkws"
"context" "context"
"strconv" "strconv"
"strings" "strings"
@ -14,12 +14,12 @@ const (
) )
type ExtendMsgSetModel struct { type ExtendMsgSetModel struct {
SourceID string `bson:"source_id" json:"sourceID"` SourceID string `bson:"source_id" json:"sourceID"`
SessionType int32 `bson:"session_type" json:"sessionType"` SessionType int32 `bson:"session_type" json:"sessionType"`
ExtendMsgs map[string]ExtendMsg `bson:"extend_msgs" json:"extendMsgs"` ExtendMsgs map[string]ExtendMsgModel `bson:"extend_msgs" json:"extendMsgs"`
ExtendMsgNum int32 `bson:"extend_msg_num" json:"extendMsgNum"` ExtendMsgNum int32 `bson:"extend_msg_num" json:"extendMsgNum"`
CreateTime int64 `bson:"create_time" json:"createTime"` // this block's create time CreateTime int64 `bson:"create_time" json:"createTime"` // this block's create time
MaxMsgUpdateTime int64 `bson:"max_msg_update_time" json:"maxMsgUpdateTime"` // index find msg MaxMsgUpdateTime int64 `bson:"max_msg_update_time" json:"maxMsgUpdateTime"` // index find msg
} }
type KeyValueModel struct { type KeyValueModel struct {
@ -28,7 +28,7 @@ type KeyValueModel struct {
LatestUpdateTime int64 `bson:"latest_update_time" json:"latestUpdateTime"` LatestUpdateTime int64 `bson:"latest_update_time" json:"latestUpdateTime"`
} }
type ExtendMsg struct { type ExtendMsgModel struct {
ReactionExtensionList map[string]KeyValueModel `bson:"reaction_extension_list" json:"reactionExtensionList"` ReactionExtensionList map[string]KeyValueModel `bson:"reaction_extension_list" json:"reactionExtensionList"`
ClientMsgID string `bson:"client_msg_id" json:"clientMsgID"` ClientMsgID string `bson:"client_msg_id" json:"clientMsgID"`
MsgFirstModifyTime int64 `bson:"msg_first_modify_time" json:"msgFirstModifyTime"` // this extendMsg create time MsgFirstModifyTime int64 `bson:"msg_first_modify_time" json:"msgFirstModifyTime"` // this extendMsg create time
@ -36,6 +36,16 @@ type ExtendMsg struct {
Ex string `bson:"ex" json:"ex"` Ex string `bson:"ex" json:"ex"`
} }
type ExtendMsgSetModelInterface interface {
CreateExtendMsgSet(ctx context.Context, set *ExtendMsgSetModel) error
GetAllExtendMsgSet(ctx context.Context, sourceID string, opts *GetAllExtendMsgSetOpts) (sets []*ExtendMsgSetModel, err error)
GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*ExtendMsgSetModel, error)
InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *ExtendMsgModel) error
InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
TakeExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *ExtendMsgModel, err error)
}
func (ExtendMsgSetModel) TableName() string { func (ExtendMsgSetModel) TableName() string {
return CExtendMsgSet return CExtendMsgSet
} }
@ -57,13 +67,3 @@ func (e *ExtendMsgSetModel) SplitSourceIDAndGetIndex() int32 {
type GetAllExtendMsgSetOpts struct { type GetAllExtendMsgSetOpts struct {
ExcludeExtendMsgs bool ExcludeExtendMsgs bool
} }
type ExtendMsgSetInterface interface {
CreateExtendMsgSet(ctx context.Context, set *ExtendMsgSetModel) error
GetAllExtendMsgSet(ctx context.Context, ID string, opts *GetAllExtendMsgSetOpts) (sets []*ExtendMsgSetModel, err error)
GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*ExtendMsgSetModel, error)
InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *ExtendMsg) error
InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*common.KeyValue) error
DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*common.KeyValue) error
GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *ExtendMsg, err error)
}

View File

@ -3,15 +3,19 @@ package unrelation
import ( import (
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"Open_IM/pkg/proto/sdkws" "Open_IM/pkg/proto/sdkws"
"context"
"strconv" "strconv"
"strings"
) )
const ( const (
singleGocMsgNum = 5000 singleGocMsgNum = 5000
CChat = "msg" CChat = "msg"
OldestList = 0
NewestList = -1
) )
type UserMsgDocModel struct { type MsgDocModel struct {
DocID string `bson:"uid"` DocID string `bson:"uid"`
Msg []MsgInfoModel `bson:"msg"` Msg []MsgInfoModel `bson:"msg"`
} }
@ -21,53 +25,79 @@ type MsgInfoModel struct {
Msg []byte `bson:"msg"` Msg []byte `bson:"msg"`
} }
func (UserMsgDocModel) TableName() string { type MsgDocModelInterface interface {
PushMsgsToDoc(ctx context.Context, docID string, msgsToMongo []MsgInfoModel) error
Create(ctx context.Context, model *MsgDocModel) error
UpdateMsgStatusByIndexInOneDoc(ctx context.Context, docID string, msg *sdkws.MsgData, seqIndex int, status int32) error
FindOneByDocID(ctx context.Context, docID string) (*MsgDocModel, error)
GetNewestMsg(ctx context.Context, sourceID string) (*MsgInfoModel, error)
GetOldestMsg(ctx context.Context, sourceID string) (*MsgInfoModel, error)
Delete(ctx context.Context, docIDs []string) error
GetMsgsByIndex(ctx context.Context, sourceID string, index int64) (*MsgDocModel, error)
UpdateOneDoc(ctx context.Context, msg *MsgDocModel) error
}
func (MsgDocModel) TableName() string {
return CChat return CChat
} }
func (UserMsgDocModel) GetSingleDocMsgNum() int { func (MsgDocModel) GetSingleGocMsgNum() int {
return singleGocMsgNum return singleGocMsgNum
} }
func (u UserMsgDocModel) getSeqUid(uid string, seq uint32) string { func (m *MsgDocModel) IsFull() bool {
seqSuffix := seq / singleGocMsgNum index, _ := strconv.Atoi(strings.Split(m.DocID, ":")[1])
return u.indexGen(uid, seqSuffix) if index == 0 {
} if len(m.Msg) >= singleGocMsgNum-1 {
return true
func (u UserMsgDocModel) getSeqUserIDList(userID string, maxSeq uint32) []string { }
seqMaxSuffix := maxSeq / singleGocMsgNum
var seqUserIDList []string
for i := 0; i <= int(seqMaxSuffix); i++ {
seqUserID := u.indexGen(userID, uint32(i))
seqUserIDList = append(seqUserIDList, seqUserID)
} }
return seqUserIDList if len(m.Msg) >= singleGocMsgNum {
return true
}
return false
} }
func (UserMsgDocModel) getSeqSuperGroupID(groupID string, seq uint32) string { func (m MsgDocModel) GetDocID(sourceID string, seq uint32) string {
seqSuffix := seq / singleGocMsgNum seqSuffix := seq / singleGocMsgNum
return superGroupIndexGen(groupID, seqSuffix) return m.indexGen(sourceID, seqSuffix)
} }
func (u UserMsgDocModel) GetSeqUid(uid string, seq uint32) string { func (m MsgDocModel) GetSeqDocIDList(userID string, maxSeq uint32) []string {
return u.getSeqUid(uid, seq) seqMaxSuffix := maxSeq / singleGocMsgNum
var seqUserIDs []string
for i := 0; i <= int(seqMaxSuffix); i++ {
seqUserID := m.indexGen(userID, uint32(i))
seqUserIDs = append(seqUserIDs, seqUserID)
}
return seqUserIDs
} }
func (u UserMsgDocModel) GetDocIDSeqsMap(uid string, seqs []uint32) map[string][]uint32 { func (m MsgDocModel) getSeqSuperGroupID(groupID string, seq uint32) string {
seqSuffix := seq / singleGocMsgNum
return m.superGroupIndexGen(groupID, seqSuffix)
}
func (m MsgDocModel) superGroupIndexGen(groupID string, seqSuffix uint32) string {
return "super_group_" + groupID + ":" + strconv.FormatInt(int64(seqSuffix), 10)
}
func (m MsgDocModel) GetDocIDSeqsMap(sourceID string, seqs []uint32) map[string][]uint32 {
t := make(map[string][]uint32) t := make(map[string][]uint32)
for i := 0; i < len(seqs); i++ { for i := 0; i < len(seqs); i++ {
seqUid := u.getSeqUid(uid, seqs[i]) docID := m.GetDocID(sourceID, seqs[i])
if value, ok := t[seqUid]; !ok { if value, ok := t[docID]; !ok {
var temp []uint32 var temp []uint32
t[seqUid] = append(temp, seqs[i]) t[docID] = append(temp, seqs[i])
} else { } else {
t[seqUid] = append(value, seqs[i]) t[docID] = append(value, seqs[i])
} }
} }
return t return t
} }
func (UserMsgDocModel) getMsgIndex(seq uint32) int { func (m MsgDocModel) getMsgIndex(seq uint32) int {
seqSuffix := seq / singleGocMsgNum seqSuffix := seq / singleGocMsgNum
var index uint32 var index uint32
if seqSuffix == 0 { if seqSuffix == 0 {
@ -78,12 +108,12 @@ func (UserMsgDocModel) getMsgIndex(seq uint32) int {
return int(index) return int(index)
} }
func (UserMsgDocModel) indexGen(uid string, seqSuffix uint32) string { func (m MsgDocModel) indexGen(sourceID string, seqSuffix uint32) string {
return uid + ":" + strconv.FormatInt(int64(seqSuffix), 10) return sourceID + ":" + strconv.FormatInt(int64(seqSuffix), 10)
} }
func (UserMsgDocModel) genExceptionMessageBySeqList(seqList []uint32) (exceptionMsg []*sdkws.MsgData) { func (MsgDocModel) GenExceptionMessageBySeqs(seqs []uint32) (exceptionMsg []*sdkws.MsgData) {
for _, v := range seqList { for _, v := range seqs {
msg := new(sdkws.MsgData) msg := new(sdkws.MsgData)
msg.Seq = v msg.Seq = v
exceptionMsg = append(exceptionMsg, msg) exceptionMsg = append(exceptionMsg, msg)
@ -91,8 +121,8 @@ func (UserMsgDocModel) genExceptionMessageBySeqList(seqList []uint32) (exception
return exceptionMsg return exceptionMsg
} }
func (UserMsgDocModel) genExceptionSuperGroupMessageBySeqList(seqList []uint32, groupID string) (exceptionMsg []*sdkws.MsgData) { func (MsgDocModel) GenExceptionSuperGroupMessageBySeqs(seqs []uint32, groupID string) (exceptionMsg []*sdkws.MsgData) {
for _, v := range seqList { for _, v := range seqs {
msg := new(sdkws.MsgData) msg := new(sdkws.MsgData)
msg.Seq = v msg.Seq = v
msg.GroupID = groupID msg.GroupID = groupID

View File

@ -37,7 +37,3 @@ type SuperGroupModelInterface interface {
DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error
RemoveGroupFromUser(ctx context.Context, groupID string, userIDs []string, tx ...any) error RemoveGroupFromUser(ctx context.Context, groupID string, userIDs []string, tx ...any) error
} }
func superGroupIndexGen(groupID string, seqSuffix uint32) string {
return "super_group_" + groupID + ":" + strconv.FormatInt(int64(seqSuffix), 10)
}

View File

@ -1,171 +0,0 @@
package unrelation
import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
pbMsg "Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils"
"context"
"errors"
go_redis "github.com/go-redis/redis/v8"
"github.com/golang/protobuf/proto"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
func (d *db.DataBases) BatchInsertChat2DB(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string, currentMaxSeq uint64) error {
newTime := getCurrentTimestampByMill()
if len(msgList) > GetSingleGocMsgNum() {
return errors.New("too large")
}
isInit := false
var remain uint64
blk0 := uint64(GetSingleGocMsgNum() - 1)
//currentMaxSeq 4998
if currentMaxSeq < uint64(mongo2.GetSingleGocMsgNum()) {
remain = blk0 - currentMaxSeq //1
} else {
excludeBlk0 := currentMaxSeq - blk0 //=1
//(5000-1)%5000 == 4999
remain = (uint64(mongo2.GetSingleGocMsgNum()) - (excludeBlk0 % uint64(mongo2.GetSingleGocMsgNum()))) % uint64(mongo2.GetSingleGocMsgNum())
}
//remain=1
insertCounter := uint64(0)
msgListToMongo := make([]mongo2.MsgInfo, 0)
msgListToMongoNext := make([]mongo2.MsgInfo, 0)
seqUid := ""
seqUidNext := ""
log.Debug(operationID, "remain ", remain, "insertCounter ", insertCounter, "currentMaxSeq ", currentMaxSeq, userID, len(msgList))
var err error
for _, m := range msgList {
log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
currentMaxSeq++
sMsg := mongo2.MsgInfo{}
sMsg.SendTime = m.MsgData.SendTime
m.MsgData.Seq = uint32(currentMaxSeq)
log.Debug(operationID, "mongo msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", userID, "seq: ", currentMaxSeq)
if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
return utils.Wrap(err, "")
}
if isInit {
msgListToMongoNext = append(msgListToMongoNext, sMsg)
seqUidNext = mongo2.getSeqUid(userID, uint32(currentMaxSeq))
log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
continue
}
if insertCounter < remain {
msgListToMongo = append(msgListToMongo, sMsg)
insertCounter++
seqUid = mongo2.getSeqUid(userID, uint32(currentMaxSeq))
log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID)
} else {
msgListToMongoNext = append(msgListToMongoNext, sMsg)
seqUidNext = mongo2.getSeqUid(userID, uint32(currentMaxSeq))
log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain, "userID: ", userID)
}
}
ctx := context.Background()
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(mongo2.cChat)
if seqUid != "" {
filter := bson.M{"uid": seqUid}
log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo, "userID: ", userID)
err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err()
if err != nil {
if err == mongo.ErrNoDocuments {
filter := bson.M{"uid": seqUid}
sChat := mongo2.UserChat{}
sChat.UID = seqUid
sChat.Msg = msgListToMongo
log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo)
if _, err = c.InsertOne(ctx, &sChat); err != nil {
promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter)
log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
return utils.Wrap(err, "")
}
promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter)
} else {
promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter)
log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
return utils.Wrap(err, "")
}
} else {
promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter)
}
}
if seqUidNext != "" {
filter := bson.M{"uid": seqUidNext}
sChat := mongo2.UserChat{}
sChat.UID = seqUidNext
sChat.Msg = msgListToMongoNext
log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext, "userID: ", userID)
if _, err = c.InsertOne(ctx, &sChat); err != nil {
promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter)
log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
return utils.Wrap(err, "")
}
promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter)
}
log.Debug(operationID, "batch mgo cost time ", mongo2.getCurrentTimestampByMill()-newTime, userID, len(msgList))
return nil
}
func (d *db.DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgDataToMQ, operationID string) (error, uint64) {
newTime := mongo2.getCurrentTimestampByMill()
lenList := len(msgList)
if lenList > mongo2.GetSingleGocMsgNum() {
return errors.New("too large"), 0
}
if lenList < 1 {
return errors.New("too short as 0"), 0
}
// judge sessionType to get seq
var currentMaxSeq uint64
var err error
if msgList[0].MsgData.SessionType == constant.SuperGroupChatType {
currentMaxSeq, err = d.GetGroupMaxSeq(insertID)
log.Debug(operationID, "constant.SuperGroupChatType lastMaxSeq before add ", currentMaxSeq, "userID ", insertID, err)
} else {
currentMaxSeq, err = d.GetUserMaxSeq(insertID)
log.Debug(operationID, "constant.SingleChatType lastMaxSeq before add ", currentMaxSeq, "userID ", insertID, err)
}
if err != nil && err != go_redis.Nil {
promePkg.PromeInc(promePkg.SeqGetFailedCounter)
return utils.Wrap(err, ""), 0
}
promePkg.PromeInc(promePkg.SeqGetSuccessCounter)
lastMaxSeq := currentMaxSeq
for _, m := range msgList {
currentMaxSeq++
sMsg := mongo2.MsgInfo{}
sMsg.SendTime = m.MsgData.SendTime
m.MsgData.Seq = uint32(currentMaxSeq)
log.Debug(operationID, "cache msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", insertID, "seq: ", currentMaxSeq)
}
log.Debug(operationID, "SetMessageToCache ", insertID, len(msgList))
err, failedNum := d.SetMessageToCache(msgList, insertID, operationID)
if err != nil {
promePkg.PromeAdd(promePkg.MsgInsertRedisFailedCounter, failedNum)
log.Error(operationID, "setMessageToCache failed, continue ", err.Error(), len(msgList), insertID)
} else {
promePkg.PromeInc(promePkg.MsgInsertRedisSuccessCounter)
}
log.Debug(operationID, "batch to redis cost time ", mongo2.getCurrentTimestampByMill()-newTime, insertID, len(msgList))
if msgList[0].MsgData.SessionType == constant.SuperGroupChatType {
err = d.SetGroupMaxSeq(insertID, currentMaxSeq)
} else {
err = d.SetUserMaxSeq(insertID, currentMaxSeq)
}
if err != nil {
promePkg.PromeInc(promePkg.SeqSetFailedCounter)
} else {
promePkg.PromeInc(promePkg.SeqSetSuccessCounter)
}
return utils.Wrap(err, ""), lastMaxSeq
}

View File

@ -1,7 +1,7 @@
package unrelation package unrelation
import ( import (
"Open_IM/pkg/common/db/table/unrelation" unRelationTb "Open_IM/pkg/common/db/table/unrelation"
"Open_IM/pkg/proto/sdkws" "Open_IM/pkg/proto/sdkws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
@ -19,19 +19,15 @@ type ExtendMsgSetMongoDriver struct {
} }
func NewExtendMsgSetMongoDriver(mgoDB *mongo.Database) *ExtendMsgSetMongoDriver { func NewExtendMsgSetMongoDriver(mgoDB *mongo.Database) *ExtendMsgSetMongoDriver {
return &ExtendMsgSetMongoDriver{mgoDB: mgoDB, ExtendMsgSetCollection: mgoDB.Collection(unrelation.CExtendMsgSet)} return &ExtendMsgSetMongoDriver{mgoDB: mgoDB, ExtendMsgSetCollection: mgoDB.Collection(unRelationTb.CExtendMsgSet)}
} }
func (e *ExtendMsgSetMongoDriver) CreateExtendMsgSet(ctx context.Context, set *unrelation.ExtendMsgSet) error { func (e *ExtendMsgSetMongoDriver) CreateExtendMsgSet(ctx context.Context, set *unRelationTb.ExtendMsgSetModel) error {
_, err := e.ExtendMsgSetCollection.InsertOne(ctx, set) _, err := e.ExtendMsgSetCollection.InsertOne(ctx, set)
return err return err
} }
type GetAllExtendMsgSetOpts struct { func (e *ExtendMsgSetMongoDriver) GetAllExtendMsgSet(ctx context.Context, ID string, opts *unRelationTb.GetAllExtendMsgSetOpts) (sets []*unRelationTb.ExtendMsgSetModel, err error) {
ExcludeExtendMsgs bool
}
func (e *ExtendMsgSetMongoDriver) GetAllExtendMsgSet(ctx context.Context, ID string, opts *GetAllExtendMsgSetOpts) (sets []*unrelation.ExtendMsgSet, err error) {
regex := fmt.Sprintf("^%s", ID) regex := fmt.Sprintf("^%s", ID)
var findOpts *options.FindOptions var findOpts *options.FindOptions
if opts != nil { if opts != nil {
@ -51,7 +47,7 @@ func (e *ExtendMsgSetMongoDriver) GetAllExtendMsgSet(ctx context.Context, ID str
return sets, nil return sets, nil
} }
func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unrelation.ExtendMsgSet, error) { func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, maxMsgUpdateTime int64) (*unRelationTb.ExtendMsgSetModel, error) {
var err error var err error
findOpts := options.Find().SetLimit(1).SetSkip(0).SetSort(bson.M{"source_id": -1}).SetProjection(bson.M{"extend_msgs": 0}) findOpts := options.Find().SetLimit(1).SetSkip(0).SetSort(bson.M{"source_id": -1}).SetProjection(bson.M{"extend_msgs": 0})
// update newest // update newest
@ -63,7 +59,7 @@ func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx context.Context, sourceID
if err != nil { if err != nil {
return nil, utils.Wrap(err, "") return nil, utils.Wrap(err, "")
} }
var setList []unrelation.ExtendMsgSet var setList []unRelationTb.ExtendMsgSetModel
if err := result.All(ctx, &setList); err != nil { if err := result.All(ctx, &setList); err != nil {
return nil, utils.Wrap(err, "") return nil, utils.Wrap(err, "")
} }
@ -74,7 +70,7 @@ func (e *ExtendMsgSetMongoDriver) GetExtendMsgSet(ctx context.Context, sourceID
} }
// first modify msg // first modify msg
func (e *ExtendMsgSetMongoDriver) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unrelation.ExtendMsg) error { func (e *ExtendMsgSetMongoDriver) InsertExtendMsg(ctx context.Context, sourceID string, sessionType int32, msg *unRelationTb.ExtendMsgModel) error {
set, err := e.GetExtendMsgSet(ctx, sourceID, sessionType, 0) set, err := e.GetExtendMsgSet(ctx, sourceID, sessionType, 0)
if err != nil { if err != nil {
return utils.Wrap(err, "") return utils.Wrap(err, "")
@ -84,10 +80,10 @@ func (e *ExtendMsgSetMongoDriver) InsertExtendMsg(ctx context.Context, sourceID
if set != nil { if set != nil {
index = set.SplitSourceIDAndGetIndex() index = set.SplitSourceIDAndGetIndex()
} }
err = e.CreateExtendMsgSet(ctx, &unrelation.ExtendMsgSet{ err = e.CreateExtendMsgSet(ctx, &unRelationTb.ExtendMsgSetModel{
SourceID: set.GetSourceID(sourceID, index), SourceID: set.GetSourceID(sourceID, index),
SessionType: sessionType, SessionType: sessionType,
ExtendMsgs: map[string]unrelation.ExtendMsg{msg.ClientMsgID: *msg}, ExtendMsgs: map[string]unRelationTb.ExtendMsgModel{msg.ClientMsgID: *msg},
ExtendMsgNum: 1, ExtendMsgNum: 1,
CreateTime: msg.MsgFirstModifyTime, CreateTime: msg.MsgFirstModifyTime,
MaxMsgUpdateTime: msg.MsgFirstModifyTime, MaxMsgUpdateTime: msg.MsgFirstModifyTime,
@ -136,14 +132,14 @@ func (e *ExtendMsgSetMongoDriver) DeleteReactionExtendMsgSet(ctx context.Context
return err return err
} }
func (e *ExtendMsgSetMongoDriver) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unrelation.ExtendMsg, err error) { func (e *ExtendMsgSetMongoDriver) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (extendMsg *unRelationTb.ExtendMsgModel, err error) {
findOpts := options.Find().SetLimit(1).SetSkip(0).SetSort(bson.M{"source_id": -1}).SetProjection(bson.M{fmt.Sprintf("extend_msgs.%s", clientMsgID): 1}) findOpts := options.Find().SetLimit(1).SetSkip(0).SetSort(bson.M{"source_id": -1}).SetProjection(bson.M{fmt.Sprintf("extend_msgs.%s", clientMsgID): 1})
regex := fmt.Sprintf("^%s", sourceID) regex := fmt.Sprintf("^%s", sourceID)
result, err := e.ExtendMsgSetCollection.Find(ctx, bson.M{"source_id": primitive.Regex{Pattern: regex}, "session_type": sessionType, "max_msg_update_time": bson.M{"$lte": maxMsgUpdateTime}}, findOpts) result, err := e.ExtendMsgSetCollection.Find(ctx, bson.M{"source_id": primitive.Regex{Pattern: regex}, "session_type": sessionType, "max_msg_update_time": bson.M{"$lte": maxMsgUpdateTime}}, findOpts)
if err != nil { if err != nil {
return nil, utils.Wrap(err, "") return nil, utils.Wrap(err, "")
} }
var setList []unrelation.ExtendMsgSet var setList []unRelationTb.ExtendMsgSetModel
if err := result.All(ctx, &setList); err != nil { if err := result.All(ctx, &setList); err != nil {
return nil, utils.Wrap(err, "") return nil, utils.Wrap(err, "")
} }

View File

@ -62,17 +62,17 @@ func (m *Mongo) GetClient() *mongo.Client {
} }
func (m *Mongo) CreateMsgIndex() { func (m *Mongo) CreateMsgIndex() {
if err := m.createMongoIndex(unrelation, false, "uid"); err != nil { if err := m.createMongoIndex(unrelation.CChat, false, "uid"); err != nil {
fmt.Println(err.Error() + " index create failed " + unrelation.CChat + " uid, please create index by yourself in field uid") fmt.Println(err.Error() + " index create failed " + unrelation.CChat + " uid, please create index by yourself in field uid")
} }
} }
func (m *Mongo) CreateSuperGroupIndex() { func (m *Mongo) CreateSuperGroupIndex() {
if err := m.createMongoIndex(unrelation.CSuperGroup, true, "group_id"); err != nil { if err := m.createMongoIndex(unrelation.CSuperGroup, true, "group_id"); err != nil {
panic(err.Error() + "index create failed " + unrelation.CTag + " group_id") panic(err.Error() + "index create failed " + unrelation.CSuperGroup + " group_id")
} }
if err := m.createMongoIndex(unrelation.CUserToSuperGroup, true, "user_id"); err != nil { if err := m.createMongoIndex(unrelation.CUserToSuperGroup, true, "user_id"); err != nil {
panic(err.Error() + "index create failed " + unrelation.CTag + "user_id") panic(err.Error() + "index create failed " + unrelation.CUserToSuperGroup + "user_id")
} }
} }

View File

@ -1,670 +0,0 @@
package unrelation
// deleteMsgByLogic
//func (d *db.DataBases) DelMsgBySeqList(userID string, seqList []uint32, operationID string) (totalUnexistSeqList []uint32, err error) {
// log.Debug(operationID, utils.GetSelfFuncName(), "args ", userID, seqList)
// sortkeys.Uint32s(seqList)
// suffixUserID2SubSeqList := func(uid string, seqList []uint32) map[string][]uint32 {
// t := make(map[string][]uint32)
// for i := 0; i < len(seqList); i++ {
// seqUid := getSeqUid(uid, seqList[i])
// if value, ok := t[seqUid]; !ok {
// var temp []uint32
// t[seqUid] = append(temp, seqList[i])
// } else {
// t[seqUid] = append(value, seqList[i])
// }
// }
// return t
// }(userID, seqList)
//
// lock := sync.Mutex{}
// var wg sync.WaitGroup
// wg.Add(len(suffixUserID2SubSeqList))
// for k, v := range suffixUserID2SubSeqList {
// go func(suffixUserID string, subSeqList []uint32, operationID string) {
// defer wg.Done()
// unexistSeqList, err := d.DelMsgBySeqListInOneDoc(suffixUserID, subSeqList, operationID)
// if err != nil {
// log.Error(operationID, "DelMsgBySeqListInOneDoc failed ", err.Error(), suffixUserID, subSeqList)
// return
// }
// lock.Lock()
// totalUnexistSeqList = append(totalUnexistSeqList, unexistSeqList...)
// lock.Unlock()
// }(k, v, operationID)
// }
// return totalUnexistSeqList, err
//}
//
//func (d *db.DataBases) DelMsgBySeqListInOneDoc(suffixUserID string, seqList []uint32, operationID string) ([]uint32, error) {
// log.Debug(operationID, utils.GetSelfFuncName(), "args ", suffixUserID, seqList)
// seqMsgList, indexList, unexistSeqList, err := d.GetMsgAndIndexBySeqListInOneMongo2(suffixUserID, seqList, operationID)
// if err != nil {
// return nil, utils.Wrap(err, "")
// }
// for i, v := range seqMsgList {
// if err := d.ReplaceMsgByIndex(suffixUserID, v, operationID, indexList[i]); err != nil {
// return nil, utils.Wrap(err, "")
// }
// }
// return unexistSeqList, nil
//}
// deleteMsgByLogic
//func (d *db.DataBases) DelMsgLogic(uid string, seqList []uint32, operationID string) error {
// sortkeys.Uint32s(seqList)
// seqMsgs, err := d.GetMsgBySeqListMongo2(uid, seqList, operationID)
// if err != nil {
// return utils.Wrap(err, "")
// }
// for _, seqMsg := range seqMsgs {
// log.NewDebug(operationID, utils.GetSelfFuncName(), *seqMsg)
// seqMsg.Status = constant.MsgDeleted
// if err = d.ReplaceMsgBySeq(uid, seqMsg, operationID); err != nil {
// log.NewError(operationID, utils.GetSelfFuncName(), "ReplaceMsgListBySeq error", err.Error())
// }
// }
// return nil
//}
//func (d *db.DataBases) ReplaceMsgByIndex(suffixUserID string, msg *sdkws.MsgData, operationID string, seqIndex int) error {
// log.NewInfo(operationID, utils.GetSelfFuncName(), suffixUserID, *msg)
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// s := fmt.Sprintf("msg.%d.msg", seqIndex)
// log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s)
// msg.Status = constant.MsgDeleted
// bytes, err := proto.Marshal(msg)
// if err != nil {
// log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal failed ", err.Error(), msg.String())
// return utils.Wrap(err, "")
// }
// updateResult, err := c.UpdateOne(ctx, bson.M{"uid": suffixUserID}, bson.M{"$set": bson.M{s: bytes}})
// log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult)
// if err != nil {
// log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error())
// return utils.Wrap(err, "")
// }
// return nil
//}
//func (d *db.DataBases) ReplaceMsgBySeq(uid string, msg *sdkws.MsgData, operationID string) error {
// log.NewInfo(operationID, utils.GetSelfFuncName(), uid, *msg)
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// uid = getSeqUid(uid, msg.Seq)
// seqIndex := getMsgIndex(msg.Seq)
// s := fmt.Sprintf("msg.%d.msg", seqIndex)
// log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s)
// bytes, err := proto.Marshal(msg)
// if err != nil {
// log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal", err.Error())
// return utils.Wrap(err, "")
// }
//
// updateResult, err := c.UpdateOne(
// ctx, bson.M{"uid": uid},
// bson.M{"$set": bson.M{s: bytes}})
// log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult)
// if err != nil {
// log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error())
// return utils.Wrap(err, "")
// }
// return nil
//}
//
//func (d *db.DataBases) UpdateOneMsgList(msg *UserChat) error {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// _, err := c.UpdateOne(ctx, bson.M{"uid": msg.UID}, bson.M{"$set": bson.M{"msg": msg.Msg}})
// return err
//}
//
//func (d *db.DataBases) GetMsgBySeqList(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) {
// log.NewInfo(operationID, utils.GetSelfFuncName(), uid, seqList)
// var hasSeqList []uint32
// singleCount := 0
// session := d.mgoSession.Clone()
// if session == nil {
// return nil, errors.New("session == nil")
// }
// defer session.Close()
// c := session.DB(config.Config.Mongo.DBDatabase).C(cChat)
// m := func(uid string, seqList []uint32) map[string][]uint32 {
// t := make(map[string][]uint32)
// for i := 0; i < len(seqList); i++ {
// seqUid := getSeqUid(uid, seqList[i])
// if value, ok := t[seqUid]; !ok {
// var temp []uint32
// t[seqUid] = append(temp, seqList[i])
// } else {
// t[seqUid] = append(value, seqList[i])
// }
// }
// return t
// }(uid, seqList)
// sChat := UserChat{}
// for seqUid, value := range m {
// if err = c.Find(bson.M{"uid": seqUid}).One(&sChat); err != nil {
// log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error())
// continue
// }
// singleCount = 0
// for i := 0; i < len(sChat.Msg); i++ {
// msg := new(sdkws.MsgData)
// if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
// log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error())
// return nil, err
// }
// if isContainInt32(msg.Seq, value) {
// seqMsg = append(seqMsg, msg)
// hasSeqList = append(hasSeqList, msg.Seq)
// singleCount++
// if singleCount == len(value) {
// break
// }
// }
// }
// }
// if len(hasSeqList) != len(seqList) {
// var diff []uint32
// diff = utils.Difference(hasSeqList, seqList)
// exceptionMSg := genExceptionMessageBySeqList(diff)
// seqMsg = append(seqMsg, exceptionMSg...)
//
// }
// return seqMsg, nil
//}
//
//func (d *db.DataBases) GetUserMsgListByIndex(ID string, index int64) (*UserChat, error) {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// regex := fmt.Sprintf("^%s", ID)
// findOpts := options.Find().SetLimit(1).SetSkip(index).SetSort(bson.M{"uid": 1})
// var msgs []UserChat
// //primitive.Regex{Pattern: regex}
// cursor, err := c.Find(ctx, bson.M{"uid": primitive.Regex{Pattern: regex}}, findOpts)
// if err != nil {
// return nil, utils.Wrap(err, "")
// }
// err = cursor.All(context.Background(), &msgs)
// if err != nil {
// return nil, utils.Wrap(err, fmt.Sprintf("cursor is %s", cursor.Current.String()))
// }
// if len(msgs) > 0 {
// return &msgs[0], nil
// } else {
// return nil, ErrMsgListNotExist
// }
//}
//
//func (d *db.DataBases) DelMongoMsgs(IDList []string) error {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// _, err := c.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": IDList}})
// return err
//}
//
//func (d *db.DataBases) ReplaceMsgToBlankByIndex(suffixID string, index int) (replaceMaxSeq uint32, err error) {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// userChat := &UserChat{}
// err = c.FindOne(ctx, bson.M{"uid": suffixID}).Decode(&userChat)
// if err != nil {
// return 0, err
// }
// for i, msg := range userChat.Msg {
// if i <= index {
// msgPb := &sdkws.MsgData{}
// if err = proto.Unmarshal(msg.Msg, msgPb); err != nil {
// continue
// }
// newMsgPb := &sdkws.MsgData{Seq: msgPb.Seq}
// bytes, err := proto.Marshal(newMsgPb)
// if err != nil {
// continue
// }
// msg.Msg = bytes
// msg.SendTime = 0
// replaceMaxSeq = msgPb.Seq
// }
// }
// _, err = c.UpdateOne(ctx, bson.M{"uid": suffixID}, bson.M{"$set": bson.M{"msg": userChat.Msg}})
// return replaceMaxSeq, err
//}
//
//func (d *db.DataBases) GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// regex := fmt.Sprintf("^%s", ID)
// findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": -1})
// var userChats []UserChat
// cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts)
// if err != nil {
// return nil, err
// }
// err = cursor.All(ctx, &userChats)
// if err != nil {
// return nil, utils.Wrap(err, "")
// }
// if len(userChats) > 0 {
// if len(userChats[0].Msg) > 0 {
// msgPb := &sdkws.MsgData{}
// err = proto.Unmarshal(userChats[0].Msg[len(userChats[0].Msg)-1].Msg, msgPb)
// if err != nil {
// return nil, utils.Wrap(err, "")
// }
// return msgPb, nil
// }
// return nil, errors.New("len(userChats[0].Msg) < 0")
// }
// return nil, nil
//}
//
//func (d *db.DataBases) GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// regex := fmt.Sprintf("^%s", ID)
// findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": 1})
// var userChats []UserChat
// cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts)
// if err != nil {
// return nil, err
// }
// err = cursor.All(ctx, &userChats)
// if err != nil {
// return nil, utils.Wrap(err, "")
// }
// var oldestMsg []byte
// if len(userChats) > 0 {
// for _, v := range userChats[0].Msg {
// if v.SendTime != 0 {
// oldestMsg = v.Msg
// break
// }
// }
// if len(oldestMsg) == 0 {
// oldestMsg = userChats[0].Msg[len(userChats[0].Msg)-1].Msg
// }
// msgPb := &sdkws.MsgData{}
// err = proto.Unmarshal(oldestMsg, msgPb)
// if err != nil {
// return nil, utils.Wrap(err, "")
// }
// return msgPb, nil
// }
// return nil, nil
//}
//
//func (d *db.DataBases) GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) {
// var hasSeqList []uint32
// singleCount := 0
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
//
// m := func(uid string, seqList []uint32) map[string][]uint32 {
// t := make(map[string][]uint32)
// for i := 0; i < len(seqList); i++ {
// seqUid := getSeqUid(uid, seqList[i])
// if value, ok := t[seqUid]; !ok {
// var temp []uint32
// t[seqUid] = append(temp, seqList[i])
// } else {
// t[seqUid] = append(value, seqList[i])
// }
// }
// return t
// }(uid, seqList)
// sChat := UserChat{}
// for seqUid, value := range m {
// if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil {
// log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error())
// continue
// }
// singleCount = 0
// for i := 0; i < len(sChat.Msg); i++ {
// msg := new(sdkws.MsgData)
// if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
// log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error())
// return nil, err
// }
// if isContainInt32(msg.Seq, value) {
// seqMsg = append(seqMsg, msg)
// hasSeqList = append(hasSeqList, msg.Seq)
// singleCount++
// if singleCount == len(value) {
// break
// }
// }
// }
// }
// if len(hasSeqList) != len(seqList) {
// var diff []uint32
// diff = utils.Difference(hasSeqList, seqList)
// exceptionMSg := genExceptionMessageBySeqList(diff)
// seqMsg = append(seqMsg, exceptionMSg...)
//
// }
// return seqMsg, nil
//}
//func (d *db.DataBases) GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) {
// var hasSeqList []uint32
// singleCount := 0
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
//
// m := func(uid string, seqList []uint32) map[string][]uint32 {
// t := make(map[string][]uint32)
// for i := 0; i < len(seqList); i++ {
// seqUid := getSeqUid(uid, seqList[i])
// if value, ok := t[seqUid]; !ok {
// var temp []uint32
// t[seqUid] = append(temp, seqList[i])
// } else {
// t[seqUid] = append(value, seqList[i])
// }
// }
// return t
// }(groupID, seqList)
// sChat := UserChat{}
// for seqUid, value := range m {
// if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil {
// log.NewError(operationID, "not find seqGroupID", seqUid, value, groupID, seqList, err.Error())
// continue
// }
// singleCount = 0
// for i := 0; i < len(sChat.Msg); i++ {
// msg := new(sdkws.MsgData)
// if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
// log.NewError(operationID, "Unmarshal err", seqUid, value, groupID, seqList, err.Error())
// return nil, err
// }
// if isContainInt32(msg.Seq, value) {
// seqMsg = append(seqMsg, msg)
// hasSeqList = append(hasSeqList, msg.Seq)
// singleCount++
// if singleCount == len(value) {
// break
// }
// }
// }
// }
// if len(hasSeqList) != len(seqList) {
// var diff []uint32
// diff = utils.Difference(hasSeqList, seqList)
// exceptionMSg := genExceptionSuperGroupMessageBySeqList(diff, groupID)
// seqMsg = append(seqMsg, exceptionMSg...)
//
// }
// return seqMsg, nil
//}
//
//func (d *db.DataBases) GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, unexistSeqList []uint32, err error) {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// sChat := UserChat{}
// if err = c.FindOne(ctx, bson.M{"uid": suffixUserID}).Decode(&sChat); err != nil {
// log.NewError(operationID, "not find seqUid", suffixUserID, err.Error())
// return nil, nil, nil, utils.Wrap(err, "")
// }
// singleCount := 0
// var hasSeqList []uint32
// for i := 0; i < len(sChat.Msg); i++ {
// msg := new(sdkws.MsgData)
// if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
// log.NewError(operationID, "Unmarshal err", msg.String(), err.Error())
// return nil, nil, nil, err
// }
// if isContainInt32(msg.Seq, seqList) {
// indexList = append(indexList, i)
// seqMsg = append(seqMsg, msg)
// hasSeqList = append(hasSeqList, msg.Seq)
// singleCount++
// if singleCount == len(seqList) {
// break
// }
// }
// }
// for _, i := range seqList {
// if isContainInt32(i, hasSeqList) {
// continue
// }
// unexistSeqList = append(unexistSeqList, i)
// }
// return seqMsg, indexList, unexistSeqList, nil
//}
//
//func genExceptionMessageBySeqList(seqList []uint32) (exceptionMsg []*sdkws.MsgData) {
// for _, v := range seqList {
// msg := new(sdkws.MsgData)
// msg.Seq = v
// exceptionMsg = append(exceptionMsg, msg)
// }
// return exceptionMsg
//}
//
//func genExceptionSuperGroupMessageBySeqList(seqList []uint32, groupID string) (exceptionMsg []*sdkws.MsgData) {
// for _, v := range seqList {
// msg := new(sdkws.MsgData)
// msg.Seq = v
// msg.GroupID = groupID
// msg.SessionType = constant.SuperGroupChatType
// exceptionMsg = append(exceptionMsg, msg)
// }
// return exceptionMsg
//}
//
//func (d *db.DataBases) SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// newTime := getCurrentTimestampByMill()
// operationID := ""
// seqUid := getSeqUid(uid, m.MsgData.Seq)
// filter := bson.M{"uid": seqUid}
// var err error
// sMsg := MsgInfo{}
// sMsg.SendTime = sendTime
// if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
// return utils.Wrap(err, "")
// }
// err = c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": sMsg}}).Err()
// log.NewWarn(operationID, "get mgoSession cost time", getCurrentTimestampByMill()-newTime)
// if err != nil {
// sChat := UserChat{}
// sChat.UID = seqUid
// sChat.Msg = append(sChat.Msg, sMsg)
// if _, err = c.InsertOne(ctx, &sChat); err != nil {
// log.NewDebug(operationID, "InsertOne failed", filter)
// return utils.Wrap(err, "")
// }
// } else {
// log.NewDebug(operationID, "FindOneAndUpdate ok", filter)
// }
//
// log.NewDebug(operationID, "find mgo uid cost time", getCurrentTimestampByMill()-newTime)
// return nil
//}
//
//func (d *DataBases) SaveUserChatListMongo2(uid string, sendTime int64, msgList []*pbMsg.MsgDataToDB) error {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// newTime := getCurrentTimestampByMill()
// operationID := ""
// seqUid := ""
// msgListToMongo := make([]MsgInfo, 0)
//
// for _, m := range msgList {
// seqUid = getSeqUid(uid, m.MsgData.Seq)
// var err error
// sMsg := MsgInfo{}
// sMsg.SendTime = sendTime
// if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
// return utils.Wrap(err, "")
// }
// msgListToMongo = append(msgListToMongo, sMsg)
// }
//
// filter := bson.M{"uid": seqUid}
// log.NewDebug(operationID, "filter ", seqUid)
// err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err()
// log.NewWarn(operationID, "get mgoSession cost time", getCurrentTimestampByMill()-newTime)
// if err != nil {
// sChat := UserChat{}
// sChat.UID = seqUid
// sChat.Msg = msgListToMongo
//
// if _, err = c.InsertOne(ctx, &sChat); err != nil {
// log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
// return utils.Wrap(err, "")
// }
// } else {
// log.NewDebug(operationID, "FindOneAndUpdate ok", filter)
// }
//
// log.NewDebug(operationID, "find mgo uid cost time", getCurrentTimestampByMill()-newTime)
// return nil
//}
//func (d *db.DataBases) SaveUserChat(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error {
// var seqUid string
// newTime := getCurrentTimestampByMill()
// session := d.mgoSession.Clone()
// if session == nil {
// return errors.New("session == nil")
// }
// defer session.Close()
// log.NewDebug("", "get mgoSession cost time", getCurrentTimestampByMill()-newTime)
// c := session.DB(config.Config.Mongo.DBDatabase).C(cChat)
// seqUid = getSeqUid(uid, m.MsgData.Seq)
// n, err := c.Find(bson.M{"uid": seqUid}).Count()
// if err != nil {
// return err
// }
// log.NewDebug("", "find mgo uid cost time", getCurrentTimestampByMill()-newTime)
// sMsg := MsgInfo{}
// sMsg.SendTime = sendTime
// if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
// return err
// }
// if n == 0 {
// sChat := UserChat{}
// sChat.UID = seqUid
// sChat.Msg = append(sChat.Msg, sMsg)
// err = c.Insert(&sChat)
// if err != nil {
// return err
// }
// } else {
// err = c.Update(bson.M{"uid": seqUid}, bson.M{"$push": bson.M{"msg": sMsg}})
// if err != nil {
// return err
// }
// }
// log.NewDebug("", "insert mgo data cost time", getCurrentTimestampByMill()-newTime)
// return nil
//}
//
//func (d *db.DataBases) DelUserChatMongo2(uid string) error {
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
// filter := bson.M{"uid": uid}
//
// delTime := time.Now().Unix() - int64(config.Config.Mongo.DBRetainChatRecords)*24*3600
// if _, err := c.UpdateOne(ctx, filter, bson.M{"$pull": bson.M{"msg": bson.M{"sendtime": bson.M{"$lte": delTime}}}}); err != nil {
// return utils.Wrap(err, "")
// }
// return nil
//}
//
//func (d *db.DataBases) MgoSkipUID(count int) (string, error) {
// return "", nil
//session := d.mgoSession.Clone()
//if session == nil {
// return "", errors.New("session == nil")
//}
//defer session.Close()
//
//c := session.DB(config.Config.Mongo.DBDatabase).C(cChat)
//
//sChat := UserChat{}
//c.Find(nil).Skip(count).Limit(1).One(&sChat)
//return sChat.UID, nil
}
//func generateTagID(tagName, userID string) string {
// return utils.Md5(tagName + userID + strconv.Itoa(rand.Int()) + time.Now().String())
//}
//func getCurrentTimestampByMill() int64 {
// return time.Now().UnixNano() / 1e6
//}
//func GetCurrentTimestampByMill() int64 {
// return time.Now().UnixNano() / 1e6
//}
//func getSeqUid(uid string, seq uint32) string {
// seqSuffix := seq / singleGocMsgNum
// return indexGen(uid, seqSuffix)
//}
//
//func getSeqUserIDList(userID string, maxSeq uint32) []string {
// seqMaxSuffix := maxSeq / singleGocMsgNum
// var seqUserIDList []string
// for i := 0; i <= int(seqMaxSuffix); i++ {
// seqUserID := indexGen(userID, uint32(i))
// seqUserIDList = append(seqUserIDList, seqUserID)
// }
// return seqUserIDList
//}
//
//func getSeqSuperGroupID(groupID string, seq uint32) string {
// seqSuffix := seq / singleGocMsgNum
// return superGroupIndexGen(groupID, seqSuffix)
//}
//
//func GetSeqUid(uid string, seq uint32) string {
// return getSeqUid(uid, seq)
//}
//
//func getMsgIndex(seq uint32) int {
// seqSuffix := seq / singleGocMsgNum
// var index uint32
// if seqSuffix == 0 {
// index = (seq - seqSuffix*singleGocMsgNum) - 1
// } else {
// index = seq - seqSuffix*singleGocMsgNum
// }
// return int(index)
//}
//func isContainInt32(target uint32, List []uint32) bool {
// for _, element := range List {
// if target == element {
// return true
// }
// }
// return false
//}
//
//func isNotContainInt32(target uint32, List []uint32) bool {
// for _, i := range List {
// if i == target {
// return false
// }
// }
// return true
//}
//
//func indexGen(uid string, seqSuffix uint32) string {
// return uid + ":" + strconv.FormatInt(int64(seqSuffix), 10)
//}
//func superGroupIndexGen(groupID string, seqSuffix uint32) string {
// return "super_group_" + groupID + ":" + strconv.FormatInt(int64(seqSuffix), 10)
//}

View File

@ -1,561 +1,131 @@
package unrelation package unrelation
import ( import (
"Open_IM/pkg/common/constant" table "Open_IM/pkg/common/db/table/unrelation"
"Open_IM/pkg/common/db/table/unrelation"
"Open_IM/pkg/proto/sdkws" "Open_IM/pkg/proto/sdkws"
"Open_IM/pkg/utils" "Open_IM/pkg/utils"
"context" "context"
"errors" "errors"
"github.com/go-redis/redis/v8" "fmt"
"github.com/gogo/protobuf/sortkeys" "github.com/golang/protobuf/proto"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/options"
"sync"
"time"
) )
var ErrMsgListNotExist = errors.New("user not have msg in mongoDB") var ErrMsgListNotExist = errors.New("user not have msg in mongoDB")
var ErrMsgNotFound = errors.New("msg not found")
type MsgMongoDriver struct { type MsgMongoDriver struct {
mgoDB *mongo.Database mgoDB *mongo.Database
MsgCollection *mongo.Collection MsgCollection *mongo.Collection
msg table.MsgDocModel
} }
func NewMsgMongoDriver(mgoDB *mongo.Database) *MsgMongoDriver { func NewMsgMongoDriver(mgoDB *mongo.Database) *MsgMongoDriver {
return &MsgMongoDriver{mgoDB: mgoDB, MsgCollection: mgoDB.Collection(unrelation.CChat)} return &MsgMongoDriver{mgoDB: mgoDB, MsgCollection: mgoDB.Collection(table.MsgDocModel{}.TableName())}
} }
func (m *MsgMongoDriver) FindOneAndUpdate(ctx context.Context, filter, update, output interface{}, opts ...*options.FindOneAndUpdateOptions) error { func (m *MsgMongoDriver) PushMsgsToDoc(ctx context.Context, docID string, msgsToMongo []table.MsgInfoModel) error {
return m.MsgCollection.FindOneAndUpdate(ctx, filter, update, opts...).Decode(output) filter := bson.M{"uid": docID}
return m.MsgCollection.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgsToMongo}}}).Err()
} }
func (m *MsgMongoDriver) UpdateOne(ctx context.Context, filter, update interface{}, opts ...*options.UpdateOptions) error { func (m *MsgMongoDriver) Create(ctx context.Context, model *table.MsgDocModel) error {
_, err := m.MsgCollection.UpdateOne(ctx, filter, update, opts...) _, err := m.MsgCollection.InsertOne(ctx, model)
return err return err
} }
// database controller func (m *MsgMongoDriver) UpdateMsgStatusByIndexInOneDoc(ctx context.Context, docID string, msg *sdkws.MsgData, seqIndex int, status int32) error {
func (m *MsgMongoDriver) DelMsgBySeqList(ctx context.Context, userID string, seqList []uint32) (totalUnExistSeqList []uint32, err error) { msg.Status = status
sortkeys.Uint32s(seqList)
suffixUserID2SubSeqList := func(uid string, seqList []uint32) map[string][]uint32 {
t := make(map[string][]uint32)
for i := 0; i < len(seqList); i++ {
seqUid := getSeqUid(uid, seqList[i])
if value, ok := t[seqUid]; !ok {
var temp []uint32
t[seqUid] = append(temp, seqList[i])
} else {
t[seqUid] = append(value, seqList[i])
}
}
return t
}(userID, seqList)
lock := sync.Mutex{}
var wg sync.WaitGroup
wg.Add(len(suffixUserID2SubSeqList))
for k, v := range suffixUserID2SubSeqList {
go func(suffixUserID string, subSeqList []uint32) {
defer wg.Done()
unexistSeqList, err := m.DelMsgBySeqListInOneDoc(ctx, suffixUserID, subSeqList)
if err != nil {
return
}
lock.Lock()
totalUnExistSeqList = append(totalUnExistSeqList, unexistSeqList...)
lock.Unlock()
}(k, v)
}
return totalUnExistSeqList, nil
}
func (m *MsgMongoDriver) DelMsgBySeqListInOneDoc(ctx context.Context, suffixUserID string, seqList []uint32) ([]uint32, error) {
seqMsgList, indexList, unexistSeqList, err := m.GetMsgAndIndexBySeqListInOneMongo2(suffixUserID, seqList)
if err != nil {
return nil, utils.Wrap(err, "")
}
for i, v := range seqMsgList {
if err := m.ReplaceMsgByIndex(suffixUserID, v, operationID, indexList[i]); err != nil {
return nil, utils.Wrap(err, "")
}
}
return unexistSeqList, nil
}
// database
func (m *MsgMongoDriver) DelMsgLogic(ctx context.Context, uid string, seqList []uint32) error {
sortkeys.Uint32s(seqList)
seqMsgs, err := d.GetMsgBySeqListMongo2(ctx, uid, seqList)
if err != nil {
return utils.Wrap(err, "")
}
for _, seqMsg := range seqMsgs {
seqMsg.Status = constant.MsgDeleted
if err = d.ReplaceMsgBySeq(ctx, uid, seqMsg); err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "ReplaceMsgListBySeq error", err.Error())
}
}
return nil
}
// model
func (m *MsgMongoDriver) ReplaceMsgByIndex(ctx context.Context, suffixUserID string, msg *sdkws.MsgData, seqIndex int) error {
log.NewInfo(operationID, utils.GetSelfFuncName(), suffixUserID, *msg)
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
s := fmt.Sprintf("msg.%d.msg", seqIndex)
log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s)
msg.Status = constant.MsgDeleted
bytes, err := proto.Marshal(msg) bytes, err := proto.Marshal(msg)
if err != nil { if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal failed ", err.Error(), msg.String())
return utils.Wrap(err, "") return utils.Wrap(err, "")
} }
updateResult, err := c.UpdateOne(ctx, bson.M{"uid": suffixUserID}, bson.M{"$set": bson.M{s: bytes}}) _, err = m.MsgCollection.UpdateOne(ctx, bson.M{"uid": docID}, bson.M{"$set": bson.M{fmt.Sprintf("msg.%d.msg", seqIndex): bytes}})
log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult)
if err != nil { if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error())
return utils.Wrap(err, "") return utils.Wrap(err, "")
} }
return nil return nil
} }
func (d *db.DataBases) ReplaceMsgBySeq(uid string, msg *sdkws.MsgData, operationID string) error { func (m *MsgMongoDriver) FindOneByDocID(ctx context.Context, docID string) (*table.MsgDocModel, error) {
log.NewInfo(operationID, utils.GetSelfFuncName(), uid, *msg) doc := &table.MsgDocModel{}
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) err := m.MsgCollection.FindOne(ctx, bson.M{"uid": docID}).Decode(doc)
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) return doc, err
uid = getSeqUid(uid, msg.Seq)
seqIndex := getMsgIndex(msg.Seq)
s := fmt.Sprintf("msg.%d.msg", seqIndex)
log.NewDebug(operationID, utils.GetSelfFuncName(), seqIndex, s)
bytes, err := proto.Marshal(msg)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "proto marshal", err.Error())
return utils.Wrap(err, "")
}
updateResult, err := c.UpdateOne(
ctx, bson.M{"uid": uid},
bson.M{"$set": bson.M{s: bytes}})
log.NewInfo(operationID, utils.GetSelfFuncName(), updateResult)
if err != nil {
log.NewError(operationID, utils.GetSelfFuncName(), "UpdateOne", err.Error())
return utils.Wrap(err, "")
}
return nil
} }
func (d *db.DataBases) UpdateOneMsgList(msg *UserChat) error { func (m *MsgMongoDriver) GetMsgsByIndex(ctx context.Context, sourceID string, index int64) (*table.MsgDocModel, error) {
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
_, err := c.UpdateOne(ctx, bson.M{"uid": msg.UID}, bson.M{"$set": bson.M{"msg": msg.Msg}})
return err
}
func (d *db.DataBases) GetMsgBySeqList(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) {
log.NewInfo(operationID, utils.GetSelfFuncName(), uid, seqList)
var hasSeqList []uint32
singleCount := 0
session := d.mgoSession.Clone()
if session == nil {
return nil, errors.New("session == nil")
}
defer session.Close()
c := session.DB(config.Config.Mongo.DBDatabase).C(cChat)
m := func(uid string, seqList []uint32) map[string][]uint32 {
t := make(map[string][]uint32)
for i := 0; i < len(seqList); i++ {
seqUid := getSeqUid(uid, seqList[i])
if value, ok := t[seqUid]; !ok {
var temp []uint32
t[seqUid] = append(temp, seqList[i])
} else {
t[seqUid] = append(value, seqList[i])
}
}
return t
}(uid, seqList)
sChat := UserChat{}
for seqUid, value := range m {
if err = c.Find(bson.M{"uid": seqUid}).One(&sChat); err != nil {
log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error())
continue
}
singleCount = 0
for i := 0; i < len(sChat.Msg); i++ {
msg := new(sdkws.MsgData)
if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error())
return nil, err
}
if isContainInt32(msg.Seq, value) {
seqMsg = append(seqMsg, msg)
hasSeqList = append(hasSeqList, msg.Seq)
singleCount++
if singleCount == len(value) {
break
}
}
}
}
if len(hasSeqList) != len(seqList) {
var diff []uint32
diff = utils.Difference(hasSeqList, seqList)
exceptionMSg := genExceptionMessageBySeqList(diff)
seqMsg = append(seqMsg, exceptionMSg...)
}
return seqMsg, nil
}
// model
func (d *db.DataBases) GetUserMsgListByIndex(docID string, index int64) (*UserChat, error) {
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
regex := fmt.Sprintf("^%s", docID)
findOpts := options.Find().SetLimit(1).SetSkip(index).SetSort(bson.M{"uid": 1}) findOpts := options.Find().SetLimit(1).SetSkip(index).SetSort(bson.M{"uid": 1})
var msgs []UserChat cursor, err := m.MsgCollection.Find(ctx, bson.M{"uid": primitive.Regex{Pattern: fmt.Sprintf("^%s", sourceID)}}, findOpts)
//primitive.Regex{Pattern: regex}
cursor, err := c.Find(ctx, bson.M{"uid": primitive.Regex{Pattern: regex}}, findOpts)
if err != nil { if err != nil {
return nil, utils.Wrap(err, "") return nil, utils.Wrap(err, "")
} }
var msgs []table.MsgDocModel
err = cursor.All(context.Background(), &msgs) err = cursor.All(context.Background(), &msgs)
if err != nil { if err != nil {
return nil, utils.Wrap(err, fmt.Sprintf("cursor is %s", cursor.Current.String())) return nil, utils.Wrap(err, fmt.Sprintf("cursor is %s", cursor.Current.String()))
} }
if len(msgs) > 0 { if len(msgs) > 0 {
return &msgs[0], nil return &msgs[0], nil
} else {
return nil, ErrMsgListNotExist
} }
return nil, ErrMsgListNotExist
} }
// model func (m *MsgMongoDriver) GetNewestMsg(ctx context.Context, sourceID string) (*table.MsgInfoModel, error) {
func (d *db.DataBases) DelMongoMsgs(IDList []string) error { var msgDocs []table.MsgDocModel
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) cursor, err := m.MsgCollection.Find(ctx, bson.M{"uid": bson.M{"$regex": fmt.Sprintf("^%s", sourceID)}}, options.Find().SetLimit(1).SetSort(bson.M{"uid": -1}))
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat) if err != nil {
_, err := c.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": IDList}}) return nil, utils.Wrap(err, "")
}
err = cursor.All(ctx, &msgDocs)
if err != nil {
return nil, utils.Wrap(err, "")
}
if len(msgDocs) > 0 {
if len(msgDocs[0].Msg) > 0 {
return &msgDocs[0].Msg[len(msgDocs[0].Msg)-1], nil
}
return nil, errors.New("len(msgDocs[0].Msg) < 0")
}
return nil, ErrMsgNotFound
}
func (m *MsgMongoDriver) GetOldestMsg(ctx context.Context, sourceID string) (*table.MsgInfoModel, error) {
var msgDocs []table.MsgDocModel
cursor, err := m.MsgCollection.Find(ctx, bson.M{"uid": bson.M{"$regex": fmt.Sprintf("^%s", sourceID)}}, options.Find().SetLimit(1).SetSort(bson.M{"uid": 1}))
if err != nil {
return nil, err
}
err = cursor.All(ctx, &msgDocs)
if err != nil {
return nil, utils.Wrap(err, "")
}
var oldestMsg table.MsgInfoModel
if len(msgDocs) > 0 {
for _, v := range msgDocs[0].Msg {
if v.SendTime != 0 {
oldestMsg = v
break
}
}
if len(oldestMsg.Msg) == 0 {
if len(msgDocs[0].Msg) > 0 {
oldestMsg = msgDocs[0].Msg[0]
}
}
return &oldestMsg, nil
}
return nil, ErrMsgNotFound
}
func (m *MsgMongoDriver) Delete(ctx context.Context, docIDs []string) error {
_, err := m.MsgCollection.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": docIDs}})
return err return err
} }
// model func (m *MsgMongoDriver) UpdateOneDoc(ctx context.Context, msg *table.MsgDocModel) error {
func (d *db.DataBases) ReplaceMsgToBlankByIndex(suffixID string, index int) (replaceMaxSeq uint32, err error) { _, err := m.MsgCollection.UpdateOne(ctx, bson.M{"uid": msg.DocID}, bson.M{"$set": bson.M{"msg": msg.Msg}})
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second) return err
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
userChat := &UserChat{}
err = c.FindOne(ctx, bson.M{"uid": suffixID}).Decode(&userChat)
if err != nil {
return 0, err
}
for i, msg := range userChat.Msg {
if i <= index {
msgPb := &sdkws.MsgData{}
if err = proto.Unmarshal(msg.Msg, msgPb); err != nil {
continue
}
newMsgPb := &sdkws.MsgData{Seq: msgPb.Seq}
bytes, err := proto.Marshal(newMsgPb)
if err != nil {
continue
}
msg.Msg = bytes
msg.SendTime = 0
replaceMaxSeq = msgPb.Seq
}
}
_, err = c.UpdateOne(ctx, bson.M{"uid": suffixID}, bson.M{"$set": bson.M{"msg": userChat.Msg}})
return replaceMaxSeq, err
}
func (d *db.DataBases) GetNewestMsg(ID string) (msg *sdkws.MsgData, err error) {
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
regex := fmt.Sprintf("^%s", ID)
findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": -1})
var userChats []UserChat
cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts)
if err != nil {
return nil, err
}
err = cursor.All(ctx, &userChats)
if err != nil {
return nil, utils.Wrap(err, "")
}
if len(userChats) > 0 {
if len(userChats[0].Msg) > 0 {
msgPb := &sdkws.MsgData{}
err = proto.Unmarshal(userChats[0].Msg[len(userChats[0].Msg)-1].Msg, msgPb)
if err != nil {
return nil, utils.Wrap(err, "")
}
return msgPb, nil
}
return nil, errors.New("len(userChats[0].Msg) < 0")
}
return nil, nil
}
func (d *db.DataBases) GetOldestMsg(ID string) (msg *sdkws.MsgData, err error) {
ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
regex := fmt.Sprintf("^%s", ID)
findOpts := options.Find().SetLimit(1).SetSort(bson.M{"uid": 1})
var userChats []UserChat
cursor, err := c.Find(ctx, bson.M{"uid": bson.M{"$regex": regex}}, findOpts)
if err != nil {
return nil, err
}
err = cursor.All(ctx, &userChats)
if err != nil {
return nil, utils.Wrap(err, "")
}
var oldestMsg []byte
if len(userChats) > 0 {
for _, v := range userChats[0].Msg {
if v.SendTime != 0 {
oldestMsg = v.Msg
break
}
}
if len(oldestMsg) == 0 {
oldestMsg = userChats[0].Msg[len(userChats[0].Msg)-1].Msg
}
msgPb := &sdkws.MsgData{}
err = proto.Unmarshal(oldestMsg, msgPb)
if err != nil {
return nil, utils.Wrap(err, "")
}
return msgPb, nil
}
return nil, nil
}
// GetMsgBySeqListMongo2 loads the messages with the given seqs for uid from
// mongo. Seqs are grouped by shard document (getSeqUid) so each shard is
// fetched at most once. Seqs that cannot be found (missing shard or absent
// from the shard's msg array) are reported back as generated "exception"
// placeholder messages appended to the result.
func (d *db.DataBases) GetMsgBySeqListMongo2(uid string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) {
	var hasSeqList []uint32
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
	defer cancel() // release the timeout timer (go vet: lostcancel)
	c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
	// Group the requested seqs by the shard document they live in.
	// (append on a nil map value works, so no ok-check dance is needed.)
	m := make(map[string][]uint32)
	for _, seq := range seqList {
		seqUid := getSeqUid(uid, seq)
		m[seqUid] = append(m[seqUid], seq)
	}
	sChat := UserChat{}
	for seqUid, value := range m {
		if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil {
			// Missing shard: its seqs are covered by the exception fill below.
			log.NewError(operationID, "not find seqUid", seqUid, value, uid, seqList, err.Error())
			continue
		}
		singleCount := 0
		for i := 0; i < len(sChat.Msg); i++ {
			msg := new(sdkws.MsgData)
			if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
				log.NewError(operationID, "Unmarshal err", seqUid, value, uid, seqList, err.Error())
				return nil, err
			}
			if isContainInt32(msg.Seq, value) {
				seqMsg = append(seqMsg, msg)
				hasSeqList = append(hasSeqList, msg.Seq)
				singleCount++
				if singleCount == len(value) {
					break // every requested seq in this shard found
				}
			}
		}
	}
	if len(hasSeqList) != len(seqList) {
		// Fill the gaps with placeholder messages for the missing seqs.
		diff := utils.Difference(hasSeqList, seqList)
		seqMsg = append(seqMsg, genExceptionMessageBySeqList(diff)...)
	}
	return seqMsg, nil
}
// GetSuperGroupMsgBySeqListMongo loads the messages with the given seqs for
// a super group from mongo. Seqs are grouped by shard document (getSeqUid)
// so each shard is fetched at most once. Seqs that cannot be found are
// reported back as generated super-group "exception" placeholder messages
// appended to the result.
func (d *db.DataBases) GetSuperGroupMsgBySeqListMongo(groupID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, err error) {
	var hasSeqList []uint32
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
	defer cancel() // release the timeout timer (go vet: lostcancel)
	c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
	// Group the requested seqs by the shard document they live in.
	m := make(map[string][]uint32)
	for _, seq := range seqList {
		seqUid := getSeqUid(groupID, seq)
		m[seqUid] = append(m[seqUid], seq)
	}
	sChat := UserChat{}
	for seqUid, value := range m {
		if err = c.FindOne(ctx, bson.M{"uid": seqUid}).Decode(&sChat); err != nil {
			// Missing shard: its seqs are covered by the exception fill below.
			log.NewError(operationID, "not find seqGroupID", seqUid, value, groupID, seqList, err.Error())
			continue
		}
		singleCount := 0
		for i := 0; i < len(sChat.Msg); i++ {
			msg := new(sdkws.MsgData)
			if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
				log.NewError(operationID, "Unmarshal err", seqUid, value, groupID, seqList, err.Error())
				return nil, err
			}
			if isContainInt32(msg.Seq, value) {
				seqMsg = append(seqMsg, msg)
				hasSeqList = append(hasSeqList, msg.Seq)
				singleCount++
				if singleCount == len(value) {
					break // every requested seq in this shard found
				}
			}
		}
	}
	if len(hasSeqList) != len(seqList) {
		// Fill the gaps with placeholder messages for the missing seqs.
		diff := utils.Difference(hasSeqList, seqList)
		seqMsg = append(seqMsg, genExceptionSuperGroupMessageBySeqList(diff, groupID)...)
	}
	return seqMsg, nil
}
// GetMsgAndIndexBySeqListInOneMongo2 looks up seqList inside the single
// shard document identified by suffixUserID. It returns the decoded
// messages, the array index of each found message within the shard's msg
// array, and the seqs that were requested but not present in the shard.
func (d *db.DataBases) GetMsgAndIndexBySeqListInOneMongo2(suffixUserID string, seqList []uint32, operationID string) (seqMsg []*sdkws.MsgData, indexList []int, unexistSeqList []uint32, err error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
	defer cancel() // release the timeout timer (go vet: lostcancel)
	c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
	sChat := UserChat{}
	if err = c.FindOne(ctx, bson.M{"uid": suffixUserID}).Decode(&sChat); err != nil {
		log.NewError(operationID, "not find seqUid", suffixUserID, err.Error())
		return nil, nil, nil, utils.Wrap(err, "")
	}
	singleCount := 0
	var hasSeqList []uint32
	for i := 0; i < len(sChat.Msg); i++ {
		msg := new(sdkws.MsgData)
		if err = proto.Unmarshal(sChat.Msg[i].Msg, msg); err != nil {
			log.NewError(operationID, "Unmarshal err", msg.String(), err.Error())
			return nil, nil, nil, err
		}
		if isContainInt32(msg.Seq, seqList) {
			indexList = append(indexList, i)
			seqMsg = append(seqMsg, msg)
			hasSeqList = append(hasSeqList, msg.Seq)
			singleCount++
			if singleCount == len(seqList) {
				break // every requested seq found
			}
		}
	}
	// Report anything that was requested but not found in this shard.
	for _, seq := range seqList {
		if !isContainInt32(seq, hasSeqList) {
			unexistSeqList = append(unexistSeqList, seq)
		}
	}
	return seqMsg, indexList, unexistSeqList, nil
}
// SaveUserChatMongo2 stores one message via the official mongo driver: it
// pushes the marshaled message onto the shard document for uid/seq, creating
// the shard document when this is the first message for that shard.
func (d *db.DataBases) SaveUserChatMongo2(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
	defer cancel() // release the timeout timer (go vet: lostcancel)
	c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
	newTime := getCurrentTimestampByMill()
	operationID := ""
	seqUid := getSeqUid(uid, m.MsgData.Seq)
	filter := bson.M{"uid": seqUid}
	var err error
	sMsg := MsgInfo{SendTime: sendTime}
	if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
		return utils.Wrap(err, "")
	}
	err = c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": sMsg}}).Err()
	log.NewWarn(operationID, "get mgoSession cost time", getCurrentTimestampByMill()-newTime)
	switch {
	case err == nil:
		log.NewDebug(operationID, "FindOneAndUpdate ok", filter)
	case err == mongo.ErrNoDocuments:
		// Shard does not exist yet: create it with this first message.
		sChat := UserChat{UID: seqUid, Msg: []MsgInfo{sMsg}}
		if _, err = c.InsertOne(ctx, &sChat); err != nil {
			log.NewDebug(operationID, "InsertOne failed", filter)
			return utils.Wrap(err, "")
		}
	default:
		// The original attempted an insert on ANY update error, masking real
		// database failures (e.g. network errors) behind a duplicate-insert
		// attempt; surface them directly instead.
		return utils.Wrap(err, "")
	}
	log.NewDebug(operationID, "find mgo uid cost time", getCurrentTimestampByMill()-newTime)
	return nil
}
// SaveUserChat persists one message through the legacy mgo session: it
// appends the marshaled message to the shard document for uid/seq, creating
// the shard document when no document with that uid exists yet.
func (d *db.DataBases) SaveUserChat(uid string, sendTime int64, m *pbMsg.MsgDataToDB) error {
	start := getCurrentTimestampByMill()
	session := d.mgoSession.Clone()
	if session == nil {
		return errors.New("session == nil")
	}
	defer session.Close()
	log.NewDebug("", "get mgoSession cost time", getCurrentTimestampByMill()-start)
	coll := session.DB(config.Config.Mongo.DBDatabase).C(cChat)
	seqUid := getSeqUid(uid, m.MsgData.Seq)
	count, err := coll.Find(bson.M{"uid": seqUid}).Count()
	if err != nil {
		return err
	}
	log.NewDebug("", "find mgo uid cost time", getCurrentTimestampByMill()-start)
	record := MsgInfo{SendTime: sendTime}
	if record.Msg, err = proto.Marshal(m.MsgData); err != nil {
		return err
	}
	if count == 0 {
		// First message for this shard: create the document.
		doc := UserChat{UID: seqUid, Msg: []MsgInfo{record}}
		if err = coll.Insert(&doc); err != nil {
			return err
		}
	} else {
		// Shard exists: push onto its msg array.
		if err = coll.Update(bson.M{"uid": seqUid}, bson.M{"$push": bson.M{"msg": record}}); err != nil {
			return err
		}
	}
	log.NewDebug("", "insert mgo data cost time", getCurrentTimestampByMill()-start)
	return nil
}
func (d *db.DataBases) CleanUpUserMsgFromMongo(userID string, operationID string) error {
ctx := context.Background()
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
maxSeq, err := d.GetUserMaxSeq(userID)
if err == redis.Nil {
return nil
}
if err != nil {
return utils.Wrap(err, "")
}
seqUsers := getSeqUserIDList(userID, uint32(maxSeq))
log.Error(operationID, "getSeqUserIDList", seqUsers)
_, err = c.DeleteMany(ctx, bson.M{"uid": bson.M{"$in": seqUsers}})
if err == mongo.ErrNoDocuments {
return nil
}
return utils.Wrap(err, "")
} }

View File

@ -127,7 +127,7 @@ type SuperGroupMongoDriver struct {
//} //}
func (s *SuperGroupMongoDriver) CreateSuperGroup(ctx context.Context, groupID string, initMemberIDs []string, tx ...any) error { func (s *SuperGroupMongoDriver) CreateSuperGroup(ctx context.Context, groupID string, initMemberIDs []string, tx ...any) error {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
_, err := s.superGroupCollection.InsertOne(ctx, &unrelation.SuperGroupModel{ _, err := s.superGroupCollection.InsertOne(ctx, &unrelation.SuperGroupModel{
GroupID: groupID, GroupID: groupID,
MemberIDs: initMemberIDs, MemberIDs: initMemberIDs,
@ -147,7 +147,7 @@ func (s *SuperGroupMongoDriver) CreateSuperGroup(ctx context.Context, groupID st
} }
func (s *SuperGroupMongoDriver) TakeSuperGroup(ctx context.Context, groupID string, tx ...any) (group *unrelation.SuperGroupModel, err error) { func (s *SuperGroupMongoDriver) TakeSuperGroup(ctx context.Context, groupID string, tx ...any) (group *unrelation.SuperGroupModel, err error) {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
if err := s.superGroupCollection.FindOne(ctx, bson.M{"group_id": groupID}).Decode(&group); err != nil { if err := s.superGroupCollection.FindOne(ctx, bson.M{"group_id": groupID}).Decode(&group); err != nil {
return nil, utils.Wrap(err, "") return nil, utils.Wrap(err, "")
} }
@ -155,14 +155,15 @@ func (s *SuperGroupMongoDriver) TakeSuperGroup(ctx context.Context, groupID stri
} }
func (s *SuperGroupMongoDriver) FindSuperGroup(ctx context.Context, groupIDs []string, tx ...any) (groups []*unrelation.SuperGroupModel, err error) { func (s *SuperGroupMongoDriver) FindSuperGroup(ctx context.Context, groupIDs []string, tx ...any) (groups []*unrelation.SuperGroupModel, err error) {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
cursor, err := s.superGroupCollection.Find(ctx, bson.M{"group_id": bson.M{ cursor, err := s.superGroupCollection.Find(ctx, bson.M{"group_id": bson.M{
"$in": groupIDs, "$in": groupIDs,
}}) }})
if err != nil { if err != nil {
return nil, utils.Wrap(err, "") return nil, err
} }
defer cursor.Close(ctx) defer cursor.Close(ctx)
if err := cursor.All(ctx, &groups); err != nil { if err := cursor.All(ctx, &groups); err != nil {
return nil, utils.Wrap(err, "") return nil, utils.Wrap(err, "")
} }
@ -170,7 +171,7 @@ func (s *SuperGroupMongoDriver) FindSuperGroup(ctx context.Context, groupIDs []s
} }
func (s *SuperGroupMongoDriver) AddUserToSuperGroup(ctx context.Context, groupID string, userIDs []string, tx ...any) error { func (s *SuperGroupMongoDriver) AddUserToSuperGroup(ctx context.Context, groupID string, userIDs []string, tx ...any) error {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
opts := options.Session().SetDefaultReadConcern(readconcern.Majority()) opts := options.Session().SetDefaultReadConcern(readconcern.Majority())
return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error { return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error {
_, err := s.superGroupCollection.UpdateOne(sCtx, bson.M{"group_id": groupID}, bson.M{"$addToSet": bson.M{"member_id_list": bson.M{"$each": userIDs}}}) _, err := s.superGroupCollection.UpdateOne(sCtx, bson.M{"group_id": groupID}, bson.M{"$addToSet": bson.M{"member_id_list": bson.M{"$each": userIDs}}})
@ -194,7 +195,7 @@ func (s *SuperGroupMongoDriver) AddUserToSuperGroup(ctx context.Context, groupID
} }
func (s *SuperGroupMongoDriver) RemoverUserFromSuperGroup(ctx context.Context, groupID string, userIDs []string, tx ...any) error { func (s *SuperGroupMongoDriver) RemoverUserFromSuperGroup(ctx context.Context, groupID string, userIDs []string, tx ...any) error {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
opts := options.Session().SetDefaultReadConcern(readconcern.Majority()) opts := options.Session().SetDefaultReadConcern(readconcern.Majority())
return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error { return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error {
_, err := s.superGroupCollection.UpdateOne(sCtx, bson.M{"group_id": groupID}, bson.M{"$pull": bson.M{"member_id_list": bson.M{"$in": userIDs}}}) _, err := s.superGroupCollection.UpdateOne(sCtx, bson.M{"group_id": groupID}, bson.M{"$pull": bson.M{"member_id_list": bson.M{"$in": userIDs}}})
@ -212,14 +213,14 @@ func (s *SuperGroupMongoDriver) RemoverUserFromSuperGroup(ctx context.Context, g
} }
func (s *SuperGroupMongoDriver) GetSuperGroupByUserID(ctx context.Context, userID string, tx ...any) (*unrelation.UserToSuperGroupModel, error) { func (s *SuperGroupMongoDriver) GetSuperGroupByUserID(ctx context.Context, userID string, tx ...any) (*unrelation.UserToSuperGroupModel, error) {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
var user unrelation.UserToSuperGroupModel var user unrelation.UserToSuperGroupModel
err := s.userToSuperGroupCollection.FindOne(ctx, bson.M{"user_id": userID}).Decode(&user) err := s.userToSuperGroupCollection.FindOne(ctx, bson.M{"user_id": userID}).Decode(&user)
return &user, utils.Wrap(err, "") return &user, utils.Wrap(err, "")
} }
func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error { func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
group, err := s.TakeSuperGroup(ctx, groupID, tx...) group, err := s.TakeSuperGroup(ctx, groupID, tx...)
if err != nil { if err != nil {
return err return err
@ -231,7 +232,7 @@ func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID st
} }
//func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error { //func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID string, tx ...any) error {
// ctx = s.getTxCtx(ctx, tx) // ctx = getTxCtx(ctx, tx)
// opts := options.Session().SetDefaultReadConcern(readconcern.Majority()) // opts := options.Session().SetDefaultReadConcern(readconcern.Majority())
// return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error { // return s.MgoDB.Client().UseSessionWithOptions(ctx, opts, func(sCtx mongo.SessionContext) error {
// superGroup := &unrelation.SuperGroupModel{} // superGroup := &unrelation.SuperGroupModel{}
@ -249,7 +250,7 @@ func (s *SuperGroupMongoDriver) DeleteSuperGroup(ctx context.Context, groupID st
//} //}
func (s *SuperGroupMongoDriver) RemoveGroupFromUser(ctx context.Context, groupID string, userIDs []string, tx ...any) error { func (s *SuperGroupMongoDriver) RemoveGroupFromUser(ctx context.Context, groupID string, userIDs []string, tx ...any) error {
ctx = s.getTxCtx(ctx, tx) ctx = getTxCtx(ctx, tx)
_, err := s.userToSuperGroupCollection.UpdateOne(ctx, bson.M{"user_id": bson.M{"$in": userIDs}}, bson.M{"$pull": bson.M{"group_id_list": groupID}}) _, err := s.userToSuperGroupCollection.UpdateOne(ctx, bson.M{"user_id": bson.M{"$in": userIDs}}, bson.M{"$pull": bson.M{"group_id_list": groupID}})
return utils.Wrap(err, "") return utils.Wrap(err, "")
} }

View File

@ -7,7 +7,7 @@
package http package http
import ( import (
cbApi "Open_IM/pkg/callback_struct" cbApi "Open_IM/pkg/callbackstruct"
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant" "Open_IM/pkg/common/constant"
"bytes" "bytes"

View File

@ -9,7 +9,7 @@ import (
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
promePkg "Open_IM/pkg/common/prometheus" prome "Open_IM/pkg/common/prometheus"
) )
type Producer struct { type Producer struct {
@ -66,7 +66,7 @@ func (p *Producer) SendMessage(m proto.Message, key string, operationID string)
a, b, c := p.producer.SendMessage(kMsg) a, b, c := p.producer.SendMessage(kMsg)
log.Info(operationID, "ByteEncoder SendMessage end", "key ", kMsg.Key.Length(), kMsg.Value.Length(), p.producer) log.Info(operationID, "ByteEncoder SendMessage end", "key ", kMsg.Key.Length(), kMsg.Value.Length(), p.producer)
if c == nil { if c == nil {
promePkg.PromeInc(promePkg.SendMsgCounter) prome.PromeInc(prome.SendMsgCounter)
} }
return a, b, utils.Wrap(c, "") return a, b, utils.Wrap(c, "")
} }

View File

@ -1,4 +1,4 @@
package prometheus package prome
import ( import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"

View File

@ -1,4 +1,4 @@
package prometheus package prome
import ( import (
"context" "context"

View File

@ -1,4 +1,4 @@
package prometheus package prome
import ( import (
"Open_IM/pkg/common/config" "Open_IM/pkg/common/config"

View File

@ -7,6 +7,7 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/jinzhu/copier" "github.com/jinzhu/copier"
"github.com/pkg/errors" "github.com/pkg/errors"
"hash/crc32"
"math/rand" "math/rand"
"reflect" "reflect"
"runtime" "runtime"
@ -226,3 +227,7 @@ func ProtoToMap(pb proto.Message, idFix bool) map[string]interface{} {
} }
return out return out
} }
// GetHashCode returns the IEEE CRC-32 checksum of s, a cheap deterministic
// hash of a string key.
func GetHashCode(s string) uint32 {
	h := crc32.NewIEEE()
	h.Write([]byte(s)) // hash.Hash.Write never returns an error
	return h.Sum32()
}