push

commit 4fc00109ef (parent 2359d7ab37)
@@ -20,7 +20,9 @@ func main() {
 	log.NewPrivateLog(constant.LogFileName)
 	fmt.Println("start push rpc server, port: ", *rpcPort, ", OpenIM version: ", constant.CurrentVersion, "\n")
 	pusher := push.Push{}
-	pusher.Init(*rpcPort)
+	if err := pusher.Init(*rpcPort); err != nil {
+		panic(err.Error())
+	}
 	pusher.Run(*prometheusPort)
 	wg.Wait()
 }
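The hunk above makes startup failures explicit: Init now reports its error and main crashes immediately instead of continuing with a half-initialized pusher. A minimal, self-contained sketch of the same fail-fast pattern (the Service type below is an illustrative stand-in, not the repository's push.Push):

package main

import "fmt"

// Service mirrors the shape push.Push takes after this commit: Init can
// fail and reports the failure to the caller instead of hiding it.
type Service struct{ port int }

func (s *Service) Init(port int) error {
	if port <= 0 {
		return fmt.Errorf("invalid rpc port: %d", port)
	}
	s.port = port
	return nil
}

func main() {
	s := &Service{}
	// Crash at startup on a bad configuration, as the new main() does.
	if err := s.Init(10700); err != nil {
		panic(err.Error())
	}
	fmt.Println("rpc server initialized on port", s.port)
}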
@@ -12,7 +12,7 @@ import (
 )

 type ClearMsgTool struct {
-	msgInterface   controller.MsgInterface
+	msgInterface   controller.MsgDatabase
 	userInterface  controller.UserDatabase
 	groupInterface controller.GroupDatabase
 }
@@ -73,7 +73,11 @@ func (c *ClearMsgTool) ClearSuperGroupMsg(ctx context.Context, workingGroupIDLis
 			log.NewError(tracelog.GetOperationID(ctx), utils.GetSelfFuncName(), err.Error(), "GetUserMinMaxSeqInMongoAndCache failed", groupID)
 			continue
 		}
+		c.FixGroupUserSeq(ctx, userIDs, groupID)
+		//for _, userID := range userIDs {
+		//	c.msgInterface.getgroup
+		//	c.FixGroupUserSeq(ctx, userID, groupID, )
+		//
+		//}
 		c.CheckMaxSeqWithMongo(ctx, groupID, maxSeqCache, maxSeqMongo, constant.WriteDiffusion)
 	}
 }
@@ -56,8 +56,8 @@ type OnlineHistoryRedisConsumerHandler struct {
 	producerToModify *kafka.Producer
 	producerToMongo  *kafka.Producer

-	msgInterface controller.MsgInterface
-	cache        cache.Cache
+	msgDatabase controller.MsgDatabase
+	cache       cache.Cache
 }

 func (och *OnlineHistoryRedisConsumerHandler) Init() {
@@ -113,7 +113,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
 	}
 	log.Debug(triggerID, "msg storage length", len(storageMsgList), "push length", len(notStoragePushMsgList))
 	if len(storageMsgList) > 0 {
-		lastSeq, err := och.msgInterface.BatchInsertChat2Cache(ctx, msgChannelValue.aggregationID, storageMsgList)
+		lastSeq, err := och.msgDatabase.BatchInsertChat2Cache(ctx, msgChannelValue.aggregationID, storageMsgList)
 		if err != nil {
 			och.singleMsgFailedCountMutex.Lock()
 			och.singleMsgFailedCount += uint64(len(storageMsgList))
@@ -18,7 +18,7 @@ import (

 type OnlineHistoryMongoConsumerHandler struct {
 	historyConsumerGroup *kfk.MConsumerGroup
-	msgInterface         controller.MsgInterface
+	msgDatabase          controller.MsgDatabase
 	cache                cache.Cache
 }

@@ -39,12 +39,12 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.Con
 	ctx := context.Background()
 	tracelog.SetOperationID(ctx, msgFromMQ.TriggerID)
 	//err = db.DB.BatchInsertChat2DB(msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.TriggerID, msgFromMQ.LastSeq)
-	err = mc.msgInterface.BatchInsertChat2DB(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.LastSeq)
+	err = mc.msgDatabase.BatchInsertChat2DB(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList, msgFromMQ.LastSeq)
 	if err != nil {
 		log.NewError(msgFromMQ.TriggerID, "single data insert to mongo err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID)
 	}
 	//err = db.DB.DeleteMessageFromCache(msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.GetTriggerID())
-	err = mc.msgInterface.DeleteMessageFromCache(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList)
+	err = mc.msgDatabase.DeleteMessageFromCache(ctx, msgFromMQ.AggregationID, msgFromMQ.MessageList)
 	if err != nil {
 		log.NewError(msgFromMQ.TriggerID, "remove cache msg from redis err", err.Error(), msgFromMQ.MessageList, msgFromMQ.AggregationID, msgFromMQ.TriggerID)
 	}
@@ -62,8 +62,8 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(cMsg *sarama.Con
 			log.NewError(msgFromMQ.TriggerID, "deleteMessageTips unmarshal err:", err.Error(), v.String())
 			continue
 		}
-		if totalUnExistSeqs, err := mc.msgInterface.DelMsgBySeqs(ctx, DeleteMessageTips.UserID, DeleteMessageTips.SeqList); err != nil {
-			log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqs args: ", DeleteMessageTips.UserID, DeleteMessageTips.SeqList, "error:", err.Error(), "totalUnExistSeqs: ", totalUnExistSeqs)
+		if totalUnExistSeqs, err := mc.msgDatabase.DelMsgBySeqs(ctx, DeleteMessageTips.UserID, DeleteMessageTips.Seqs); err != nil {
+			log.NewError(v.OperationID, utils.GetSelfFuncName(), "DelMsgBySeqs args: ", DeleteMessageTips.UserID, DeleteMessageTips.Seqs, "error:", err.Error(), "totalUnExistSeqs: ", totalUnExistSeqs)
 		}

 	}
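The recurring change in these consumer hunks is the rename from msgInterface controller.MsgInterface to msgDatabase controller.MsgDatabase. A reduced sketch of the injection shape, with the interface cut down to the two calls the Mongo handler makes here and message payloads simplified to strings (the real signatures live in the controller package):

package main

import (
	"context"
	"fmt"
)

// MsgDatabase is a cut-down stand-in for controller.MsgDatabase, listing
// only the calls the Mongo consumer handler makes in this diff.
type MsgDatabase interface {
	BatchInsertChat2DB(ctx context.Context, aggregationID string, msgs []string, lastSeq int64) error
	DeleteMessageFromCache(ctx context.Context, aggregationID string, msgs []string) error
}

type mongoConsumerHandler struct {
	msgDatabase MsgDatabase // injected, so storage can be swapped or mocked
}

func (mc *mongoConsumerHandler) handle(ctx context.Context, aggregationID string, msgs []string, lastSeq int64) {
	if err := mc.msgDatabase.BatchInsertChat2DB(ctx, aggregationID, msgs, lastSeq); err != nil {
		fmt.Println("single data insert to mongo err:", err) // logged, not fatal, as in the handler
	}
	if err := mc.msgDatabase.DeleteMessageFromCache(ctx, aggregationID, msgs); err != nil {
		fmt.Println("remove cache msg from redis err:", err)
	}
}

type fakeDB struct{}

func (fakeDB) BatchInsertChat2DB(context.Context, string, []string, int64) error { return nil }
func (fakeDB) DeleteMessageFromCache(context.Context, string, []string) error    { return nil }

func main() {
	h := &mongoConsumerHandler{msgDatabase: fakeDB{}}
	h.handle(context.Background(), "agg-1", []string{"hello"}, 42)
}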
@@ -9,6 +9,7 @@ import (
 	"Open_IM/pkg/common/tracelog"
+	"Open_IM/pkg/utils/splitter"
 	"github.com/go-redis/redis/v8"
 	"sync"

 	"Open_IM/pkg/utils"
 	"context"
@@ -64,13 +65,17 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri
 	maxNum := 999
 	if len(userIDs) > maxNum {
 		s := splitter.NewSplitter(maxNum, userIDs)
+		wg := sync.WaitGroup{}
+		wg.Add(len(s.GetSplitResult()))
 		for i, v := range s.GetSplitResult() {
+			go func(index int, userIDs []string) {
+				defer wg.Done()
 				if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil {
 					log.NewError(tracelog.GetOperationID(ctx), "batchPush failed", i, token, pushReq)
 				}
+			}(i, v.Item)
 		}
+		wg.Wait()
 	} else {
 		err = g.batchPush(ctx, token, userIDs, pushReq)
 	}
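The hunk above parallelizes large offline pushes: recipients are split into chunks of at most 999 and each chunk is pushed on its own goroutine, with a WaitGroup holding Push open until every chunk completes. A runnable sketch of the same control flow, with the splitter package replaced by an inline chunking loop and batchPush standing in for the real sender:

package main

import (
	"fmt"
	"sync"
)

// batchPush stands in for g.batchPush; here it just reports the chunk size.
func batchPush(userIDs []string) error {
	fmt.Println("pushing to", len(userIDs), "users")
	return nil
}

// pushAll mirrors the new Client.Push control flow: split the recipient
// list into chunks of at most maxNum and push each chunk concurrently.
func pushAll(userIDs []string, maxNum int) {
	if len(userIDs) <= maxNum {
		_ = batchPush(userIDs)
		return
	}
	var chunks [][]string
	for start := 0; start < len(userIDs); start += maxNum {
		end := start + maxNum
		if end > len(userIDs) {
			end = len(userIDs)
		}
		chunks = append(chunks, userIDs[start:end])
	}
	wg := sync.WaitGroup{}
	wg.Add(len(chunks)) // one Add up front, matching wg.Add(len(s.GetSplitResult()))
	for _, chunk := range chunks {
		go func(userIDs []string) {
			defer wg.Done()
			_ = batchPush(userIDs) // errors are only logged in the real code
		}(chunk)
	}
	wg.Wait() // block until every chunk has been attempted
}

func main() {
	ids := make([]string, 2500)
	for i := range ids {
		ids[i] = fmt.Sprintf("user-%d", i)
	}
	pushAll(ids, 999) // three chunks: 999, 999, 502
}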
@@ -25,9 +25,12 @@ type Push struct {
 	successCount uint64
 }

-func (p *Push) Init(rpcPort int) {
-	var cacheInterface cache.Cache
+func (p *Push) Init(rpcPort int) error {
+	redisClient, err := cache.NewRedis()
+	if err != nil {
+		return err
+	}
+	var cacheInterface cache.Cache = redisClient
 	p.rpcServer.Init(rpcPort, cacheInterface)
 	p.pushCh.Init()
 	statistics.NewStatistics(&p.successCount, config.Config.ModuleName.PushName, fmt.Sprintf("%d second push to msg_gateway count", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
@@ -40,6 +43,7 @@ func (p *Push) Init(rpcPort int) {
 	if config.Config.Push.Fcm.Enable {
 		p.offlinePusher = fcm.NewClient(cacheInterface)
 	}
+	return nil
 }

 func (p *Push) initPrometheus() {
@@ -11,15 +11,18 @@ import (
 	"Open_IM/pkg/common/constant"
 	kfk "Open_IM/pkg/common/kafka"
 	"Open_IM/pkg/common/log"
+	"Open_IM/pkg/common/tracelog"
 	pbChat "Open_IM/pkg/proto/msg"
 	pbPush "Open_IM/pkg/proto/push"
 	"Open_IM/pkg/utils"
+	"context"
 	"github.com/Shopify/sarama"
 	"github.com/golang/protobuf/proto"
 )

 type ConsumerHandler struct {
 	pushConsumerGroup *kfk.MConsumerGroup
+	pusher            Pusher
 }

 func (c *ConsumerHandler) Init() {
@@ -27,6 +30,7 @@ func (c *ConsumerHandler) Init() {
 		OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ms2pschat.Topic}, config.Config.Kafka.Ms2pschat.Addr,
 		config.Config.Kafka.ConsumerGroupID.MsgToPush)
 }

 func (c *ConsumerHandler) handleMs2PsChat(msg []byte) {
 	log.NewDebug("", "msg come from kafka And push!!!", "msg", string(msg))
 	msgFromMQ := pbChat.PushMsgDataToMQ{}
@@ -43,11 +47,17 @@ func (c *ConsumerHandler) handleMs2PsChat(msg []byte) {
 	if nowSec-sec > 10 {
 		return
 	}
+	ctx := context.Background()
+	tracelog.SetOperationID(ctx, "")
+	var err error
 	switch msgFromMQ.MsgData.SessionType {
 	case constant.SuperGroupChatType:
-		MsgToSuperGroupUser(pbData)
+		err = c.pusher.MsgToSuperGroupUser(ctx, pbData.SourceID, pbData.MsgData)
 	default:
-		MsgToUser(pbData)
+		err = c.pusher.MsgToUser(ctx, pbData.SourceID, pbData.MsgData)
 	}
+	if err != nil {
+		log.NewError("", "push failed", *pbData)
+	}
 }
 func (ConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
@@ -21,16 +21,13 @@ import (
 type RPCServer struct {
 	rpcPort         int
 	rpcRegisterName string
 	etcdSchema      string
 	etcdAddr        []string
-	push            controller.PushInterface
+	pushInterface   controller.PushInterface
+	pusher          Pusher
 }

 func (r *RPCServer) Init(rpcPort int, cache cache.Cache) {
 	r.rpcPort = rpcPort
 	r.rpcRegisterName = config.Config.RpcRegisterName.OpenImPushName
 	r.etcdSchema = config.Config.Etcd.EtcdSchema
 	r.etcdAddr = config.Config.Etcd.EtcdAddr
 }

 func (r *RPCServer) run() {
@@ -84,13 +81,13 @@
 func (r *RPCServer) PushMsg(ctx context.Context, pbData *pbPush.PushMsgReq) (resp *pbPush.PushMsgResp, err error) {
 	switch pbData.MsgData.SessionType {
 	case constant.SuperGroupChatType:
-		MsgToSuperGroupUser(pbData)
+		err = r.pusher.MsgToSuperGroupUser(ctx, pbData.SourceID, pbData.MsgData)
 	default:
-		MsgToUser(pbData)
+		err = r.pusher.MsgToUser(ctx, pbData.SourceID, pbData.MsgData)
 	}
-	return &pbPush.PushMsgResp{}, nil
+	return &pbPush.PushMsgResp{}, err
 }

 func (r *RPCServer) DelUserPushToken(ctx context.Context, req *pbPush.DelUserPushTokenReq) (resp *pbPush.DelUserPushTokenResp, err error) {
-	return &pbPush.DelUserPushTokenResp{}, r.push.DelFcmToken(ctx, req.UserID, int(req.PlatformID))
+	return &pbPush.DelUserPushTokenResp{}, r.pushInterface.DelFcmToken(ctx, req.UserID, int(req.PlatformID))
 }
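PushMsg previously dropped the push result and returned nil unconditionally; the fix threads err through to the RPC response. A small illustration of the difference (the types below are simplified stand-ins for the pbPush messages, not the generated proto code):

package main

import (
	"context"
	"errors"
	"fmt"
)

type pushMsgResp struct{}

var errGatewayDown = errors.New("msg gateway unreachable")

func msgToUser(ctx context.Context) error { return errGatewayDown }

// Before: the handler reported success even when the push failed.
func pushMsgBefore(ctx context.Context) (*pushMsgResp, error) {
	_ = msgToUser(ctx)
	return &pushMsgResp{}, nil
}

// After: the caller sees the real outcome and can retry or alert.
func pushMsgAfter(ctx context.Context) (*pushMsgResp, error) {
	err := msgToUser(ctx)
	return &pushMsgResp{}, err
}

func main() {
	ctx := context.Background()
	_, err := pushMsgBefore(ctx)
	fmt.Println("before:", err) // before: <nil> — failure hidden
	_, err = pushMsgAfter(ctx)
	fmt.Println("after:", err) // after: msg gateway unreachable
}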
@@ -79,7 +79,6 @@ func (p *Pusher) MsgToUser(ctx context.Context, userID string, msg *sdkws.MsgDat
 	}
 	err = p.OfflinePushMsg(ctx, userID, msg, userIDs)
 	if err != nil {
-		log.NewError(operationID, "OfflinePushMsg failed", userID)
 		return err
 	}
 }
@@ -1,31 +0,0 @@
-package push
-
-import (
-	"Open_IM/internal/push/sdk/tpns-server-sdk-go/go/auth"
-	"Open_IM/pkg/common/config"
-)
-
-var badgeType = -2
-var iosAcceptId = auth.Auther{AccessID: config.Config.Push.Tpns.Ios.AccessID, SecretKey: config.Config.Push.Tpns.Ios.SecretKey}
-
-func IOSAccountListPush(accounts []string, title, content, jsonCustomContent string) {
-	var iosMessage = tpns.Message{
-		Title:   title,
-		Content: content,
-		IOS: &tpns.IOSParams{
-			Aps: &tpns.Aps{
-				BadgeType: &badgeType,
-				Sound:     "default",
-				Category:  "INVITE_CATEGORY",
-			},
-			CustomContent: jsonCustomContent,
-			//CustomContent: `"{"key\":\"value\"}"`,
-		},
-	}
-	pushReq, reqBody, err := req.NewListAccountPush(accounts, iosMessage)
-	if err != nil {
-		return
-	}
-	iosAcceptId.Auth(pushReq, auth.UseSignAuthored, iosAcceptId, reqBody)
-	common.PushAndGetResult(pushReq)
-}
@@ -51,7 +51,7 @@ func (s *friendServer) ApplyToAddFriend(ctx context.Context, req *pbfriend.Apply
 	if err := tokenverify.CheckAccessV3(ctx, req.FromUserID); err != nil {
 		return nil, err
 	}
-	if err := CallbackBeforeAddFriend(ctx, req); err != nil {
+	if err := CallbackBeforeAddFriend(ctx, req); err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}
 	if req.ToUserID == req.FromUserID {
@@ -142,7 +142,7 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbGroup.CreateGroupR
 	if err != nil {
 		return nil, err
 	}
-	if err := CallbackBeforeCreateGroup(ctx, req); err != nil {
+	if err := CallbackBeforeCreateGroup(ctx, req); err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}
 	var groupMembers []*relationTb.GroupMemberModel
@@ -158,7 +158,7 @@ func (s *groupServer) CreateGroup(ctx context.Context, req *pbGroup.CreateGroupR
 		groupMember.OperatorUserID = tracelog.GetOpUserID(ctx)
 		groupMember.JoinSource = constant.JoinByInvitation
 		groupMember.InviterUserID = tracelog.GetOpUserID(ctx)
-		if err := CallbackBeforeMemberJoinGroup(ctx, groupMember, group.Ex); err != nil {
+		if err := CallbackBeforeMemberJoinGroup(ctx, groupMember, group.Ex); err != nil && err != constant.ErrCallbackContinue {
 			return err
 		}
 		groupMembers = append(groupMembers, groupMember)
@@ -318,7 +318,7 @@ func (s *groupServer) InviteUserToGroup(ctx context.Context, req *pbGroup.Invite
 		member.OperatorUserID = opUserID
 		member.InviterUserID = opUserID
 		member.JoinSource = constant.JoinByInvitation
-		if err := CallbackBeforeMemberJoinGroup(ctx, member, group.Ex); err != nil {
+		if err := CallbackBeforeMemberJoinGroup(ctx, member, group.Ex); err != nil && err != constant.ErrCallbackContinue {
 			return nil, err
 		}
 		groupMembers = append(groupMembers, member)
@@ -601,7 +601,7 @@ func (s *groupServer) GroupApplicationResponse(ctx context.Context, req *pbGroup
 			OperatorUserID: tracelog.GetOpUserID(ctx),
 			Ex:             groupRequest.Ex,
 		}
-		if err = CallbackBeforeMemberJoinGroup(ctx, member, group.Ex); err != nil {
+		if err = CallbackBeforeMemberJoinGroup(ctx, member, group.Ex); err != nil && err != constant.ErrCallbackContinue {
 			return nil, err
 		}
 	}
@@ -645,7 +645,7 @@ func (s *groupServer) JoinGroup(ctx context.Context, req *pbGroup.JoinGroupReq)
 	groupMember.OperatorUserID = tracelog.GetOpUserID(ctx)
 	groupMember.JoinSource = constant.JoinByInvitation
 	groupMember.InviterUserID = tracelog.GetOpUserID(ctx)
-	if err := CallbackBeforeMemberJoinGroup(ctx, groupMember, group.Ex); err != nil {
+	if err := CallbackBeforeMemberJoinGroup(ctx, groupMember, group.Ex); err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}
 	if err := s.GroupDatabase.CreateGroup(ctx, nil, []*relationTb.GroupMemberModel{groupMember}); err != nil {
@@ -16,7 +16,7 @@ func (m *msgServer) sendMsgSuperGroupChat(ctx context.Context, req *msg.SendMsgR
 	resp = &msg.SendMsgResp{}
 	promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgRecvSuccessCounter)
 	// callback
-	if err = CallbackBeforeSendGroupMsg(ctx, req); err != nil {
+	if err = CallbackBeforeSendGroupMsg(ctx, req); err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}

@@ -61,7 +61,7 @@ func (m *msgServer) sendMsgNotification(ctx context.Context, req *msg.SendMsgReq

 func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *msg.SendMsgReq) (resp *msg.SendMsgResp, err error) {
 	promePkg.PromeInc(promePkg.SingleChatMsgRecvSuccessCounter)
-	if err = CallbackBeforeSendSingleMsg(ctx, req); err != nil {
+	if err = CallbackBeforeSendSingleMsg(ctx, req); err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}
 	_, err = m.messageVerification(ctx, req)
@@ -86,7 +86,7 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *msg.SendMsgReq)
 		}
 	}
 	err = CallbackAfterSendSingleMsg(ctx, req)
-	if err != nil {
+	if err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}
 	promePkg.PromeInc(promePkg.SingleChatMsgProcessSuccessCounter)
@@ -100,7 +100,7 @@ func (m *msgServer) sendMsgGroupChat(ctx context.Context, req *msg.SendMsgReq) (
 	// callback
 	promePkg.PromeInc(promePkg.GroupChatMsgRecvSuccessCounter)
 	err = CallbackBeforeSendGroupMsg(ctx, req)
-	if err != nil {
+	if err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}

@@ -235,7 +235,7 @@ func (m *msgServer) SendMsg(ctx context.Context, req *msg.SendMsgReq) (resp *msg
 		return nil, constant.ErrMessageHasReadDisable.Wrap()
 	}
 	m.encapsulateMsgData(req.MsgData)
-	if err := CallbackMsgModify(ctx, req); err != nil {
+	if err := CallbackMsgModify(ctx, req); err != nil && err != constant.ErrCallbackContinue {
 		return nil, err
 	}
 	switch req.MsgData.SessionType {
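Every callback site in the hunks above gains the same guard: constant.ErrCallbackContinue is treated as "the webhook chose not to block this action", so only other errors abort the request. A sketch of the pattern, with the sentinel defined locally for illustration (the real one lives in the constant package; the diff compares with != rather than errors.Is):

package main

import (
	"errors"
	"fmt"
)

// errCallbackContinue stands in for constant.ErrCallbackContinue: a
// sentinel meaning "callback did not block; carry on with the request".
var errCallbackContinue = errors.New("callback continue")

func callbackBeforeAddFriend(approve bool) error {
	if approve {
		return errCallbackContinue // let the server proceed
	}
	return errors.New("friend request rejected by callback")
}

func applyToAddFriend(approve bool) error {
	// The guard added throughout this commit: only a real error aborts.
	if err := callbackBeforeAddFriend(approve); err != nil && err != errCallbackContinue {
		return err
	}
	fmt.Println("friend request stored")
	return nil
}

func main() {
	fmt.Println(applyToAddFriend(true))  // stored; returns <nil>
	fmt.Println(applyToAddFriend(false)) // returns the callback's rejection
}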
pkg/common/db/cache/redis.go (vendored)
@@ -59,7 +59,7 @@ type Cache interface {

 	SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
 	DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
-	GetMessageListBySeq(ctx context.Context, userID string, seqList []int64) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error)
+	GetMessagesBySeq(ctx context.Context, userID string, seqList []int64) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err error)
 	SetMessageToCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) (int, error)
 	DeleteMessageFromCache(ctx context.Context, userID string, msgList []*pbChat.MsgDataToMQ) error
 	CleanUpOneUserAllMsg(ctx context.Context, userID string) error
@@ -213,13 +213,13 @@ func (r *RedisClient) SetGroupMinSeq(ctx context.Context, groupID string, minSeq
 }

 // Store userid and platform class to redis
-func (r *RedisClient) AddTokenFlag(ctx context.Context, userID string, platform string, token string, flag int) error {
-	key := uidPidToken + userID + ":" + platform
+func (r *RedisClient) AddTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
+	key := uidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
 	return r.rdb.HSet(context.Background(), key, token, flag).Err()
 }

 //key:userID+platform-> <token, flag>
-func (r *RedisClient) GetTokenMapByUidPid(ctx context.Context, userID, platformID string) (map[string]int, error) {
+func (r *RedisClient) GetTokenMapByUidPid(ctx context.Context, userID, platformID int) (map[string]int, error) {
 	key := uidPidToken + userID + ":" + platformID
 	m, err := r.rdb.HGetAll(context.Background(), key).Result()
 	mm := make(map[string]int)
@@ -256,7 +256,7 @@ func (r *RedisClient) DeleteTokenByUidPid(ctx context.Context, userID string, pl
 	return r.rdb.HDel(context.Background(), key, fields...).Err()
 }

-func (r *RedisClient) GetMessageListBySeq(ctx context.Context, userID string, seqList []int64, operationID string) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err2 error) {
+func (r *RedisClient) GetMessagesBySeq(ctx context.Context, userID string, seqList []int64, operationID string) (seqMsg []*sdkws.MsgData, failedSeqList []int64, err2 error) {
 	for _, v := range seqList {
 		//MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1
 		key := messageCache + userID + "_" + strconv.Itoa(int(v))
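AddTokenFlag now accepts a numeric platformID and derives the platform name when building the Redis hash key. A sketch of the resulting key scheme; the prefix value and ID-to-name table below are assumptions for illustration, and the real ones live in the cache and constant packages:

package main

import "fmt"

// Assumed prefix for illustration; the real constant lives in the cache package.
const uidPidToken = "UID_PID_TOKEN_STATUS:"

// platformIDToName stands in for constant.PlatformIDToName; the mapping
// here is illustrative, not the authoritative platform table.
func platformIDToName(platformID int) string {
	names := map[int]string{1: "IOS", 2: "Android", 5: "Web"}
	return names[platformID]
}

// tokenKey builds the hash key the way the new AddTokenFlag does:
// prefix + userID + ":" + platform name derived from the numeric ID.
func tokenKey(userID string, platformID int) string {
	return uidPidToken + userID + ":" + platformIDToName(platformID)
}

func main() {
	fmt.Println(tokenKey("u100", 1)) // UID_PID_TOKEN_STATUS:u100:IOS
}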
@@ -161,7 +161,6 @@ type MsgDatabaseInterface interface {
 	GetUserMinSeq(ctx context.Context, userID string) (int64, error)
 	GetGroupMaxSeq(ctx context.Context, groupID string) (int64, error)
 	GetGroupMinSeq(ctx context.Context, groupID string) (int64, error)
-	GetMessageListBySeq(ctx context.Context, userID string, seqs []int64) ([]*sdkws.MsgData, error)
 }
 type MsgDatabase struct {
 	mgo unRelationTb.MsgDocModelInterface
@@ -249,11 +248,6 @@ func (db *MsgDatabase) GetGroupMinSeq(ctx context.Context, groupID string) (int6
 	panic("implement me")
 }

-func (db *MsgDatabase) GetMessageListBySeq(ctx context.Context, userID string, seqs []int64) ([]*sdkws.MsgData, error) {
-	//TODO implement me
-	panic("implement me")
-}
-
 func NewMsgDatabase(mgo *mongo.Client, rdb redis.UniversalClient) MsgDatabaseInterface {
 	return &MsgDatabase{}
 }
@@ -530,7 +524,7 @@ func (db *MsgDatabase) getMsgBySeqs(ctx context.Context, sourceID string, seqs [
 }

 func (db *MsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, seqs []int64) (seqMsg []*sdkws.MsgData, err error) {
-	successMsgs, failedSeqs, err := db.cache.GetMessageListBySeq(ctx, userID, seqs)
+	successMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, userID, seqs)
 	if err != nil {
 		if err != redis.Nil {
 			prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
@@ -551,7 +545,7 @@ func (db *MsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, seqs []i
 }

 func (db *MsgDatabase) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []int64) (seqMsg []*sdkws.MsgData, err error) {
-	successMsgs, failedSeqs, err := db.cache.GetMessageListBySeq(ctx, groupID, seqs)
+	successMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, groupID, seqs)
 	if err != nil {
 		if err != redis.Nil {
 			prome.PromeAdd(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
@@ -1,7 +1,9 @@
 package localcache

 import (
-	discoveryRegistry "Open_IM/pkg/discoveryregistry"
+	"Open_IM/pkg/common/config"
+	"Open_IM/pkg/discoveryregistry"
+	"Open_IM/pkg/proto/conversation"
 	"context"
 	"sync"
 )
@@ -13,10 +15,10 @@ type ConversationLocalCacheInterface interface {
 type ConversationLocalCache struct {
 	lock                              sync.Mutex
 	SuperGroupRecvMsgNotNotifyUserIDs map[string][]string
-	client                            discoveryRegistry.SvcDiscoveryRegistry
+	client                            discoveryregistry.SvcDiscoveryRegistry
 }

-func NewConversationLocalCache(client discoveryRegistry.SvcDiscoveryRegistry) ConversationLocalCache {
+func NewConversationLocalCache(client discoveryregistry.SvcDiscoveryRegistry) ConversationLocalCache {
 	return ConversationLocalCache{
 		SuperGroupRecvMsgNotNotifyUserIDs: make(map[string][]string, 0),
 		client:                            client,
@@ -24,5 +26,16 @@ func NewConversationLocalCache(client discoveryRegistry.SvcDiscoveryRegistry) Co
 }

 func (g *ConversationLocalCache) GetRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error) {
-	return []string{}, nil
+	conn, err := g.client.GetConn(config.Config.RpcRegisterName.OpenImConversationName)
+	if err != nil {
+		return nil, err
+	}
+	client := conversation.NewConversationClient(conn)
+	resp, err := client.GetRecvMsgNotNotifyUserIDs(ctx, &conversation.GetRecvMsgNotNotifyUserIDsReq{
+		GroupID: groupID,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return resp.UserIDs, nil
 }
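GetRecvMsgNotNotifyUserIDs goes from a stub returning an empty slice to a real call into the conversation service. A reduced sketch of that shape, with the generated gRPC client modeled as a one-method interface so the snippet runs without a server:

package main

import (
	"context"
	"fmt"
)

// conversationClient is a stand-in for the generated conversation client;
// only the one method used by the local cache is modeled.
type conversationClient interface {
	GetRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error)
}

type conversationLocalCache struct {
	client conversationClient
}

// GetRecvMsgNotNotifyUserIDs now always asks the conversation service,
// as in the diff; a later change could layer caching on top of this call.
func (g *conversationLocalCache) GetRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error) {
	return g.client.GetRecvMsgNotNotifyUserIDs(ctx, groupID)
}

type fakeConversationService struct{}

func (fakeConversationService) GetRecvMsgNotNotifyUserIDs(_ context.Context, groupID string) ([]string, error) {
	return []string{"u1", "u2"}, nil // users in groupID who muted notifications
}

func main() {
	cache := &conversationLocalCache{client: fakeConversationService{}}
	userIDs, err := cache.GetRecvMsgNotNotifyUserIDs(context.Background(), "g100")
	fmt.Println(userIDs, err)
}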
@@ -100,5 +100,5 @@ func (c *ConversationGorm) FindRecvMsgNotNotifyUserIDs(ctx context.Context, grou
 	defer func() {
 		tracelog.SetCtxDebug(ctx, utils.GetFuncName(1), err, "groupID", groupID, "userIDs", userIDs)
 	}()
-	return userIDs, utils.Wrap(c.DB.Model(&relation.ConversationModel{}).Where("group_id = ? and recv_msg_opt", groupID, constant.ReceiveNotNotifyMessage).Pluck("user_id", &userIDs).Error, "")
+	return userIDs, utils.Wrap(c.DB.Model(&relation.ConversationModel{}).Where("group_id = ? and recv_msg_opt = ?", groupID, constant.ReceiveNotNotifyMessage).Pluck("user_id", &userIDs).Error, "")
 }
@@ -63,10 +63,10 @@ func (p *Producer) SendMessage(m proto.Message, key string, operationID string)
 		log.Error(operationID, "kMsg.Key.Length() == 0 || kMsg.Value.Length() == 0 ", kMsg)
 		return -1, -1, errors.New("key or value == 0")
 	}
-	a, b, c := p.producer.SendMessage(kMsg)
+	partition, offset, err := p.producer.SendMessage(kMsg)
 	log.Info(operationID, "ByteEncoder SendMessage end", "key ", kMsg.Key.Length(), kMsg.Value.Length(), p.producer)
-	if c == nil {
+	if err == nil {
 		prome.PromeInc(prome.SendMsgCounter)
 	}
-	return a, b, utils.Wrap(c, "")
+	return partition, offset, utils.Wrap(err, "")
 }
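The rename above replaces the opaque a, b, c with what sarama's SyncProducer.SendMessage actually returns: the partition and offset assigned to the message, plus the send error. A sketch of handling those values (the producer is faked so the snippet runs without a broker):

package main

import (
	"errors"
	"fmt"
)

// sendMessage fakes sarama.SyncProducer.SendMessage, which returns the
// partition and offset assigned to the message, and an error.
func sendMessage(key, value string) (partition int32, offset int64, err error) {
	if len(key) == 0 || len(value) == 0 {
		return -1, -1, errors.New("key or value == 0")
	}
	return 0, 12345, nil
}

func main() {
	partition, offset, err := sendMessage("msgKey", "payload")
	if err == nil {
		// the real code increments prome.SendMsgCounter here
		fmt.Printf("sent to partition %d at offset %d\n", partition, offset)
	}
}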
@@ -11,7 +11,7 @@ import (
 	"google.golang.org/grpc/peer"
 )

-func UnaryServerInterceptorProme(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+func UnaryServerInterceptorPrometheus(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
 	remote, _ := peer.FromContext(ctx)
 	remoteAddr := remote.Addr.String()

@@ -9,37 +9,4 @@ type SvcDiscoveryRegistry interface {
 	UnRegister() error
 	GetConns(serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error)
 	GetConn(serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
-
-	//RegisterConf(conf []byte) error
-	//LoadConf() ([]byte, error)
 }
-
-//func registerConf(key, conf string) {
-//	etcdAddr := strings.Join(config.Config.Etcd.EtcdAddr, ",")
-//	cli, err := clientv3.New(clientv3.Config{
-//		Endpoints: strings.Split(etcdAddr, ","), DialTimeout: 5 * time.Second})
-//
-//	if err != nil {
-//		panic(err.Error())
-//	}
-//	//lease
-//	if _, err := cli.Put(context.Background(), key, conf); err != nil {
-//		fmt.Println("panic, params: ")
-//		panic(err.Error())
-//	}
-//}
-//
-//func RegisterConf() {
-//	bytes, err := yaml.Marshal(config.Config)
-//	if err != nil {
-//		panic(err.Error())
-//	}
-//	secretMD5 := utils.Md5(config.Config.Etcd.Secret)
-//	confBytes, err := utils.AesEncrypt(bytes, []byte(secretMD5[0:16]))
-//	if err != nil {
-//		panic(err.Error())
-//	}
-//	fmt.Println("start register", secretMD5, getcdv3.GetPrefix(config.Config.Etcd.EtcdSchema, config.ConfName))
-//	registerConf(getcdv3.GetPrefix(config.Config.Etcd.EtcdSchema, config.ConfName), string(confBytes))
-//	fmt.Println("etcd register conf ok")
-//}
@@ -1,16 +0,0 @@
-package discoveryregistry
-
-import "google.golang.org/grpc"
-
-type Robin struct {
-	next int
-}
-
-func (r *Robin) Robin(slice []*grpc.ClientConn) int {
-	index := r.next
-	r.next += 1
-	if r.next > len(slice)-1 {
-		r.next = 0
-	}
-	return index
-}