mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-11-02 17:32:11 +08:00)

feat: implement kafka producer and consumer.

commit d4669d9ab0 (parent 0a71bef823)
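In brief: offline push gets its own Kafka topic. controller.NewPushDatabase now takes the Kafka config and owns the producer for toOfflinePushTopic, MsgToOfflinePushMQ moves from CommonMsgDatabase to PushDatabase, the push RPC drops its direct MongoDB wiring, and the push consumer group subscribes to both toPushTopic and toOfflinePushTopic, dispatching by msg.Topic in ConsumeClaim.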
@@ -22,6 +22,8 @@ toRedisGroupID: redis
 toMongoGroupID: mongo
 # Consumer group ID for push notifications topic
 toPushGroupID: push
+# Consumer group ID for offline push notifications topic
+toOfflinePushGroupID: offlinePush
 # TLS (Transport Layer Security) configuration
 tls:
   # Enable or disable TLS
@@ -7,9 +7,7 @@ import (
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
 	pbpush "github.com/openimsdk/protocol/push"
-	"github.com/openimsdk/tools/db/mongoutil"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/discovery"
 	"google.golang.org/grpc"
@@ -25,7 +23,6 @@ type pushServer struct {
 type Config struct {
 	RpcConfig          config.Push
 	RedisConfig        config.Redis
-	MongodbConfig      config.Mongo
 	KafkaConfig        config.Kafka
 	NotificationConfig config.Notification
 	Share              config.Share
@@ -49,10 +46,6 @@ func (p pushServer) DelUserPushToken(ctx context.Context,
 }
 
 func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
-	mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
-	if err != nil {
-		return err
-	}
 	rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
 	if err != nil {
 		return err
@@ -62,29 +55,9 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
 	if err != nil {
 		return err
 	}
-	database := controller.NewPushDatabase(cacheModel)
-	msgModel := redis.NewMsgCache(rdb)
-	msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
-	if err != nil {
-		return err
-	}
-	seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
-	if err != nil {
-		return err
-	}
-	seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
-	seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
-	if err != nil {
-		return err
-	}
-	seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
-
-	msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
-	if err != nil {
-		return err
-	}
-
-	consumer, err := NewConsumerHandler(config, msgDatabase, offlinePusher, rdb, client)
+	database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig)
+	consumer, err := NewConsumerHandler(config, database, offlinePusher, rdb, client)
 	if err != nil {
 		return err
 	}
@@ -33,7 +33,7 @@ type ConsumerHandler struct {
 	pushConsumerGroup      *kafka.MConsumerGroup
 	offlinePusher          offlinepush.OfflinePusher
 	onlinePusher           OnlinePusher
-	msgDatabase            controller.CommonMsgDatabase
+	pushDatabase           controller.PushDatabase
 	onlineCache            *rpccache.OnlineCache
 	groupLocalCache        *rpccache.GroupLocalCache
 	conversationLocalCache *rpccache.ConversationLocalCache
@@ -44,15 +44,16 @@ type ConsumerHandler struct {
 	config                 *Config
 }
 
-func NewConsumerHandler(config *Config, database controller.CommonMsgDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
+func NewConsumerHandler(config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
 	client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) {
 	var consumerHandler ConsumerHandler
 	var err error
 	consumerHandler.pushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToPushGroupID,
-		[]string{config.KafkaConfig.ToPushTopic}, true)
+		[]string{config.KafkaConfig.ToPushTopic, config.KafkaConfig.ToOfflinePushTopic}, true)
 	if err != nil {
 		return nil, err
 	}
 
 	userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
 	consumerHandler.offlinePusher = offlinePusher
 	consumerHandler.onlinePusher = NewOnlinePusher(client, config)
@@ -63,8 +64,7 @@ func NewConsumerHandler(config *Config, database controller.CommonMsgDatabase, o
 	consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
 	consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
 	consumerHandler.config = config
-	consumerHandler.msgDatabase = database
-	//
+	consumerHandler.pushDatabase = database
 	consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
 	return &consumerHandler, nil
 }
@@ -85,14 +85,6 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
 	}
 	var err error
 
-	if len(msgFromMQ.GetUserIDs()) > 0 {
-		err := c.offlinePushMsg(ctx, msgFromMQ.MsgData, msgFromMQ.UserIDs)
-		if err != nil {
-			log.ZWarn(ctx, "offline push failed", err, "msg", msgFromMQ.String())
-		}
-		return
-	}
-
 	switch msgFromMQ.MsgData.SessionType {
 	case constant.ReadGroupChatType:
 		err = c.Push2Group(ctx, msgFromMQ.MsgData.GroupID, msgFromMQ.MsgData)
@@ -111,6 +103,19 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
 	}
 }
 
+func (c *ConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) {
+	offlinePushMsg := pbpush.PushMsgReq{}
+	if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil {
+		log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg))
+		return
+	}
+	err := c.offlinePushMsg(ctx, offlinePushMsg.MsgData, offlinePushMsg.UserIDs)
+	if err != nil {
+		log.ZWarn(ctx, "offline push failed", err, "msg", offlinePushMsg.String())
+	}
+
+}
+
 func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
 
 func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
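The new handleMsg2OfflinePush round-trips a protobuf PushMsgReq through Kafka. A self-contained sketch of just that encode/decode step, using the same openimsdk/protocol types that appear in this diff (IDs and payload are made-up values):

package main

import (
	"fmt"
	"log"

	"github.com/openimsdk/protocol/push"
	"github.com/openimsdk/protocol/sdkws"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Producer side: serialize the request as it would be written to Kafka.
	raw, err := proto.Marshal(&push.PushMsgReq{
		MsgData: &sdkws.MsgData{SendID: "u1", RecvID: "u2", Content: []byte("hi")},
		UserIDs: []string{"u2"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Consumer side: decode the raw value before fanning out to the pusher.
	var req push.PushMsgReq
	if err := proto.Unmarshal(raw, &req); err != nil {
		log.Fatal(err) // the real handler logs the error and skips the message
	}
	fmt.Println(req.UserIDs) // [u2]
}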
@@ -118,7 +123,12 @@ func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil
 func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
 	for msg := range claim.Messages() {
 		ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
-		c.handleMs2PsChat(ctx, msg.Value)
+		switch msg.Topic {
+		case c.config.KafkaConfig.ToPushTopic:
+			c.handleMs2PsChat(ctx, msg.Value)
+		case c.config.KafkaConfig.ToOfflinePushTopic:
+			c.handleMsg2OfflinePush(ctx, msg.Value)
+		}
 		sess.MarkMessage(msg, "")
 	}
 	return nil
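For orientation, a minimal standalone sketch of the consume-and-dispatch pattern the hunks above adopt: one consumer group subscribed to two topics, branching on msg.Topic. It is written against the sarama API that MConsumerGroup wraps; the broker address, group ID, and topic names are placeholders, not values from this repository:

package main

import (
	"context"
	"log"

	"github.com/IBM/sarama"
)

// handler implements sarama.ConsumerGroupHandler and mirrors the dispatch
// added in ConsumeClaim above.
type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		switch msg.Topic {
		case "toPushTopic":
			log.Printf("online/offline routing: %d bytes", len(msg.Value))
		case "toOfflinePushTopic":
			log.Printf("offline push only: %d bytes", len(msg.Value))
		}
		sess.MarkMessage(msg, "") // mark the offset as processed
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // consumer groups need >= 0.10.2
	cfg.Consumer.Offsets.Initial = sarama.OffsetNewest

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "push", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	for {
		// Consume returns on rebalance; loop to rejoin the group.
		if err := group.Consume(context.Background(), []string{"toPushTopic", "toOfflinePushTopic"}, handler{}); err != nil {
			log.Fatal(err)
		}
	}
}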
@@ -262,18 +272,12 @@ func (c *ConsumerHandler) asyncOfflinePush(ctx context.Context, needOfflinePushU
 	if len(offlinePushUserIDs) > 0 {
 		needOfflinePushUserIDs = offlinePushUserIDs
 	}
-	if err := c.msgDatabase.MsgToOfflinePushMQ(ctx, conversationutil.GenConversationUniqueKeyForSingle(msg.SendID, msg.RecvID), needOfflinePushUserIDs, msg); err != nil {
+	if err := c.pushDatabase.MsgToOfflinePushMQ(ctx, conversationutil.GenConversationUniqueKeyForSingle(msg.SendID, msg.RecvID), needOfflinePushUserIDs, msg); err != nil {
 		prommetrics.SingleChatMsgProcessFailedCounter.Inc()
 		return
 	}
 }
 
-func (c *ConsumerHandler) handleMsg2OfflinePush(ctx context.Context, needOfflinePushUserIDs []string, msg *sdkws.MsgData) {
-	if err := c.offlinePushMsg(ctx, msg, needOfflinePushUserIDs); err != nil {
-		log.ZWarn(ctx, "offlinePushMsg failed", err, "needOfflinePushUserIDs", needOfflinePushUserIDs, "msg", msg)
-	}
-}
-
 func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID string, pushToUserIDs *[]string, msg *sdkws.MsgData) (err error) {
 	if len(*pushToUserIDs) == 0 {
 		*pushToUserIDs, err = c.groupLocalCache.GetGroupMemberIDs(ctx, groupID)
|||||||
@ -73,19 +73,21 @@ type Mongo struct {
|
|||||||
MaxRetry int `mapstructure:"maxRetry"`
|
MaxRetry int `mapstructure:"maxRetry"`
|
||||||
}
|
}
|
||||||
type Kafka struct {
|
type Kafka struct {
|
||||||
Username string `mapstructure:"username"`
|
Username string `mapstructure:"username"`
|
||||||
Password string `mapstructure:"password"`
|
Password string `mapstructure:"password"`
|
||||||
ProducerAck string `mapstructure:"producerAck"`
|
ProducerAck string `mapstructure:"producerAck"`
|
||||||
CompressType string `mapstructure:"compressType"`
|
CompressType string `mapstructure:"compressType"`
|
||||||
Address []string `mapstructure:"address"`
|
Address []string `mapstructure:"address"`
|
||||||
ToRedisTopic string `mapstructure:"toRedisTopic"`
|
ToRedisTopic string `mapstructure:"toRedisTopic"`
|
||||||
ToMongoTopic string `mapstructure:"toMongoTopic"`
|
ToMongoTopic string `mapstructure:"toMongoTopic"`
|
||||||
ToPushTopic string `mapstructure:"toPushTopic"`
|
ToPushTopic string `mapstructure:"toPushTopic"`
|
||||||
ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"`
|
ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"`
|
||||||
ToRedisGroupID string `mapstructure:"toRedisGroupID"`
|
ToRedisGroupID string `mapstructure:"toRedisGroupID"`
|
||||||
ToMongoGroupID string `mapstructure:"toMongoGroupID"`
|
ToMongoGroupID string `mapstructure:"toMongoGroupID"`
|
||||||
ToPushGroupID string `mapstructure:"toPushGroupID"`
|
ToPushGroupID string `mapstructure:"toPushGroupID"`
|
||||||
Tls TLSConfig `mapstructure:"tls"`
|
ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"`
|
||||||
|
|
||||||
|
Tls TLSConfig `mapstructure:"tls"`
|
||||||
}
|
}
|
||||||
type TLSConfig struct {
|
type TLSConfig struct {
|
||||||
EnableTLS bool `mapstructure:"enableTLS"`
|
EnableTLS bool `mapstructure:"enableTLS"`
|
||||||
|
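The new toOfflinePushGroupID key is bound to the ToOfflineGroupID field through its mapstructure tag. A minimal sketch of that binding with a trimmed-down struct, assuming github.com/mitchellh/mapstructure (the decoder these tags are conventionally read by):

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

// Kafka here is a two-field stand-in for the full config struct above.
type Kafka struct {
	ToPushGroupID    string `mapstructure:"toPushGroupID"`
	ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"`
}

func main() {
	// Values as they would arrive from the YAML loader.
	raw := map[string]any{
		"toPushGroupID":        "push",
		"toOfflinePushGroupID": "offlinePush",
	}
	var k Kafka
	if err := mapstructure.Decode(raw, &k); err != nil {
		log.Fatal(err)
	}
	fmt.Println(k.ToPushGroupID, k.ToOfflineGroupID) // push offlinePush
}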
|||||||
@ -30,7 +30,6 @@ import (
|
|||||||
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
|
||||||
"github.com/openimsdk/protocol/constant"
|
"github.com/openimsdk/protocol/constant"
|
||||||
pbmsg "github.com/openimsdk/protocol/msg"
|
pbmsg "github.com/openimsdk/protocol/msg"
|
||||||
"github.com/openimsdk/protocol/push"
|
|
||||||
"github.com/openimsdk/protocol/sdkws"
|
"github.com/openimsdk/protocol/sdkws"
|
||||||
"github.com/openimsdk/tools/errs"
|
"github.com/openimsdk/tools/errs"
|
||||||
"github.com/openimsdk/tools/log"
|
"github.com/openimsdk/tools/log"
|
||||||
@@ -93,7 +92,6 @@ type CommonMsgDatabase interface {
 	// to mq
 	MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error
 	MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error)
-	MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error
 	MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error
 
 	RangeUserSendCount(ctx context.Context, start time.Time, end time.Time, group bool, ase bool, pageNumber int32, showNumber int32) (msgCount int64, userCount int64, users []*model.UserCount, dateCount map[string]int64, err error)
@@ -124,32 +122,27 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser
 	if err != nil {
 		return nil, err
 	}
-	producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
-	if err != nil {
-		return nil, err
-	}
 	return &commonMsgDatabase{
 		msgDocDatabase:  msgDocModel,
 		msg:             msg,
 		seqUser:         seqUser,
 		seqConversation: seqConversation,
 		producer:        producerToRedis,
 		producerToMongo: producerToMongo,
 		producerToPush:  producerToPush,
-		producerToOfflinePush: producerToOfflinePush,
 	}, nil
 }
 
 type commonMsgDatabase struct {
 	msgDocDatabase  database.Msg
 	msgTable        model.MsgDocModel
 	msg             cache.MsgCache
 	seqConversation cache.SeqConversationCache
 	seqUser         cache.SeqUser
 	producer        *kafka.Producer
 	producerToMongo *kafka.Producer
 	producerToPush  *kafka.Producer
-	producerToOfflinePush *kafka.Producer
 }
 
 func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error {
@@ -166,11 +159,6 @@ func (db *commonMsgDatabase) MsgToPushMQ(ctx context.Context, key, conversationI
 	return partition, offset, nil
 }
 
-func (db *commonMsgDatabase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error {
-	_, _, err := db.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
-	return err
-}
-
 func (db *commonMsgDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error {
 	if len(messages) > 0 {
 		_, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
@@ -17,21 +17,41 @@ package controller
 import (
 	"context"
 
+	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+	"github.com/openimsdk/protocol/push"
+	"github.com/openimsdk/protocol/sdkws"
+	"github.com/openimsdk/tools/mq/kafka"
 )
 
 type PushDatabase interface {
 	DelFcmToken(ctx context.Context, userID string, platformID int) error
+	MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error
 }
 
 type pushDataBase struct {
 	cache cache.ThirdCache
+	producerToOfflinePush *kafka.Producer
 }
 
-func NewPushDatabase(cache cache.ThirdCache) PushDatabase {
-	return &pushDataBase{cache: cache}
+func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase {
+	conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
+	if err != nil {
+		return nil
+	}
+	producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
+	if err != nil {
+		return nil
+	}
+	return &pushDataBase{cache: cache,
+		producerToOfflinePush: producerToOfflinePush}
 }
 
 func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformID int) error {
 	return p.cache.DelFcmToken(ctx, userID, platformID)
 }
+
+func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error {
+	_, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
+	return err
+}
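pushDataBase now owns the producer for ToOfflinePushTopic and keys each message by a conversation-unique string, so every message of one conversation hashes to the same partition and stays ordered. A standalone sketch of the equivalent produce path using sarama's SyncProducer directly (the real code goes through openimsdk/tools/mq/kafka; broker address, topic, key, and IDs are placeholders):

package main

import (
	"log"

	"github.com/IBM/sarama"
	"github.com/openimsdk/protocol/push"
	"github.com/openimsdk/protocol/sdkws"
	"google.golang.org/protobuf/proto"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = sarama.WaitForAll // conservative producerAck setting
	cfg.Producer.Return.Successes = true          // required by SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	value, err := proto.Marshal(&push.PushMsgReq{
		MsgData: &sdkws.MsgData{SendID: "u1", RecvID: "u2"},
		UserIDs: []string{"u2"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Keying by the conversation keeps per-conversation ordering.
	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "toOfflinePushTopic",
		Key:   sarama.StringEncoder("si_u1_u2"), // conversation-unique key
		Value: sarama.ByteEncoder(value),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}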