Mirror of https://github.com/openimsdk/open-im-server.git

Commit aeee3f33b1 (parent 1e54235263): resolving merge conflicts
go.sum (4 changed lines)

@@ -345,8 +345,8 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA
 github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
 github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
 github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
-github.com/openimsdk/gomake v0.0.15-alpha.2 h1:5Q8yl8ezy2yx+q8/ucU/t4kJnDfCzNOrkXcDACCqtyM=
-github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
+github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
+github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
 github.com/openimsdk/protocol v0.0.72-alpha.79 h1:e46no8WVAsmTzyy405klrdoUiG7u+1ohDsXvQuFng4s=
 github.com/openimsdk/protocol v0.0.72-alpha.79/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw=
 github.com/openimsdk/tools v0.0.50-alpha.74 h1:yh10SiMiivMEjicEQg+QAsH4pvaO+4noMPdlw+ew0Kc=
@@ -73,7 +73,7 @@ func (cm *ConfigManager) GetConfig(c *gin.Context) {
 func (cm *ConfigManager) GetConfigList(c *gin.Context) {
     var resp apistruct.GetConfigListResp
     resp.ConfigNames = cm.config.GetConfigNames()
-    resp.Environment = runtimeenv.PrintRuntimeEnvironment()
+    resp.Environment = runtimeenv.RuntimeEnvironment()
     resp.Version = version.Version

     apiresp.GinSuccess(c, resp)
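This hunk is one instance of a rename applied throughout the commit: runtimeenv.PrintRuntimeEnvironment() from github.com/openimsdk/tools is replaced by runtimeenv.RuntimeEnvironment(). A minimal sketch of the new call, assuming it still simply returns the environment name (the surrounding call sites use it that way); the configDir variable is illustrative only:

    // Sketch: RuntimeEnvironment() is assumed to return a value comparable to
    // config.KUBERNETES, as the readConfig hunk later in this commit does.
    env := runtimeenv.RuntimeEnvironment()
    if env == config.KUBERNETES {
        configDir = os.Getenv(config.MountConfigFilePath) // read the mounted config path
    }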
@@ -56,7 +56,7 @@ func Start(ctx context.Context, index int, config *Config) error {
         return err
     }

-    config.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
+    config.RuntimeEnv = runtimeenv.RuntimeEnvironment()

     client, err := kdisc.NewDiscoveryRegister(&config.Discovery, config.RuntimeEnv, []string{
         config.Discovery.RpcService.MessageGateway,
@@ -223,15 +223,19 @@ func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *C
     if err != nil {
         return err
     }
+    if len(conns) == 0 || (len(conns) == 1 && ws.disCov.IsSelfNode(conns[0])) {
+        return nil
+    }

     wg := errgroup.Group{}
     wg.SetLimit(concurrentRequest)

     // Online push user online message to other node
     for _, v := range conns {
         v := v
-        log.ZDebug(ctx, " sendUserOnlineInfoToOtherNode conn ", "target", v.Target())
-        if v.Target() == ws.disCov.GetSelfConnTarget() {
-            log.ZDebug(ctx, "Filter out this node", "node", v.Target())
+        log.ZDebug(ctx, "sendUserOnlineInfoToOtherNode conn")
+        if ws.disCov.IsSelfNode(v) {
+            log.ZDebug(ctx, "Filter out this node")
             continue
         }
@@ -242,7 +246,7 @@ func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *C
             PlatformID: int32(client.PlatformID), Token: client.token,
         })
         if err != nil {
-            log.ZWarn(ctx, "MultiTerminalLoginCheck err", err, "node", v.Target())
+            log.ZWarn(ctx, "MultiTerminalLoginCheck err", err)
         }
         return nil
     })
@@ -74,7 +74,7 @@ type Config struct {
 }

 func Start(ctx context.Context, index int, config *Config) error {
-    runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
+    runTimeEnv := runtimeenv.RuntimeEnvironment()

     log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runTimeEnv, "prometheusPorts",
         config.MsgTransfer.Prometheus.Ports, "index", index)
@@ -29,6 +29,7 @@ import (
     "github.com/go-redis/redis"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
     "github.com/openimsdk/open-im-server/v3/pkg/tools/batcher"
     "github.com/openimsdk/protocol/constant"
@@ -37,7 +38,6 @@ import (
     "github.com/openimsdk/tools/errs"
     "github.com/openimsdk/tools/log"
     "github.com/openimsdk/tools/mcontext"
-    "github.com/openimsdk/tools/mq/kafka"
     "github.com/openimsdk/tools/utils/stringutil"
     "google.golang.org/protobuf/proto"
 )
@@ -21,9 +21,9 @@ import (
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     pbmsg "github.com/openimsdk/protocol/msg"
     "github.com/openimsdk/tools/log"
-    "github.com/openimsdk/tools/mq/kafka"
     "google.golang.org/protobuf/proto"
 )
@@ -7,12 +7,12 @@ import (
     "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
     "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/protocol/constant"
     pbpush "github.com/openimsdk/protocol/push"
     "github.com/openimsdk/protocol/sdkws"
     "github.com/openimsdk/tools/errs"
     "github.com/openimsdk/tools/log"
-    "github.com/openimsdk/tools/mq/kafka"
     "github.com/openimsdk/tools/utils/jsonutil"
     "google.golang.org/protobuf/proto"
 )
@@ -166,7 +166,7 @@ func (k *K8sStaticConsistentHash) GetConnsAndOnlinePush(ctx context.Context, msg
         }
     }
     log.ZDebug(ctx, "genUsers send hosts struct:", "usersHost", usersHost)
-    var usersConns = make(map[*grpc.ClientConn][]string)
+    var usersConns = make(map[grpc.ClientConnInterface][]string)
     for host, userIds := range usersHost {
         tconn, _ := k.disCov.GetConn(ctx, host)
         usersConns[tconn] = userIds
@@ -14,6 +14,7 @@ import (
     "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
     "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
     "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
@@ -25,7 +26,6 @@ import (
     "github.com/openimsdk/tools/discovery"
     "github.com/openimsdk/tools/log"
     "github.com/openimsdk/tools/mcontext"
-    "github.com/openimsdk/tools/mq/kafka"
     "github.com/openimsdk/tools/utils/datautil"
     "github.com/openimsdk/tools/utils/jsonutil"
     "github.com/openimsdk/tools/utils/timeutil"
@@ -192,7 +192,7 @@ func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID
         return err
     }
     for _, v := range conns {
-        log.ZDebug(ctx, "forceKickOff", "conn", v.Target())
+        log.ZDebug(ctx, "forceKickOff", "userID", userID, "platformID", platformID)
         client := msggateway.NewMsgGatewayClient(v)
         kickReq := &msggateway.KickUserOfflineReq{KickUserIDList: []string{userID}, PlatformID: platformID}
         _, err := client.KickUserOffline(ctx, kickReq)
@@ -49,11 +49,6 @@ func (m *msgServer) SendMsg(ctx context.Context, req *pbmsg.SendMsgReq) (*pbmsg.

 func (m *msgServer) sendMsg(ctx context.Context, req *pbmsg.SendMsgReq, before **sdkws.MsgData) (*pbmsg.SendMsgResp, error) {
     m.encapsulateMsgData(req.MsgData)
-    if req.MsgData.ContentType == constant.Stream {
-        if err := m.handlerStreamMsg(ctx, req.MsgData); err != nil {
-            return nil, err
-        }
-    }
     switch req.MsgData.SessionType {
     case constant.SingleChatType:
         return m.sendMsgSingleChat(ctx, req, before)
@@ -19,11 +19,12 @@ import (
     "encoding/base64"
     "encoding/hex"
     "encoding/json"
-    "github.com/openimsdk/open-im-server/v3/pkg/authverify"
     "path"
     "strconv"
     "time"

+    "github.com/openimsdk/open-im-server/v3/pkg/authverify"
+
     "github.com/google/uuid"
     "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@@ -37,7 +38,10 @@ import (
 )

 func (t *thirdServer) PartLimit(ctx context.Context, req *third.PartLimitReq) (*third.PartLimitResp, error) {
-    limit := t.s3dataBase.PartLimit()
+    limit, err := t.s3dataBase.PartLimit()
+    if err != nil {
+        return nil, err
+    }
     return &third.PartLimitResp{
         MinPartSize: limit.MinPartSize,
         MaxPartSize: limit.MaxPartSize,
@@ -31,7 +31,7 @@ type CronTaskConfig struct {
 }

 func Start(ctx context.Context, conf *CronTaskConfig) error {
-    conf.runTimeEnv = runtimeenv.PrintRuntimeEnvironment()
+    conf.runTimeEnv = runtimeenv.RuntimeEnvironment()

     log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", conf.runTimeEnv, "chatRecordsClearTime", conf.CronTask.CronExecuteTime, "msgDestructTime", conf.CronTask.RetainChatRecords)
     if conf.CronTask.RetainChatRecords < 1 {
@@ -86,7 +86,7 @@ func (r *RootCmd) initEtcd() error {
         return err
     }
     disConfig := config.Discovery{}
-    env := runtimeenv.PrintRuntimeEnvironment()
+    env := runtimeenv.RuntimeEnvironment()
     err = config.Load(configDirectory, config.DiscoveryConfigFilename, config.EnvPrefixMap[config.DiscoveryConfigFilename],
         env, &disConfig)
     if err != nil {
@@ -125,7 +125,7 @@ func (r *RootCmd) initializeConfiguration(cmd *cobra.Command, opts *CmdOpts) err
         return err
     }

-    runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
+    runtimeEnv := runtimeenv.RuntimeEnvironment()

     // Load common configuration file
     //opts.configMap[ShareFileName] = StructEnvPrefix{EnvPrefix: shareEnvPrefix, ConfigStruct: &r.share}
@@ -18,9 +18,9 @@ import (
     "strings"
     "time"

+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/tools/db/mongoutil"
     "github.com/openimsdk/tools/db/redisutil"
-    "github.com/openimsdk/tools/mq/kafka"
     "github.com/openimsdk/tools/s3/aws"
     "github.com/openimsdk/tools/s3/cos"
     "github.com/openimsdk/tools/s3/kodo"
@@ -70,7 +70,7 @@ func Start[T any](ctx context.Context, discovery *conf.Discovery, prometheusConf
         return err
     }

-    runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
+    runTimeEnv := runtimeenv.RuntimeEnvironment()

     if !autoSetPorts {
         rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
@@ -177,6 +177,7 @@ func Start[T any](ctx context.Context, discovery *conf.Discovery, prometheusConf
     }

     err = client.Register(
+        ctx,
         rpcRegisterName,
         registerIP,
         port,
@@ -33,12 +33,12 @@ import (
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/protocol/constant"
     pbmsg "github.com/openimsdk/protocol/msg"
     "github.com/openimsdk/protocol/sdkws"
     "github.com/openimsdk/tools/errs"
     "github.com/openimsdk/tools/log"
-    "github.com/openimsdk/tools/mq/kafka"
     "github.com/openimsdk/tools/utils/datautil"
 )
@@ -9,12 +9,12 @@ import (
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
     pbmsg "github.com/openimsdk/protocol/msg"
     "github.com/openimsdk/protocol/sdkws"
     "github.com/openimsdk/tools/errs"
     "github.com/openimsdk/tools/log"
-    "github.com/openimsdk/tools/mq/kafka"
     "go.mongodb.org/mongo-driver/mongo"
 )
@@ -19,10 +19,10 @@ import (

     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/protocol/push"
     "github.com/openimsdk/protocol/sdkws"
     "github.com/openimsdk/tools/log"
-    "github.com/openimsdk/tools/mq/kafka"
 )

 type PushDatabase interface {
@@ -30,7 +30,7 @@ import (
 )

 type S3Database interface {
-    PartLimit() *s3.PartLimit
+    PartLimit() (*s3.PartLimit, error)
     PartSize(ctx context.Context, size int64) (int64, error)
     AuthSign(ctx context.Context, uploadID string, partNumbers []int) (*s3.AuthSignResult, error)
     InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error)
@@ -65,7 +65,7 @@ func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
     return s.s3.PartSize(ctx, size)
 }

-func (s *s3Database) PartLimit() *s3.PartLimit {
+func (s *s3Database) PartLimit() (*s3.PartLimit, error) {
     return s.s3.PartLimit()
 }
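Because S3Database.PartLimit now returns an error alongside the limit, every caller has to propagate it; the thirdServer.PartLimit hunk earlier in this commit shows the change. A hedged sketch of the same adjustment at any other call site (db here stands in for an S3Database value):

    // Before: limit := db.PartLimit()
    limit, err := db.PartLimit()
    if err != nil {
        return nil, err
    }
    // limit.MinPartSize and limit.MaxPartSize are then used exactly as before.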
pkg/common/storage/kafka/config.go (new file)

@@ -0,0 +1,33 @@
// Copyright © 2024 OpenIM open source community. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

type TLSConfig struct {
    EnableTLS          bool   `yaml:"enableTLS"`
    CACrt              string `yaml:"caCrt"`
    ClientCrt          string `yaml:"clientCrt"`
    ClientKey          string `yaml:"clientKey"`
    ClientKeyPwd       string `yaml:"clientKeyPwd"`
    InsecureSkipVerify bool   `yaml:"insecureSkipVerify"`
}

type Config struct {
    Username     string    `yaml:"username"`
    Password     string    `yaml:"password"`
    ProducerAck  string    `yaml:"producerAck"`
    CompressType string    `yaml:"compressType"`
    Addr         []string  `yaml:"addr"`
    TLS          TLSConfig `yaml:"tls"`
}
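A short sketch of building sarama settings from this Config with the BuildProducerConfig helper added in sarama.go below; the broker address is a placeholder and TLS is left disabled:

    // Sketch: assumes the package is imported as
    // "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka".
    conf := kafka.Config{
        ProducerAck:  "wait_for_all",             // maps to sarama.WaitForAll
        CompressType: "none",                     // parsed by sarama's CompressionCodec
        Addr:         []string{"localhost:9094"}, // placeholder broker address
        TLS:          kafka.TLSConfig{EnableTLS: false},
    }
    saramaConf, err := kafka.BuildProducerConfig(conf)
    if err != nil {
        return err
    }
    _ = saramaConf // passed to NewKafkaProducer; see producer.go below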
pkg/common/storage/kafka/consumer_group.go (new file)

@@ -0,0 +1,68 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

import (
    "context"
    "errors"

    "github.com/IBM/sarama"
    "github.com/openimsdk/tools/log"
)

type MConsumerGroup struct {
    sarama.ConsumerGroup
    groupID string
    topics  []string
}

func NewMConsumerGroup(conf *Config, groupID string, topics []string, autoCommitEnable bool) (*MConsumerGroup, error) {
    config, err := BuildConsumerGroupConfig(conf, sarama.OffsetNewest, autoCommitEnable)
    if err != nil {
        return nil, err
    }
    group, err := NewConsumerGroup(config, conf.Addr, groupID)
    if err != nil {
        return nil, err
    }
    return &MConsumerGroup{
        ConsumerGroup: group,
        groupID:       groupID,
        topics:        topics,
    }, nil
}

func (mc *MConsumerGroup) GetContextFromMsg(cMsg *sarama.ConsumerMessage) context.Context {
    return GetContextWithMQHeader(cMsg.Headers)
}

func (mc *MConsumerGroup) RegisterHandleAndConsumer(ctx context.Context, handler sarama.ConsumerGroupHandler) {
    for {
        err := mc.ConsumerGroup.Consume(ctx, mc.topics, handler)
        if errors.Is(err, sarama.ErrClosedConsumerGroup) {
            return
        }
        if errors.Is(err, context.Canceled) {
            return
        }
        if err != nil {
            log.ZWarn(ctx, "consume err", err, "topic", mc.topics, "groupID", mc.groupID)
        }
    }
}

func (mc *MConsumerGroup) Close() error {
    return mc.ConsumerGroup.Close()
}
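A minimal usage sketch, assuming a caller outside the package; the group ID, topic name, and handler are placeholders, and the handler only needs to satisfy sarama.ConsumerGroupHandler (Setup, Cleanup, ConsumeClaim):

    // exampleHandler is a hypothetical sarama.ConsumerGroupHandler.
    type exampleHandler struct{}

    func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
    func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
    func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
        for msg := range claim.Messages() {
            // process msg.Value, then mark the message as consumed
            sess.MarkMessage(msg, "")
        }
        return nil
    }

    func runConsumer(ctx context.Context, conf *kafka.Config) error {
        cg, err := kafka.NewMConsumerGroup(conf, "example-group", []string{"example-topic"}, true)
        if err != nil {
            return err
        }
        defer cg.Close()
        // Blocks, re-entering Consume until the group is closed or ctx is canceled.
        cg.RegisterHandleAndConsumer(ctx, exampleHandler{})
        return nil
    }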
pkg/common/storage/kafka/producer.go (new file)

@@ -0,0 +1,82 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

import (
    "context"

    "github.com/IBM/sarama"
    "github.com/openimsdk/tools/errs"
    "google.golang.org/protobuf/proto"
)

// Producer represents a Kafka producer.
type Producer struct {
    addr     []string
    topic    string
    config   *sarama.Config
    producer sarama.SyncProducer
}

func NewKafkaProducer(config *sarama.Config, addr []string, topic string) (*Producer, error) {
    producer, err := NewProducer(config, addr)
    if err != nil {
        return nil, err
    }
    return &Producer{
        addr:     addr,
        topic:    topic,
        config:   config,
        producer: producer,
    }, nil
}

// SendMessage sends a message to the Kafka topic configured in the Producer.
func (p *Producer) SendMessage(ctx context.Context, key string, msg proto.Message) (int32, int64, error) {
    // Marshal the protobuf message
    bMsg, err := proto.Marshal(msg)
    if err != nil {
        return 0, 0, errs.WrapMsg(err, "kafka proto Marshal err")
    }
    if len(bMsg) == 0 {
        return 0, 0, errs.WrapMsg(errEmptyMsg, "kafka proto Marshal err")
    }

    // Prepare Kafka message
    kMsg := &sarama.ProducerMessage{
        Topic: p.topic,
        Key:   sarama.StringEncoder(key),
        Value: sarama.ByteEncoder(bMsg),
    }

    // Validate message key and value
    if kMsg.Key.Length() == 0 || kMsg.Value.Length() == 0 {
        return 0, 0, errs.Wrap(errEmptyMsg)
    }

    // Attach context metadata as headers
    header, err := GetMQHeaderWithContext(ctx)
    if err != nil {
        return 0, 0, err
    }
    kMsg.Headers = header

    // Send the message
    partition, offset, err := p.producer.SendMessage(kMsg)
    if err != nil {
        return 0, 0, errs.WrapMsg(err, "p.producer.SendMessage error")
    }

    return partition, offset, nil
}
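A producer-side sketch, assuming a caller outside the package; the topic and key are placeholders, and ctx is expected to carry the OpenIM operation metadata (GetMQHeaderWithContext in util.go returns an error otherwise):

    func sendExample(ctx context.Context, conf kafka.Config, msg proto.Message) error {
        saramaConf, err := kafka.BuildProducerConfig(conf)
        if err != nil {
            return err
        }
        producer, err := kafka.NewKafkaProducer(saramaConf, conf.Addr, "example-topic")
        if err != nil {
            return err
        }
        // The key selects the partition via the hash partitioner set in BuildProducerConfig.
        partition, offset, err := producer.SendMessage(ctx, "example-key", msg)
        if err != nil {
            return err
        }
        _ = partition
        _ = offset
        return nil
    }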
pkg/common/storage/kafka/sarama.go (new file)

@@ -0,0 +1,85 @@
package kafka

import (
    "bytes"
    "strings"

    "github.com/IBM/sarama"
    "github.com/openimsdk/tools/errs"
)

func BuildConsumerGroupConfig(conf *Config, initial int64, autoCommitEnable bool) (*sarama.Config, error) {
    kfk := sarama.NewConfig()
    kfk.Version = sarama.V2_0_0_0
    kfk.Consumer.Offsets.Initial = initial
    kfk.Consumer.Offsets.AutoCommit.Enable = autoCommitEnable
    kfk.Consumer.Return.Errors = false
    if conf.Username != "" || conf.Password != "" {
        kfk.Net.SASL.Enable = true
        kfk.Net.SASL.User = conf.Username
        kfk.Net.SASL.Password = conf.Password
    }
    if conf.TLS.EnableTLS {
        tls, err := newTLSConfig(conf.TLS.ClientCrt, conf.TLS.ClientKey, conf.TLS.CACrt, []byte(conf.TLS.ClientKeyPwd), conf.TLS.InsecureSkipVerify)
        if err != nil {
            return nil, err
        }
        kfk.Net.TLS.Config = tls
        kfk.Net.TLS.Enable = true
    }
    return kfk, nil
}

func NewConsumerGroup(conf *sarama.Config, addr []string, groupID string) (sarama.ConsumerGroup, error) {
    cg, err := sarama.NewConsumerGroup(addr, groupID, conf)
    if err != nil {
        return nil, errs.WrapMsg(err, "NewConsumerGroup failed", "addr", addr, "groupID", groupID, "conf", *conf)
    }
    return cg, nil
}

func BuildProducerConfig(conf Config) (*sarama.Config, error) {
    kfk := sarama.NewConfig()
    kfk.Producer.Return.Successes = true
    kfk.Producer.Return.Errors = true
    kfk.Producer.Partitioner = sarama.NewHashPartitioner
    if conf.Username != "" || conf.Password != "" {
        kfk.Net.SASL.Enable = true
        kfk.Net.SASL.User = conf.Username
        kfk.Net.SASL.Password = conf.Password
    }
    switch strings.ToLower(conf.ProducerAck) {
    case "no_response":
        kfk.Producer.RequiredAcks = sarama.NoResponse
    case "wait_for_local":
        kfk.Producer.RequiredAcks = sarama.WaitForLocal
    case "wait_for_all":
        kfk.Producer.RequiredAcks = sarama.WaitForAll
    default:
        kfk.Producer.RequiredAcks = sarama.WaitForAll
    }
    if conf.CompressType == "" {
        kfk.Producer.Compression = sarama.CompressionNone
    } else {
        if err := kfk.Producer.Compression.UnmarshalText(bytes.ToLower([]byte(conf.CompressType))); err != nil {
            return nil, errs.WrapMsg(err, "UnmarshalText failed", "compressType", conf.CompressType)
        }
    }
    if conf.TLS.EnableTLS {
        tls, err := newTLSConfig(conf.TLS.ClientCrt, conf.TLS.ClientKey, conf.TLS.CACrt, []byte(conf.TLS.ClientKeyPwd), conf.TLS.InsecureSkipVerify)
        if err != nil {
            return nil, err
        }
        kfk.Net.TLS.Config = tls
        kfk.Net.TLS.Enable = true
    }
    return kfk, nil
}

func NewProducer(conf *sarama.Config, addr []string) (sarama.SyncProducer, error) {
    producer, err := sarama.NewSyncProducer(addr, conf)
    if err != nil {
        return nil, errs.WrapMsg(err, "NewSyncProducer failed", "addr", addr, "conf", *conf)
    }
    return producer, nil
}
pkg/common/storage/kafka/tls.go (new file)

@@ -0,0 +1,83 @@
// Copyright © 2024 OpenIM open source community. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

import (
    "crypto/tls"
    "crypto/x509"
    "encoding/pem"
    "os"

    "github.com/openimsdk/tools/errs"
)

// decryptPEM decrypts a PEM block using a password.
func decryptPEM(data []byte, passphrase []byte) ([]byte, error) {
    if len(passphrase) == 0 {
        return data, nil
    }
    b, _ := pem.Decode(data)
    d, err := x509.DecryptPEMBlock(b, passphrase)
    if err != nil {
        return nil, errs.WrapMsg(err, "DecryptPEMBlock failed")
    }
    return pem.EncodeToMemory(&pem.Block{
        Type:  b.Type,
        Bytes: d,
    }), nil
}

func readEncryptablePEMBlock(path string, pwd []byte) ([]byte, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return nil, errs.WrapMsg(err, "ReadFile failed", "path", path)
    }
    return decryptPEM(data, pwd)
}

// newTLSConfig setup the TLS config from general config file.
func newTLSConfig(clientCertFile, clientKeyFile, caCertFile string, keyPwd []byte, insecureSkipVerify bool) (*tls.Config, error) {
    var tlsConfig tls.Config
    if clientCertFile != "" && clientKeyFile != "" {
        certPEMBlock, err := os.ReadFile(clientCertFile)
        if err != nil {
            return nil, errs.WrapMsg(err, "ReadFile failed", "clientCertFile", clientCertFile)
        }
        keyPEMBlock, err := readEncryptablePEMBlock(clientKeyFile, keyPwd)
        if err != nil {
            return nil, err
        }

        cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
        if err != nil {
            return nil, errs.WrapMsg(err, "X509KeyPair failed")
        }
        tlsConfig.Certificates = []tls.Certificate{cert}
    }

    if caCertFile != "" {
        caCert, err := os.ReadFile(caCertFile)
        if err != nil {
            return nil, errs.WrapMsg(err, "ReadFile failed", "caCertFile", caCertFile)
        }
        caCertPool := x509.NewCertPool()
        if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
            return nil, errs.New("AppendCertsFromPEM failed")
        }
        tlsConfig.RootCAs = caCertPool
    }
    tlsConfig.InsecureSkipVerify = insecureSkipVerify
    return &tlsConfig, nil
}
pkg/common/storage/kafka/util.go (new file)

@@ -0,0 +1,34 @@
package kafka

import (
    "context"
    "errors"

    "github.com/IBM/sarama"
    "github.com/openimsdk/protocol/constant"
    "github.com/openimsdk/tools/mcontext"
)

var errEmptyMsg = errors.New("kafka binary msg is empty")

// GetMQHeaderWithContext extracts message queue headers from the context.
func GetMQHeaderWithContext(ctx context.Context) ([]sarama.RecordHeader, error) {
    operationID, opUserID, platform, connID, err := mcontext.GetCtxInfos(ctx)
    if err != nil {
        return nil, err
    }
    return []sarama.RecordHeader{
        {Key: []byte(constant.OperationID), Value: []byte(operationID)},
        {Key: []byte(constant.OpUserID), Value: []byte(opUserID)},
        {Key: []byte(constant.OpUserPlatform), Value: []byte(platform)},
        {Key: []byte(constant.ConnID), Value: []byte(connID)},
    }, nil
}

// GetContextWithMQHeader creates a context from message queue headers.
func GetContextWithMQHeader(header []*sarama.RecordHeader) context.Context {
    var values []string
    for _, recordHeader := range header {
        values = append(values, string(recordHeader.Value))
    }
    return mcontext.WithMustInfoCtx(values) // Attach extracted values to context
}
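These helpers carry OpenIM's operation metadata across the queue: the producer attaches operationID, opUserID, platform, and connID as record headers, and the consumer rebuilds a request context from them. A small sketch of the consuming side, assuming a *sarama.ConsumerMessage received inside ConsumeClaim:

    // Sketch: restore the request context from the Kafka record headers.
    func handle(mc *kafka.MConsumerGroup, cMsg *sarama.ConsumerMessage) {
        ctx := mc.GetContextFromMsg(cMsg) // operationID, opUserID, platform, connID restored
        _ = ctx                           // pass ctx on to the real message handler
    }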
pkg/common/storage/kafka/verify.go (new file)

@@ -0,0 +1,79 @@
// Copyright © 2024 OpenIM open source community. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kafka

import (
    "context"
    "fmt"

    "github.com/IBM/sarama"
    "github.com/openimsdk/tools/errs"
)

func CheckTopics(ctx context.Context, conf *Config, topics []string) error {
    kfk, err := BuildConsumerGroupConfig(conf, sarama.OffsetNewest, false)
    if err != nil {
        return err
    }
    cli, err := sarama.NewClient(conf.Addr, kfk)
    if err != nil {
        return errs.WrapMsg(err, "NewClient failed", "config: ", fmt.Sprintf("%+v", conf))
    }
    defer cli.Close()

    existingTopics, err := cli.Topics()
    if err != nil {
        return errs.WrapMsg(err, "Failed to list topics")
    }

    existingTopicsMap := make(map[string]bool)
    for _, t := range existingTopics {
        existingTopicsMap[t] = true
    }

    for _, topic := range topics {
        if !existingTopicsMap[topic] {
            return errs.New("topic not exist", "topic", topic).Wrap()
        }
    }
    return nil
}

func CheckHealth(ctx context.Context, conf *Config) error {
    kfk, err := BuildConsumerGroupConfig(conf, sarama.OffsetNewest, false)
    if err != nil {
        return err
    }
    cli, err := sarama.NewClient(conf.Addr, kfk)
    if err != nil {
        return errs.WrapMsg(err, "NewClient failed", "config: ", fmt.Sprintf("%+v", conf))
    }
    defer cli.Close()

    // Get broker list
    brokers := cli.Brokers()
    if len(brokers) == 0 {
        return errs.New("no brokers found").Wrap()
    }

    // Check if all brokers are reachable
    for _, broker := range brokers {
        if err := broker.Open(kfk); err != nil {
            return errs.WrapMsg(err, "failed to open broker", "broker", broker.Addr())
        }
    }

    return nil
}
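A sketch of running these checks at startup, before wiring producers and consumers; the topic name is a placeholder:

    func checkKafka(ctx context.Context, conf *kafka.Config) error {
        // Fail fast if no broker is reachable.
        if err := kafka.CheckHealth(ctx, conf); err != nil {
            return err
        }
        // Fail fast if a required topic has not been created yet.
        return kafka.CheckTopics(ctx, conf, []string{"example-topic"})
    }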
@@ -25,11 +25,11 @@ import (
     "time"

     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/kafka"
     "github.com/openimsdk/tools/db/mongoutil"
     "github.com/openimsdk/tools/db/redisutil"
     "github.com/openimsdk/tools/discovery/etcd"
     "github.com/openimsdk/tools/discovery/zookeeper"
-    "github.com/openimsdk/tools/mq/kafka"
     "github.com/openimsdk/tools/s3/minio"
     "github.com/openimsdk/tools/system/program"
     "github.com/openimsdk/tools/utils/runtimeenv"
@@ -84,7 +84,7 @@ func initConfig(configDir string) (*config.Mongo, *config.Redis, *config.Kafka,
         discovery   = &config.Discovery{}
         thirdConfig = &config.Third{}
     )
-    runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
+    runtimeEnv := runtimeenv.RuntimeEnvironment()

     err := config.Load(configDir, config.MongodbConfigFileName, config.EnvPrefixMap[config.MongodbConfigFileName], runtimeEnv, mongoConfig)
     if err != nil {
@@ -43,7 +43,7 @@ const (
 )

 func readConfig[T any](dir string, name string) (*T, error) {
-    if runtimeenv.PrintRuntimeEnvironment() == config.KUBERNETES {
+    if runtimeenv.RuntimeEnvironment() == config.KUBERNETES {
         dir = os.Getenv(config.MountConfigFilePath)
     }
     v := viper.New()