feat: optimize code and support running in single process mode (#3142)

* pb

* fix: Modifying other fields while setting IsPrivateChat does not take effect

* fix: quote message error revoke

* refactoring scheduled tasks

* refactoring scheduled tasks

* refactoring scheduled tasks

* refactoring scheduled tasks

* refactoring scheduled tasks

* refactoring scheduled tasks

* upgrading pkg tools

* fix

* fix

* optimize log output

* feat: support GetLastMessage

* feat: support GetLastMessage

* feat: s3 switch

* feat: s3 switch

* fix: GetUsersOnline

* feat: SendBusinessNotification supported configuration parameters

* feat: SendBusinessNotification supported configuration parameters

* feat: SendBusinessNotification supported configuration parameters

* feat: seq conversion failed without exiting

* monolithic

* fix: DeleteDoc crash

* fix: DeleteDoc crash

* fix: monolithic

* fix: monolithic

* fix: fill send time

* fix: fill send time

* fix: crash caused by withdrawing messages from users who have left the group

* fix: mq

* fix: mq

* fix: user msg timestamp

* fix: mq

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* seq read config

* seq read config

* 1

* 1

* fix: the source message of the reference is withdrawn, and the referenced message is deleted

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* 1

* 1
This commit is contained in:
chao 2025-02-14 16:18:27 +08:00 committed by GitHub
parent e37ea50b94
commit 9ed6200e45
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
105 changed files with 3385 additions and 1925 deletions

419
cmd/main.go Normal file
View File

@ -0,0 +1,419 @@
package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"net"
"os"
"os/signal"
"path"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/mitchellh/mapstructure"
"github.com/openimsdk/open-im-server/v3/internal/api"
"github.com/openimsdk/open-im-server/v3/internal/msggateway"
"github.com/openimsdk/open-im-server/v3/internal/msgtransfer"
"github.com/openimsdk/open-im-server/v3/internal/push"
"github.com/openimsdk/open-im-server/v3/internal/rpc/auth"
"github.com/openimsdk/open-im-server/v3/internal/rpc/conversation"
"github.com/openimsdk/open-im-server/v3/internal/rpc/group"
"github.com/openimsdk/open-im-server/v3/internal/rpc/msg"
"github.com/openimsdk/open-im-server/v3/internal/rpc/relation"
"github.com/openimsdk/open-im-server/v3/internal/rpc/third"
"github.com/openimsdk/open-im-server/v3/internal/rpc/user"
"github.com/openimsdk/open-im-server/v3/internal/tools/cron"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/version"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery/standalone"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/system/program"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/network"
"github.com/spf13/viper"
"google.golang.org/grpc"
)
// init switches the config package into standalone (single-process) mode and
// registers all Prometheus metrics before main runs.
func init() {
	config.SetStandalone()
	prommetrics.RegistryAll()
}
// main parses the -c config-path flag, registers every service of the
// monolithic (single-process) deployment, and runs them all via cmds.run.
// It exits with status 1 when the flag is missing or a service fails.
func main() {
	var configPath string
	flag.StringVar(&configPath, "c", "", "config path")
	flag.Parse()
	if configPath == "" {
		_, _ = fmt.Fprintln(os.Stderr, "config path is empty")
		os.Exit(1) // os.Exit never returns; no statement needed after it
	}
	cmd := newCmds(configPath)
	// Non-blocking services: initialize and return immediately.
	putCmd(cmd, false, auth.Start)
	putCmd(cmd, false, conversation.Start)
	putCmd(cmd, false, relation.Start)
	putCmd(cmd, false, group.Start)
	putCmd(cmd, false, msg.Start)
	putCmd(cmd, false, third.Start)
	putCmd(cmd, false, user.Start)
	putCmd(cmd, false, push.Start)
	// Blocking services: run until the shared context is canceled.
	putCmd(cmd, true, msggateway.Start)
	putCmd(cmd, true, msgtransfer.Start)
	putCmd(cmd, true, api.Start)
	putCmd(cmd, true, cron.Start)
	ctx := context.Background()
	if err := cmd.run(ctx); err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "server exit %s", err)
		os.Exit(1)
	}
}
// newCmds creates a cmds runner that reads its configuration files from
// the confPath directory.
func newCmds(confPath string) *cmds {
	return &cmds{confPath: confPath}
}
// cmdName describes one runnable service: its display name, its entry
// function, and whether that function blocks until shutdown.
type cmdName struct {
	// Name is the short service name derived from the Start function's package.
	Name string
	// Func starts the service; it receives the shared, cancelable context.
	Func func(ctx context.Context) error
	// Block reports whether Func runs until the context is canceled
	// (gateway/transfer/api/cron) rather than returning after initialization.
	Block bool
}
// cmds loads all configuration sections from confPath and runs the
// registered services inside a single process.
type cmds struct {
	// confPath is the directory containing the YAML configuration files.
	confPath string
	// cmds holds the registered services in registration order.
	cmds []cmdName
	// config is the aggregated configuration, filled by initAllConfig.
	config config.AllConfig
	// conf maps getTypePath keys to the corresponding section values of config.
	conf map[string]reflect.Value
}
// getTypePath builds a unique lookup key for a config section type by
// joining its package path with its type name.
func (x *cmds) getTypePath(typ reflect.Type) string {
	pkgPath := typ.PkgPath()
	typeName := typ.Name()
	return path.Join(pkgPath, typeName)
}
// initDiscovery forces standalone discovery mode and assigns every exported
// string field of Discovery.RpcService its own field name, so each service is
// addressed by its struct field name in single-process mode.
func (x *cmds) initDiscovery() {
	x.config.Discovery.Enable = "standalone"
	services := reflect.ValueOf(&x.config.Discovery.RpcService).Elem()
	serviceType := services.Type()
	for i := 0; i < serviceType.NumField(); i++ {
		f := serviceType.Field(i)
		// Only exported string fields carry service names.
		if !f.IsExported() || f.Type.Kind() != reflect.String {
			continue
		}
		services.Field(i).SetString(f.Name)
	}
}
// initAllConfig reads every configuration section of config.AllConfig from
// its YAML file under confPath (missing files are skipped), caches each
// section value in x.conf keyed by getTypePath, and then applies the
// standalone-mode overrides (discovery, redis enabled, local cache cleared,
// notification defaults).
func (x *cmds) initAllConfig() error {
	x.conf = make(map[string]reflect.Value)
	root := reflect.ValueOf(&x.config).Elem()
	for i := 0; i < root.NumField(); i++ {
		section := root.Field(i)
		// Unwrap pointer fields down to the concrete section struct.
		for section.Kind() == reflect.Ptr {
			section = section.Elem()
		}
		x.conf[x.getTypePath(section.Type())] = section
		target := section.Addr().Interface()
		fileName := target.(interface{ GetConfigFileName() string }).GetConfigFileName()
		raw, err := os.ReadFile(filepath.Join(x.confPath, fileName))
		if err != nil {
			// A section without a config file keeps its zero value.
			if os.IsNotExist(err) {
				continue
			}
			return err
		}
		v := viper.New()
		v.SetConfigType("yaml")
		if err := v.ReadConfig(bytes.NewReader(raw)); err != nil {
			return err
		}
		withTag := func(dc *mapstructure.DecoderConfig) {
			dc.TagName = config.StructTagName
		}
		if err := v.Unmarshal(target, withTag); err != nil {
			return err
		}
	}
	x.initDiscovery()
	// Standalone mode always uses redis and disables the local cache layer.
	x.config.Redis.Disable = false
	x.config.LocalCache = config.LocalCache{}
	config.InitNotification(&x.config.Notification)
	return nil
}
// parseConf populates every exported field of the given config struct from
// the cached sections in x.conf. Fields with no cached section are handled
// specially: Index stays at its zero value, Path receives confPath, and
// AllConfig (value or pointer) receives the aggregated configuration; any
// other unmatched field is an error.
func (x *cmds) parseConf(conf any) error {
	dst := reflect.ValueOf(conf)
	for dst.Kind() == reflect.Ptr {
		dst = dst.Elem()
	}
	dstType := dst.Type()
	for i := 0; i < dst.NumField(); i++ {
		fieldInfo := dstType.Field(i)
		if !fieldInfo.IsExported() {
			continue
		}
		field := dst.Field(i)
		if cached, ok := x.conf[x.getTypePath(field.Type())]; ok {
			field.Set(cached)
			continue
		}
		switch field.Interface().(type) {
		case config.Index:
			// Left at its zero value in standalone mode.
		case config.Path:
			field.SetString(x.confPath)
		case config.AllConfig:
			field.Set(reflect.ValueOf(x.config))
		case *config.AllConfig:
			field.Set(reflect.ValueOf(&x.config))
		default:
			return fmt.Errorf("config field %s %s not found", dst.Type().Name(), fieldInfo.Name)
		}
	}
	return nil
}
// add appends a named service entry to the run list.
func (x *cmds) add(name string, block bool, fn func(ctx context.Context) error) {
	entry := cmdName{
		Name:  name,
		Block: block,
		Func:  fn,
	}
	x.cmds = append(x.cmds, entry)
}
// initLog configures the global logger from the loaded Log config section.
func (x *cmds) initLog() error {
	logConf := x.config.Log
	return log.InitLoggerFromConfig(
		"openim-server",
		program.GetProcessName(),
		"", "",
		logConf.RemainLogLevel,
		logConf.IsStdout,
		logConf.IsJson,
		logConf.StorageLocation,
		logConf.RemainRotationCount,
		logConf.RotationTime,
		strings.TrimSpace(version.Version),
		logConf.IsSimplify,
	)
}
// run wires everything together: it loads all configuration, initializes
// logging, optionally serves Prometheus metrics, starts every registered
// service, and then blocks until a signal arrives or a service exits.
//
// Non-blocking services (cmd.Block == false) are started synchronously and
// must return quickly; blocking services run in goroutines tracked by a
// cmdManger so shutdown can wait (up to 10s) for them to finish.
func (x *cmds) run(ctx context.Context) error {
	if len(x.cmds) == 0 {
		return fmt.Errorf("no command to run")
	}
	if err := x.initAllConfig(); err != nil {
		return err
	}
	if err := x.initLog(); err != nil {
		return err
	}
	ctx, cancel := context.WithCancelCause(ctx)
	go func() {
		<-ctx.Done()
		log.ZError(ctx, "context server exit cause", context.Cause(ctx))
	}()
	if prometheus := x.config.API.Prometheus; prometheus.Enable {
		var (
			port int
			err  error
		)
		// With AutoSetPorts, port stays 0 and the OS picks a free one.
		if !prometheus.AutoSetPorts {
			port, err = datautil.GetElemByIndex(prometheus.Ports, 0)
			if err != nil {
				return err
			}
		}
		ip, err := network.GetLocalIP()
		if err != nil {
			return err
		}
		listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
		if err != nil {
			return fmt.Errorf("prometheus listen %d error %w", port, err)
		}
		defer listener.Close()
		log.ZDebug(ctx, "prometheus start", "addr", listener.Addr())
		// Publish the actual bound port so discovery-based scraping works.
		target, err := json.Marshal(prommetrics.BuildDefaultTarget(ip, listener.Addr().(*net.TCPAddr).Port))
		if err != nil {
			return err
		}
		if err := standalone.GetKeyValue().SetKey(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), target); err != nil {
			return err
		}
		go func() {
			err := prommetrics.Start(listener)
			if err == nil {
				err = fmt.Errorf("http done")
			}
			cancel(fmt.Errorf("prometheus %w", err))
		}()
	}
	go func() {
		sigs := make(chan os.Signal, 1)
		// SIGKILL cannot be caught or handled (see os/signal docs), so
		// registering it was a no-op; only SIGTERM and SIGINT are watched.
		signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT)
		select {
		case <-ctx.Done():
			return
		case val := <-sigs:
			log.ZDebug(ctx, "recv signal", "signal", val.String())
			cancel(fmt.Errorf("signal %s", val.String()))
		}
	}()
	// Start the non-blocking services first; each must initialize and return.
	// (The previous per-iteration goroutine here was dead code: cmd.Block is
	// always false after the guard above, so it has been removed.)
	for i := range x.cmds {
		cmd := x.cmds[i]
		if cmd.Block {
			continue
		}
		if err := cmd.Func(ctx); err != nil {
			cancel(fmt.Errorf("server %s exit %w", cmd.Name, err))
			return err
		}
	}
	// Start the blocking services; any exit (error or clean) cancels the
	// shared context and triggers shutdown of everything else.
	var wait cmdManger
	for i := range x.cmds {
		cmd := x.cmds[i]
		if !cmd.Block {
			continue
		}
		wait.Start(cmd.Name)
		go func() {
			defer wait.Shutdown(cmd.Name)
			if err := cmd.Func(ctx); err != nil {
				cancel(fmt.Errorf("server %s exit %w", cmd.Name, err))
				return
			}
			cancel(fmt.Errorf("server %s exit", cmd.Name))
		}()
	}
	<-ctx.Done()
	exitCause := context.Cause(ctx)
	log.ZWarn(ctx, "notification of service closure", exitCause)
	// Wait up to 10s for all blocking services to report shutdown.
	done := wait.Wait()
	timeout := time.NewTimer(time.Second * 10)
	defer timeout.Stop()
	for {
		select {
		case <-timeout.C:
			log.ZWarn(ctx, "server exit timeout", nil, "running", wait.Running())
			return exitCause
		case _, ok := <-done:
			if ok {
				// A buffered progress signal: some services are still running.
				log.ZWarn(ctx, "waiting for the service to exit", nil, "running", wait.Running())
			} else {
				log.ZInfo(ctx, "all server exit done")
				return exitCause
			}
		}
	}
}
// putCmd derives a service name from fn's package (the part of the symbol
// name before the first dot) and registers a wrapper that parses fn's config
// struct via cmd.parseConf before invoking it with the standalone
// discovery connection and service registrar.
func putCmd[C any](cmd *cmds, block bool, fn func(ctx context.Context, config *C, client discovery.Conn, server grpc.ServiceRegistrar) error) {
	symbol := path.Base(runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
	name, _, _ := strings.Cut(symbol, ".")
	cmd.add(name, block, func(ctx context.Context) error {
		var conf C
		if err := cmd.parseConf(&conf); err != nil {
			return err
		}
		return fn(ctx, &conf, standalone.GetDiscoveryConn(), standalone.GetServiceRegistrar())
	})
}
// cmdManger tracks named, running blocking services. Each Shutdown either
// pushes a progress signal into done (while services remain) or closes done
// (when the last one exits), letting a waiter observe both states.
type cmdManger struct {
	lock  sync.Mutex
	done  chan struct{}
	count int
	names map[string]struct{}
}

// Start registers a running service by name; it panics on duplicates.
func (x *cmdManger) Start(name string) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.names == nil {
		x.names = map[string]struct{}{}
	}
	if x.done == nil {
		// Buffered by one so progress signals never block Shutdown.
		x.done = make(chan struct{}, 1)
	}
	if _, exists := x.names[name]; exists {
		panic(fmt.Errorf("cmd %s already exists", name))
	}
	x.names[name] = struct{}{}
	x.count++
}

// Shutdown marks a service as stopped; it panics if the name was never
// started. Closing done signals "all stopped"; a non-blocking send signals
// "one stopped, others remain".
func (x *cmdManger) Shutdown(name string) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if _, exists := x.names[name]; !exists {
		panic(fmt.Errorf("cmd %s not exists", name))
	}
	delete(x.names, name)
	x.count--
	if x.count == 0 {
		close(x.done)
		return
	}
	select {
	case x.done <- struct{}{}:
	default:
	}
}

// Wait returns a channel that yields progress signals while services are
// running and is closed once all have shut down. If nothing is running, a
// pre-closed channel is returned.
func (x *cmdManger) Wait() <-chan struct{} {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.count == 0 || x.done == nil {
		closed := make(chan struct{})
		close(closed)
		return closed
	}
	return x.done
}

// Running reports the names of services that have not yet shut down.
func (x *cmdManger) Running() []string {
	x.lock.Lock()
	defer x.lock.Unlock()
	out := make([]string, 0, len(x.names))
	for n := range x.names {
		out = append(out, n)
	}
	return out
}

6
go.mod
View File

@ -13,7 +13,7 @@ require (
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/openimsdk/protocol v0.0.72-alpha.71 github.com/openimsdk/protocol v0.0.72-alpha.71
github.com/openimsdk/tools v0.0.50-alpha.70 github.com/openimsdk/tools v0.0.50-alpha.74
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_golang v1.18.0
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
@ -27,7 +27,6 @@ require (
require github.com/google/uuid v1.6.0 require github.com/google/uuid v1.6.0
require ( require (
github.com/IBM/sarama v1.43.0
github.com/fatih/color v1.14.1 github.com/fatih/color v1.14.1
github.com/gin-contrib/gzip v1.0.1 github.com/gin-contrib/gzip v1.0.1
github.com/go-redis/redis v6.15.9+incompatible github.com/go-redis/redis v6.15.9+incompatible
@ -55,6 +54,7 @@ require (
cloud.google.com/go/iam v1.1.7 // indirect cloud.google.com/go/iam v1.1.7 // indirect
cloud.google.com/go/longrunning v0.5.5 // indirect cloud.google.com/go/longrunning v0.5.5 // indirect
cloud.google.com/go/storage v1.40.0 // indirect cloud.google.com/go/storage v1.40.0 // indirect
github.com/IBM/sarama v1.43.0 // indirect
github.com/MicahParks/keyfunc v1.9.0 // indirect github.com/MicahParks/keyfunc v1.9.0 // indirect
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect
github.com/aws/aws-sdk-go-v2 v1.32.5 // indirect github.com/aws/aws-sdk-go-v2 v1.32.5 // indirect
@ -219,3 +219,5 @@ require (
golang.org/x/crypto v0.27.0 // indirect golang.org/x/crypto v0.27.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
) )
//replace github.com/openimsdk/tools => /Users/chao/Desktop/code/tools

4
go.sum
View File

@ -349,8 +349,8 @@ github.com/openimsdk/gomake v0.0.15-alpha.2 h1:5Q8yl8ezy2yx+q8/ucU/t4kJnDfCzNOrk
github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
github.com/openimsdk/protocol v0.0.72-alpha.71 h1:R3utzOlqepaJWTAmnfJi4ccUM/XIoFasSyjQMOipM70= github.com/openimsdk/protocol v0.0.72-alpha.71 h1:R3utzOlqepaJWTAmnfJi4ccUM/XIoFasSyjQMOipM70=
github.com/openimsdk/protocol v0.0.72-alpha.71/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw= github.com/openimsdk/protocol v0.0.72-alpha.71/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw=
github.com/openimsdk/tools v0.0.50-alpha.70 h1:pyqWkJzXbELWU9KKAsWkj3g0flJYNsDTcjR5SLFQAZU= github.com/openimsdk/tools v0.0.50-alpha.74 h1:yh10SiMiivMEjicEQg+QAsH4pvaO+4noMPdlw+ew0Kc=
github.com/openimsdk/tools v0.0.50-alpha.70/go.mod h1:B+oqV0zdewN7OiEHYJm+hW+8/Te7B8tHHgD8rK5ZLZk= github.com/openimsdk/tools v0.0.50-alpha.74/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=

View File

@ -30,16 +30,14 @@ type ConfigManager struct {
client *clientv3.Client client *clientv3.Client
configPath string configPath string
runtimeEnv string
} }
func NewConfigManager(IMAdminUserID []string, cfg *config.AllConfig, client *clientv3.Client, configPath string, runtimeEnv string) *ConfigManager { func NewConfigManager(IMAdminUserID []string, cfg *config.AllConfig, client *clientv3.Client, configPath string) *ConfigManager {
cm := &ConfigManager{ cm := &ConfigManager{
imAdminUserID: IMAdminUserID, imAdminUserID: IMAdminUserID,
config: cfg, config: cfg,
client: client, client: client,
configPath: configPath, configPath: configPath,
runtimeEnv: runtimeEnv,
} }
return cm return cm
} }
@ -73,7 +71,7 @@ func (cm *ConfigManager) GetConfig(c *gin.Context) {
func (cm *ConfigManager) GetConfigList(c *gin.Context) { func (cm *ConfigManager) GetConfigList(c *gin.Context) {
var resp apistruct.GetConfigListResp var resp apistruct.GetConfigListResp
resp.ConfigNames = cm.config.GetConfigNames() resp.ConfigNames = cm.config.GetConfigNames()
resp.Environment = runtimeenv.PrintRuntimeEnvironment() resp.Environment = runtimeenv.RuntimeEnvironment()
resp.Version = version.Version resp.Version = version.Version
apiresp.GinSuccess(c, resp) apiresp.GinSuccess(c, resp)
@ -209,13 +207,7 @@ func (cm *ConfigManager) resetConfig(c *gin.Context, checkChange bool, ops ...cl
changedKeys := make([]string, 0, len(configMap)) changedKeys := make([]string, 0, len(configMap))
for k, v := range configMap { for k, v := range configMap {
err := config.Load( err := config.Load(cm.configPath, k, config.EnvPrefixMap[k], v.new)
cm.configPath,
k,
config.EnvPrefixMap[k],
cm.runtimeEnv,
v.new,
)
if err != nil { if err != nil {
log.ZError(c, "load config failed", err) log.ZError(c, "load config failed", err)
continue continue

View File

@ -20,157 +20,85 @@ import (
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
"os"
"os/signal"
"strconv" "strconv"
"syscall"
"time" "time"
conf "github.com/openimsdk/open-im-server/v3/pkg/common/config" conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery" "github.com/openimsdk/tools/discovery"
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/network" "github.com/openimsdk/tools/utils/network"
"github.com/openimsdk/tools/utils/runtimeenv" "github.com/openimsdk/tools/utils/runtimeenv"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
) )
type Config struct { type Config struct {
*conf.AllConfig conf.AllConfig
RuntimeEnv string ConfigPath conf.Path
ConfigPath string Index conf.Index
} }
func Start(ctx context.Context, index int, config *Config) error { func Start(ctx context.Context, config *Config, client discovery.Conn, service grpc.ServiceRegistrar) error {
apiPort, err := datautil.GetElemByIndex(config.API.Api.Ports, index) apiPort, err := datautil.GetElemByIndex(config.API.Api.Ports, int(config.Index))
if err != nil { if err != nil {
return err return err
} }
config.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
client, err := kdisc.NewDiscoveryRegister(&config.Discovery, config.RuntimeEnv, []string{
config.Discovery.RpcService.MessageGateway,
})
if err != nil {
return errs.WrapMsg(err, "failed to register discovery service")
}
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
var (
netDone = make(chan struct{}, 1)
netErr error
prometheusPort int
)
registerIP, err := network.GetRpcRegisterIP("")
if err != nil {
return err
}
getAutoPort := func() (net.Listener, int, error) {
registerAddr := net.JoinHostPort(registerIP, "0")
listener, err := net.Listen("tcp", registerAddr)
if err != nil {
return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
}
_, portStr, _ := net.SplitHostPort(listener.Addr().String())
port, _ := strconv.Atoi(portStr)
return listener, port, nil
}
if config.API.Prometheus.AutoSetPorts && config.Discovery.Enable != conf.ETCD {
return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
}
router, err := newGinRouter(ctx, client, config) router, err := newGinRouter(ctx, client, config)
if err != nil { if err != nil {
return err return err
} }
if config.API.Prometheus.Enable {
var (
listener net.Listener
)
if config.API.Prometheus.AutoSetPorts { apiCtx, apiCancel := context.WithCancelCause(context.Background())
listener, prometheusPort, err = getAutoPort() done := make(chan struct{})
if err != nil { go func() {
return err httpServer := &http.Server{
} Handler: router,
Addr: net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort)),
etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
_, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
if err != nil {
return errs.WrapMsg(err, "etcd put err")
}
} else {
prometheusPort, err = datautil.GetElemByIndex(config.API.Prometheus.Ports, index)
if err != nil {
return err
}
listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
if err != nil {
return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
}
} }
go func() { go func() {
if err := prommetrics.ApiInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) { defer close(done)
netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort)) select {
netDone <- struct{}{} case <-ctx.Done():
apiCancel(fmt.Errorf("recv ctx %w", context.Cause(ctx)))
case <-apiCtx.Done():
}
log.ZDebug(ctx, "api server is shutting down")
if err := httpServer.Shutdown(context.Background()); err != nil {
log.ZWarn(ctx, "api server shutdown err", err)
} }
}() }()
log.CInfo(ctx, "api server is init", "runtimeEnv", runtimeenv.RuntimeEnvironment(), "address", httpServer.Addr, "apiPort", apiPort)
} err := httpServer.ListenAndServe()
address := net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort)) if err == nil {
err = errors.New("api done")
server := http.Server{Addr: address, Handler: router}
log.CInfo(ctx, "API server is initializing", "runtimeEnv", config.RuntimeEnv, "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
go func() {
err = server.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, fmt.Sprintf("api start err: %s", server.Addr))
netDone <- struct{}{}
} }
apiCancel(err)
}() }()
if config.Discovery.Enable == conf.ETCD { //if config.Discovery.Enable == conf.ETCD {
cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames()) // cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames())
cm.Watch(ctx) // cm.Watch(ctx)
} //}
//sigs := make(chan os.Signal, 1)
sigs := make(chan os.Signal, 1) //signal.Notify(sigs, syscall.SIGTERM)
signal.Notify(sigs, syscall.SIGTERM) //select {
//case val := <-sigs:
shutdown := func() error { // log.ZDebug(ctx, "recv exit", "signal", val.String())
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) // cancel(fmt.Errorf("signal %s", val.String()))
defer cancel() //case <-ctx.Done():
err := server.Shutdown(ctx) //}
if err != nil { <-apiCtx.Done()
return errs.WrapMsg(err, "shutdown err") exitCause := context.Cause(ctx)
} log.ZWarn(ctx, "api server exit", exitCause)
return nil timer := time.NewTimer(time.Second * 15)
} defer timer.Stop()
disetcd.RegisterShutDown(shutdown)
select { select {
case <-sigs: case <-timer.C:
program.SIGTERMExit() log.ZWarn(ctx, "api server graceful stop timeout", nil)
if err := shutdown(); err != nil { case <-done:
return err log.ZDebug(ctx, "api server graceful stop done")
}
case <-netDone:
close(netDone)
return netErr
} }
return nil return exitCause
} }

View File

@ -2,6 +2,7 @@ package api
import ( import (
"encoding/json" "encoding/json"
"errors"
"net/http" "net/http"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@ -11,16 +12,16 @@ import (
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery/etcd" "github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
clientv3 "go.etcd.io/etcd/client/v3" clientv3 "go.etcd.io/etcd/client/v3"
) )
type PrometheusDiscoveryApi struct { type PrometheusDiscoveryApi struct {
config *Config config *Config
client *clientv3.Client client *clientv3.Client
kv discovery.KeyValue
} }
func NewPrometheusDiscoveryApi(config *Config, client discovery.SvcDiscoveryRegistry) *PrometheusDiscoveryApi { func NewPrometheusDiscoveryApi(config *Config, client discovery.Conn) *PrometheusDiscoveryApi {
api := &PrometheusDiscoveryApi{ api := &PrometheusDiscoveryApi{
config: config, config: config,
} }
@ -30,43 +31,26 @@ func NewPrometheusDiscoveryApi(config *Config, client discovery.SvcDiscoveryRegi
return api return api
} }
func (p *PrometheusDiscoveryApi) Enable(c *gin.Context) {
if p.config.Discovery.Enable != conf.ETCD {
c.JSON(http.StatusOK, []struct{}{})
c.Abort()
}
}
func (p *PrometheusDiscoveryApi) discovery(c *gin.Context, key string) { func (p *PrometheusDiscoveryApi) discovery(c *gin.Context, key string) {
eResp, err := p.client.Get(c, prommetrics.BuildDiscoveryKey(key)) value, err := p.kv.GetKey(c, prommetrics.BuildDiscoveryKey(key))
if err != nil { if err != nil {
// Log and respond with an error if preparation fails. if errors.Is(err, discovery.ErrNotSupportedKeyValue) {
apiresp.GinError(c, errs.WrapMsg(err, "etcd get err")) c.JSON(http.StatusOK, []struct{}{})
return
}
apiresp.GinError(c, errs.WrapMsg(err, "get key value"))
return return
} }
if len(eResp.Kvs) == 0 { if len(value) == 0 {
c.JSON(http.StatusOK, []*prommetrics.Target{}) c.JSON(http.StatusOK, []*prommetrics.RespTarget{})
return
} }
var resp prommetrics.RespTarget
var ( if err := json.Unmarshal(value, &resp); err != nil {
resp = &prommetrics.RespTarget{ apiresp.GinError(c, errs.WrapMsg(err, "json unmarshal err"))
Targets: make([]string, 0, len(eResp.Kvs)), return
}
)
for i := range eResp.Kvs {
var target prommetrics.Target
err = json.Unmarshal(eResp.Kvs[i].Value, &target)
if err != nil {
log.ZError(c, "prometheus unmarshal err", errs.Wrap(err))
}
resp.Targets = append(resp.Targets, target.Target)
if resp.Labels == nil {
resp.Labels = target.Labels
}
} }
c.JSON(http.StatusOK, []*prommetrics.RespTarget{&resp})
c.JSON(200, []*prommetrics.RespTarget{resp})
} }
func (p *PrometheusDiscoveryApi) Api(c *gin.Context) { func (p *PrometheusDiscoveryApi) Api(c *gin.Context) {

View File

@ -52,7 +52,7 @@ func prommetricsGin() gin.HandlerFunc {
} }
} }
func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cfg *Config) (*gin.Engine, error) { func newGinRouter(ctx context.Context, client discovery.Conn, cfg *Config) (*gin.Engine, error) {
authConn, err := client.GetConn(ctx, cfg.Discovery.RpcService.Auth) authConn, err := client.GetConn(ctx, cfg.Discovery.RpcService.Auth)
if err != nil { if err != nil {
return nil, err return nil, err
@ -283,7 +283,7 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf
} }
{ {
pd := NewPrometheusDiscoveryApi(cfg, client) pd := NewPrometheusDiscoveryApi(cfg, client)
proDiscoveryGroup := r.Group("/prometheus_discovery", pd.Enable) proDiscoveryGroup := r.Group("/prometheus_discovery")
proDiscoveryGroup.GET("/api", pd.Api) proDiscoveryGroup.GET("/api", pd.Api)
proDiscoveryGroup.GET("/user", pd.User) proDiscoveryGroup.GET("/user", pd.User)
proDiscoveryGroup.GET("/group", pd.Group) proDiscoveryGroup.GET("/group", pd.Group)
@ -301,9 +301,8 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf
if cfg.Discovery.Enable == config.ETCD { if cfg.Discovery.Enable == config.ETCD {
etcdClient = client.(*etcd.SvcDiscoveryRegistryImpl).GetClient() etcdClient = client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
} }
cm := NewConfigManager(cfg.Share.IMAdminUserID, cfg.AllConfig, etcdClient, cfg.ConfigPath, cfg.RuntimeEnv) cm := NewConfigManager(cfg.Share.IMAdminUserID, &cfg.AllConfig, etcdClient, string(cfg.ConfigPath))
{ {
configGroup := r.Group("/config", cm.CheckAdmin) configGroup := r.Group("/config", cm.CheckAdmin)
configGroup.POST("/get_config_list", cm.GetConfigList) configGroup.POST("/get_config_list", cm.GetConfigList)
configGroup.POST("/get_config", cm.GetConfig) configGroup.POST("/get_config", cm.GetConfig)

View File

@ -29,11 +29,11 @@ import (
type UserApi struct { type UserApi struct {
Client user.UserClient Client user.UserClient
discov discovery.SvcDiscoveryRegistry discov discovery.Conn
config config.RpcService config config.RpcService
} }
func NewUserApi(client user.UserClient, discov discovery.SvcDiscoveryRegistry, config config.RpcService) UserApi { func NewUserApi(client user.UserClient, discov discovery.Conn, config config.RpcService) UserApi {
return UserApi{Client: client, discov: discov, config: config} return UserApi{Client: client, discov: discov, config: config}
} }

View File

@ -22,7 +22,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/msggateway"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
@ -35,7 +34,7 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
) )
func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.Conn, server grpc.ServiceRegistrar) error {
userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User) userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil { if err != nil {
return err return err
@ -51,26 +50,26 @@ func (s *Server) InitServer(ctx context.Context, config *Config, disCov discover
return nil return nil
} }
func (s *Server) Start(ctx context.Context, index int, conf *Config) error { //func (s *Server) Start(ctx context.Context, index int, conf *Config) error {
return startrpc.Start(ctx, &conf.Discovery, &conf.MsgGateway.Prometheus, conf.MsgGateway.ListenIP, // return startrpc.Start(ctx, &conf.Discovery, &conf.MsgGateway.Prometheus, conf.MsgGateway.ListenIP,
conf.MsgGateway.RPC.RegisterIP, // conf.MsgGateway.RPC.RegisterIP,
conf.MsgGateway.RPC.AutoSetPorts, conf.MsgGateway.RPC.Ports, index, // conf.MsgGateway.RPC.AutoSetPorts, conf.MsgGateway.RPC.Ports, index,
conf.Discovery.RpcService.MessageGateway, // conf.Discovery.RpcService.MessageGateway,
nil, // nil,
conf, // conf,
[]string{ // []string{
conf.Share.GetConfigFileName(), // conf.Share.GetConfigFileName(),
conf.Discovery.GetConfigFileName(), // conf.Discovery.GetConfigFileName(),
conf.MsgGateway.GetConfigFileName(), // conf.MsgGateway.GetConfigFileName(),
conf.WebhooksConfig.GetConfigFileName(), // conf.WebhooksConfig.GetConfigFileName(),
conf.RedisConfig.GetConfigFileName(), // conf.RedisConfig.GetConfigFileName(),
}, // },
[]string{ // []string{
conf.Discovery.RpcService.MessageGateway, // conf.Discovery.RpcService.MessageGateway,
}, // },
s.InitServer, // s.InitServer,
) // )
} //}
type Server struct { type Server struct {
msggateway.UnimplementedMsgGatewayServer msggateway.UnimplementedMsgGatewayServer

View File

@ -19,10 +19,12 @@ import (
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/runtimeenv" "github.com/openimsdk/tools/utils/runtimeenv"
"google.golang.org/grpc"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
) )
@ -33,26 +35,25 @@ type Config struct {
RedisConfig config.Redis RedisConfig config.Redis
WebhooksConfig config.Webhooks WebhooksConfig config.Webhooks
Discovery config.Discovery Discovery config.Discovery
Index config.Index
RuntimeEnv string
} }
// Start run ws server. // Start run ws server.
func Start(ctx context.Context, index int, conf *Config) error { func Start(ctx context.Context, conf *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
conf.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment() log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", runtimeenv.RuntimeEnvironment(),
log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", conf.RuntimeEnv,
"rpcPorts", conf.MsgGateway.RPC.Ports, "rpcPorts", conf.MsgGateway.RPC.Ports,
"wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports) "wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index) wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, int(conf.Index))
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build()) dbb := dbbuild.NewBuilder(nil, &conf.RedisConfig)
rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
longServer := NewWsServer( longServer := NewWsServer(
conf, conf,
WithPort(wsPort), WithPort(wsPort),
@ -67,12 +68,50 @@ func Start(ctx context.Context, index int, conf *Config) error {
return err return err
}) })
if err := hubServer.InitServer(ctx, conf, client, server); err != nil {
return err
}
go longServer.ChangeOnlineStatus(4) go longServer.ChangeOnlineStatus(4)
netDone := make(chan error) return hubServer.LongConnServer.Run(ctx)
go func() {
err = hubServer.Start(ctx, index, conf)
netDone <- err
}()
return hubServer.LongConnServer.Run(netDone)
} }
//
//// Start run ws server.
//func Start(ctx context.Context, index int, conf *Config) error {
// log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", runtimeenv.RuntimeEnvironment(),
// "rpcPorts", conf.MsgGateway.RPC.Ports,
// "wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
// wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)
// if err != nil {
// return err
// }
//
// rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
// if err != nil {
// return err
// }
// longServer := NewWsServer(
// conf,
// WithPort(wsPort),
// WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
// WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
// WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
// )
//
// hubServer := NewServer(longServer, conf, func(srv *Server) error {
// var err error
// longServer.online, err = rpccache.NewOnlineCache(srv.userClient, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges)
// return err
// })
//
// go longServer.ChangeOnlineStatus(4)
//
// netDone := make(chan error)
// go func() {
// err = hubServer.Start(ctx, index, conf)
// netDone <- err
// }()
// return hubServer.LongConnServer.Run(netDone)
//}

View File

@ -2,7 +2,6 @@ package msggateway
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"sync" "sync"
@ -11,7 +10,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
pbAuth "github.com/openimsdk/protocol/auth" pbAuth "github.com/openimsdk/protocol/auth"
@ -23,19 +21,18 @@ import (
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/msggateway"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/stringutil" "github.com/openimsdk/tools/utils/stringutil"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
type LongConnServer interface { type LongConnServer interface {
Run(done chan error) error Run(ctx context.Context) error
wsHandler(w http.ResponseWriter, r *http.Request) wsHandler(w http.ResponseWriter, r *http.Request)
GetUserAllCons(userID string) ([]*Client, bool) GetUserAllCons(userID string) ([]*Client, bool)
GetUserPlatformCons(userID string, platform int) ([]*Client, bool, bool) GetUserPlatformCons(userID string, platform int) ([]*Client, bool, bool)
Validate(s any) error Validate(s any) error
SetDiscoveryRegistry(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config) error SetDiscoveryRegistry(ctx context.Context, client discovery.Conn, config *Config) error
KickUserConn(client *Client) error KickUserConn(client *Client) error
UnRegister(c *Client) UnRegister(c *Client)
SetKickHandlerInfo(i *kickHandler) SetKickHandlerInfo(i *kickHandler)
@ -60,7 +57,7 @@ type WsServer struct {
handshakeTimeout time.Duration handshakeTimeout time.Duration
writeBufferSize int writeBufferSize int
validate *validator.Validate validate *validator.Validate
disCov discovery.SvcDiscoveryRegistry disCov discovery.Conn
Compressor Compressor
//Encoder //Encoder
MessageHandler MessageHandler
@ -75,7 +72,7 @@ type kickHandler struct {
newClient *Client newClient *Client
} }
func (ws *WsServer) SetDiscoveryRegistry(ctx context.Context, disCov discovery.SvcDiscoveryRegistry, config *Config) error { func (ws *WsServer) SetDiscoveryRegistry(ctx context.Context, disCov discovery.Conn, config *Config) error {
userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User) userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil { if err != nil {
return err return err
@ -158,19 +155,14 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
} }
} }
func (ws *WsServer) Run(done chan error) error { func (ws *WsServer) Run(ctx context.Context) error {
var ( var client *Client
client *Client
netErr error
shutdownDone = make(chan struct{}, 1)
)
server := http.Server{Addr: ":" + stringutil.IntToString(ws.port), Handler: nil}
ctx, cancel := context.WithCancelCause(ctx)
go func() { go func() {
for { for {
select { select {
case <-shutdownDone: case <-ctx.Done():
return return
case client = <-ws.registerChan: case client = <-ws.registerChan:
ws.registerClient(client) ws.registerClient(client)
@ -181,57 +173,56 @@ func (ws *WsServer) Run(done chan error) error {
} }
} }
}() }()
netDone := make(chan struct{}, 1)
go func() {
http.HandleFunc("/", ws.wsHandler)
err := server.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, "ws start err", server.Addr)
netDone <- struct{}{}
}
}()
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
shutDown := func() error {
sErr := server.Shutdown(ctx)
if sErr != nil {
return errs.WrapMsg(sErr, "shutdown err")
}
close(shutdownDone)
return nil
}
etcd.RegisterShutDown(shutDown)
defer cancel()
var err error
select {
case err = <-done:
if err := shutDown(); err != nil {
return err
}
if err != nil {
return err
}
case <-netDone:
}
return netErr
done := make(chan struct{})
go func() {
wsServer := http.Server{Addr: fmt.Sprintf(":%d", ws.port), Handler: nil}
http.HandleFunc("/", ws.wsHandler)
go func() {
defer close(done)
<-ctx.Done()
_ = wsServer.Shutdown(context.Background())
}()
err := wsServer.ListenAndServe()
if err == nil {
err = fmt.Errorf("http server closed")
}
cancel(fmt.Errorf("msg gateway %w", err))
}()
<-ctx.Done()
timeout := time.NewTimer(time.Second * 15)
defer timeout.Stop()
select {
case <-timeout.C:
log.ZWarn(ctx, "msg gateway graceful stop timeout", nil)
case <-done:
log.ZDebug(ctx, "msg gateway graceful stop done")
}
return context.Cause(ctx)
} }
var concurrentRequest = 3 const concurrentRequest = 3
func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *Client) error { func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *Client) error {
conns, err := ws.disCov.GetConns(ctx, ws.msgGatewayConfig.Discovery.RpcService.MessageGateway) conns, err := ws.disCov.GetConns(ctx, ws.msgGatewayConfig.Discovery.RpcService.MessageGateway)
if err != nil { if err != nil {
return err return err
} }
if len(conns) == 0 || (len(conns) == 1 && ws.disCov.IsSelfNode(conns[0])) {
return nil
}
wg := errgroup.Group{} wg := errgroup.Group{}
wg.SetLimit(concurrentRequest) wg.SetLimit(concurrentRequest)
// Online push user online message to other node // Online push user online message to other node
for _, v := range conns { for _, v := range conns {
v := v v := v
log.ZDebug(ctx, " sendUserOnlineInfoToOtherNode conn ", "target", v.Target()) log.ZDebug(ctx, "sendUserOnlineInfoToOtherNode conn")
if v.Target() == ws.disCov.GetSelfConnTarget() { if ws.disCov.IsSelfNode(v) {
log.ZDebug(ctx, "Filter out this node", "node", v.Target()) log.ZDebug(ctx, "Filter out this node")
continue continue
} }
@ -242,7 +233,7 @@ func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *C
PlatformID: int32(client.PlatformID), Token: client.token, PlatformID: int32(client.PlatformID), Token: client.token,
}) })
if err != nil { if err != nil {
log.ZWarn(ctx, "MultiTerminalLoginCheck err", err, "node", v.Target()) log.ZWarn(ctx, "MultiTerminalLoginCheck err", err)
} }
return nil return nil
}) })

View File

@ -16,51 +16,35 @@ package msgtransfer
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net"
"net/http"
"os"
"os/signal"
"strconv"
"syscall"
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/network"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/mq"
"github.com/openimsdk/tools/utils/runtimeenv" "github.com/openimsdk/tools/utils/runtimeenv"
conf "github.com/openimsdk/open-im-server/v3/pkg/common/config" conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
) )
type MsgTransfer struct { type MsgTransfer struct {
historyConsumer mq.Consumer
historyMongoConsumer mq.Consumer
// This consumer aggregated messages, subscribed to the topic:toRedis, // This consumer aggregated messages, subscribed to the topic:toRedis,
// the message is stored in redis, Incr Redis, and then the message is sent to toPush topic for push, // the message is stored in redis, Incr Redis, and then the message is sent to toPush topic for push,
// and the message is sent to toMongo topic for persistence // and the message is sent to toMongo topic for persistence
historyCH *OnlineHistoryRedisConsumerHandler historyHandler *OnlineHistoryRedisConsumerHandler
//This consumer handle message to mongo //This consumer handle message to mongo
historyMongoCH *OnlineHistoryMongoConsumerHandler historyMongoHandler *OnlineHistoryMongoConsumerHandler
ctx context.Context ctx context.Context
cancel context.CancelFunc //cancel context.CancelFunc
runTimeEnv string
} }
type Config struct { type Config struct {
@ -71,48 +55,59 @@ type Config struct {
Share conf.Share Share conf.Share
WebhooksConfig conf.Webhooks WebhooksConfig conf.Webhooks
Discovery conf.Discovery Discovery conf.Discovery
Index conf.Index
} }
func Start(ctx context.Context, index int, config *Config) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
runTimeEnv := runtimeenv.PrintRuntimeEnvironment() builder := mqbuild.NewBuilder(&config.KafkaConfig)
log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runTimeEnv, "prometheusPorts", log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runtimeenv.RuntimeEnvironment(), "prometheusPorts",
config.MsgTransfer.Prometheus.Ports, "index", index) config.MsgTransfer.Prometheus.Ports, "index", config.Index)
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) mgocli, err := dbb.Mongo(ctx)
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
client, err := discRegister.NewDiscoveryRegister(&config.Discovery, runTimeEnv, nil)
//if config.Discovery.Enable == conf.ETCD {
// cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), []string{
// config.MsgTransfer.GetConfigFileName(),
// config.RedisConfig.GetConfigFileName(),
// config.MongodbConfig.GetConfigFileName(),
// config.KafkaConfig.GetConfigFileName(),
// config.Share.GetConfigFileName(),
// config.WebhooksConfig.GetConfigFileName(),
// config.Discovery.GetConfigFileName(),
// conf.LogConfigFileName,
// })
// cm.Watch(ctx)
//}
mongoProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToMongoTopic)
if err != nil { if err != nil {
return err return err
} }
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), pushProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToPushTopic)
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) if err != nil {
return err
if config.Discovery.Enable == conf.ETCD {
cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), []string{
config.MsgTransfer.GetConfigFileName(),
config.RedisConfig.GetConfigFileName(),
config.MongodbConfig.GetConfigFileName(),
config.KafkaConfig.GetConfigFileName(),
config.Share.GetConfigFileName(),
config.WebhooksConfig.GetConfigFileName(),
config.Discovery.GetConfigFileName(),
conf.LogConfigFileName,
})
cm.Watch(ctx)
} }
msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB()) msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
if err != nil { if err != nil {
return err return err
} }
msgModel := redis.NewMsgCache(rdb, msgDocModel) var msgModel cache.MsgCache
if rdb == nil {
cm, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
msgModel = mcache.NewMsgCache(cm, msgDocModel)
} else {
msgModel = redis.NewMsgCache(rdb, msgDocModel)
}
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB()) seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil { if err != nil {
return err return err
@ -123,124 +118,68 @@ func Start(ctx context.Context, index int, config *Config) error {
return err return err
} }
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser) seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig) msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, mongoProducer, pushProducer)
if err != nil { if err != nil {
return err return err
} }
historyCH, err := NewOnlineHistoryRedisConsumerHandler(ctx, client, config, msgTransferDatabase) historyConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToRedisTopic)
if err != nil { if err != nil {
return err return err
} }
historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgTransferDatabase) historyMongoConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToMongoTopic)
if err != nil { if err != nil {
return err return err
} }
historyHandler, err := NewOnlineHistoryRedisConsumerHandler(ctx, client, config, msgTransferDatabase)
if err != nil {
return err
}
historyMongoHandler := NewOnlineHistoryMongoConsumerHandler(msgTransferDatabase)
msgTransfer := &MsgTransfer{ msgTransfer := &MsgTransfer{
historyCH: historyCH, historyConsumer: historyConsumer,
historyMongoCH: historyMongoCH, historyMongoConsumer: historyMongoConsumer,
runTimeEnv: runTimeEnv, historyHandler: historyHandler,
historyMongoHandler: historyMongoHandler,
} }
return msgTransfer.Start(index, config, client) return msgTransfer.Start(ctx)
} }
func (m *MsgTransfer) Start(index int, config *Config, client discovery.SvcDiscoveryRegistry) error { func (m *MsgTransfer) Start(ctx context.Context) error {
m.ctx, m.cancel = context.WithCancel(context.Background()) var cancel context.CancelCauseFunc
var ( m.ctx, cancel = context.WithCancelCause(ctx)
netDone = make(chan struct{}, 1)
netErr error
)
go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyCH) go func() {
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyMongoCH) for {
go m.historyCH.HandleUserHasReadSeqMessages(m.ctx) if err := m.historyConsumer.Subscribe(m.ctx, m.historyHandler.HandlerRedisMessage); err != nil {
err := m.historyCH.redisMessageBatches.Start() cancel(fmt.Errorf("history consumer %w", err))
log.ZError(m.ctx, "historyConsumer err", err)
return
}
}
}()
go func() {
fn := func(ctx context.Context, key string, value []byte) error {
m.historyMongoHandler.HandleChatWs2Mongo(ctx, key, value)
return nil
}
for {
if err := m.historyMongoConsumer.Subscribe(m.ctx, fn); err != nil {
cancel(fmt.Errorf("history mongo consumer %w", err))
log.ZError(m.ctx, "historyMongoConsumer err", err)
return
}
}
}()
go m.historyHandler.HandleUserHasReadSeqMessages(m.ctx)
err := m.historyHandler.redisMessageBatches.Start()
if err != nil { if err != nil {
return err return err
} }
<-m.ctx.Done()
registerIP, err := network.GetRpcRegisterIP("") return context.Cause(m.ctx)
if err != nil {
return err
}
getAutoPort := func() (net.Listener, int, error) {
registerAddr := net.JoinHostPort(registerIP, "0")
listener, err := net.Listen("tcp", registerAddr)
if err != nil {
return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
}
_, portStr, _ := net.SplitHostPort(listener.Addr().String())
port, _ := strconv.Atoi(portStr)
return listener, port, nil
}
if config.MsgTransfer.Prometheus.AutoSetPorts && config.Discovery.Enable != conf.ETCD {
return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
}
if config.MsgTransfer.Prometheus.Enable {
var (
listener net.Listener
prometheusPort int
)
if config.MsgTransfer.Prometheus.AutoSetPorts {
listener, prometheusPort, err = getAutoPort()
if err != nil {
return err
}
etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
_, err = etcdClient.Put(context.TODO(), prommetrics.BuildDiscoveryKey(prommetrics.MessageTransferKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
if err != nil {
return errs.WrapMsg(err, "etcd put err")
}
} else {
prometheusPort, err = datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
if err != nil {
return err
}
listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
if err != nil {
return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
}
}
go func() {
defer func() {
if r := recover(); r != nil {
log.ZPanic(m.ctx, "MsgTransfer Start Panic", errs.ErrPanic(r))
}
}()
if err := prommetrics.TransferInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
netDone <- struct{}{}
}
}()
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGTERM)
select {
case <-sigs:
program.SIGTERMExit()
// graceful close kafka client.
m.cancel()
m.historyCH.redisMessageBatches.Close()
m.historyCH.Close()
m.historyCH.historyConsumerGroup.Close()
m.historyMongoCH.historyConsumerGroup.Close()
return nil
case <-netDone:
m.cancel()
m.historyCH.redisMessageBatches.Close()
m.historyCH.Close()
m.historyCH.historyConsumerGroup.Close()
m.historyMongoCH.historyConsumerGroup.Close()
close(netDone)
return netErr
}
} }

View File

@ -18,14 +18,13 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/discovery"
"strconv"
"strings"
"sync" "sync"
"time" "time"
"github.com/IBM/sarama" "github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/discovery"
"github.com/go-redis/redis" "github.com/go-redis/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
@ -37,7 +36,6 @@ import (
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/stringutil" "github.com/openimsdk/tools/utils/stringutil"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
) )
@ -64,9 +62,7 @@ type userHasReadSeq struct {
} }
type OnlineHistoryRedisConsumerHandler struct { type OnlineHistoryRedisConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup redisMessageBatches *batcher.Batcher[ConsumerMessage]
redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage]
msgTransferDatabase controller.MsgTransferDatabase msgTransferDatabase controller.MsgTransferDatabase
conversationUserHasReadChan chan *userHasReadSeq conversationUserHasReadChan chan *userHasReadSeq
@ -76,12 +72,13 @@ type OnlineHistoryRedisConsumerHandler struct {
conversationClient *rpcli.ConversationClient conversationClient *rpcli.ConversationClient
} }
func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config, database controller.MsgTransferDatabase) (*OnlineHistoryRedisConsumerHandler, error) { type ConsumerMessage struct {
kafkaConf := config.KafkaConfig Ctx context.Context
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false) Key string
if err != nil { Value []byte
return nil, err }
}
func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.Conn, config *Config, database controller.MsgTransferDatabase) (*OnlineHistoryRedisConsumerHandler, error) {
groupConn, err := client.GetConn(ctx, config.Discovery.RpcService.Group) groupConn, err := client.GetConn(ctx, config.Discovery.RpcService.Group)
if err != nil { if err != nil {
return nil, err return nil, err
@ -97,7 +94,7 @@ func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.
och.conversationClient = rpcli.NewConversationClient(conversationConn) och.conversationClient = rpcli.NewConversationClient(conversationConn)
och.wg.Add(1) och.wg.Add(1)
b := batcher.New[sarama.ConsumerMessage]( b := batcher.New[ConsumerMessage](
batcher.WithSize(size), batcher.WithSize(size),
batcher.WithWorker(worker), batcher.WithWorker(worker),
batcher.WithInterval(interval), batcher.WithInterval(interval),
@ -109,16 +106,15 @@ func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.
hashCode := stringutil.GetHashCode(key) hashCode := stringutil.GetHashCode(key)
return int(hashCode) % och.redisMessageBatches.Worker() return int(hashCode) % och.redisMessageBatches.Worker()
} }
b.Key = func(consumerMessage *sarama.ConsumerMessage) string { b.Key = func(consumerMessage *ConsumerMessage) string {
return string(consumerMessage.Key) return consumerMessage.Key
} }
b.Do = och.do b.Do = och.do
och.redisMessageBatches = b och.redisMessageBatches = b
och.historyConsumerGroup = historyConsumerGroup
return &och, nil return &och, nil
} }
func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) { func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[ConsumerMessage]) {
ctx = mcontext.WithTriggerIDContext(ctx, val.TriggerID()) ctx = mcontext.WithTriggerIDContext(ctx, val.TriggerID())
ctxMessages := och.parseConsumerMessages(ctx, val.Val()) ctxMessages := och.parseConsumerMessages(ctx, val.Val())
ctx = withAggregationCtx(ctx, ctxMessages) ctx = withAggregationCtx(ctx, ctxMessages)
@ -189,7 +185,7 @@ func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context,
} }
func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg { func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*ConsumerMessage) []*ContextMsg {
var ctxMessages []*ContextMsg var ctxMessages []*ContextMsg
for i := 0; i < len(consumerMessages); i++ { for i := 0; i < len(consumerMessages); i++ {
ctxMsg := &ContextMsg{} ctxMsg := &ContextMsg{}
@ -199,16 +195,9 @@ func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.
log.ZWarn(ctx, "msg_transfer Unmarshal msg err", err, string(consumerMessages[i].Value)) log.ZWarn(ctx, "msg_transfer Unmarshal msg err", err, string(consumerMessages[i].Value))
continue continue
} }
var arr []string ctxMsg.ctx = consumerMessages[i].Ctx
for i, header := range consumerMessages[i].Headers {
arr = append(arr, strconv.Itoa(i), string(header.Key), string(header.Value))
}
log.ZDebug(ctx, "consumer.kafka.GetContextWithMQHeader", "len", len(consumerMessages[i].Headers),
"header", strings.Join(arr, ", "))
ctxMsg.ctx = kafka.GetContextWithMQHeader(consumerMessages[i].Headers)
ctxMsg.message = msgFromMQ ctxMsg.message = msgFromMQ
log.ZDebug(ctx, "message parse finish", "message", msgFromMQ, "key", log.ZDebug(ctx, "message parse finish", "message", msgFromMQ, "key", consumerMessages[i].Key)
string(consumerMessages[i].Key))
ctxMessages = append(ctxMessages, ctxMsg) ctxMessages = append(ctxMessages, ctxMsg)
} }
return ctxMessages return ctxMessages
@ -383,7 +372,9 @@ func (och *OnlineHistoryRedisConsumerHandler) Close() {
func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) { func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
for _, v := range msgs { for _, v := range msgs {
log.ZDebug(ctx, "push msg to topic", "msg", v.message.String()) log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message) if err := och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message); err != nil {
log.ZError(ctx, "msg to push topic error", err, "msg", v.message.String())
}
} }
} }
@ -401,35 +392,10 @@ func withAggregationCtx(ctx context.Context, values []*ContextMsg) context.Conte
return mcontext.SetOperationID(ctx, allMessageOperationID) return mcontext.SetOperationID(ctx, allMessageOperationID)
} }
func (och *OnlineHistoryRedisConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } func (och *OnlineHistoryRedisConsumerHandler) HandlerRedisMessage(ctx context.Context, key string, value []byte) error { // a instance in the consumer group
func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { err := och.redisMessageBatches.Put(ctx, &ConsumerMessage{Ctx: ctx, Key: key, Value: value})
if err != nil {
log.ZWarn(ctx, "put msg to error", err, "key", key, "value", value)
}
return nil return nil
} }
func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
session.MarkMessage(lastMessage, "")
session.Commit()
}
for {
select {
case msg, ok := <-claim.Messages():
if !ok {
return nil
}
if len(msg.Value) == 0 {
continue
}
err := och.redisMessageBatches.Put(context.Background(), msg)
if err != nil {
log.ZWarn(context.Background(), "put msg to error", err, "msg", msg)
}
case <-session.Context().Done():
return nil
}
}
}

View File

@ -17,36 +17,24 @@ package msgtransfer
import ( import (
"context" "context"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
pbmsg "github.com/openimsdk/protocol/msg" pbmsg "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
) )
type OnlineHistoryMongoConsumerHandler struct { type OnlineHistoryMongoConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup msgTransferDatabase controller.MsgTransferDatabase
msgTransferDatabase controller.MsgTransferDatabase
} }
func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase) (*OnlineHistoryMongoConsumerHandler, error) { func NewOnlineHistoryMongoConsumerHandler(database controller.MsgTransferDatabase) *OnlineHistoryMongoConsumerHandler {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true) return &OnlineHistoryMongoConsumerHandler{
if err != nil { msgTransferDatabase: database,
return nil, err
} }
mc := &OnlineHistoryMongoConsumerHandler{
historyConsumerGroup: historyConsumerGroup,
msgTransferDatabase: database,
}
return mc, nil
} }
func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Context, cMsg *sarama.ConsumerMessage, key string, session sarama.ConsumerGroupSession) { func (mc *OnlineHistoryMongoConsumerHandler) HandleChatWs2Mongo(ctx context.Context, key string, msg []byte) {
msg := cMsg.Value
msgFromMQ := pbmsg.MsgDataToMongoByMQ{} msgFromMQ := pbmsg.MsgDataToMongoByMQ{}
err := proto.Unmarshal(msg, &msgFromMQ) err := proto.Unmarshal(msg, &msgFromMQ)
if err != nil { if err != nil {
@ -54,7 +42,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
return return
} }
if len(msgFromMQ.MsgData) == 0 { if len(msgFromMQ.MsgData) == 0 {
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg) log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "key", key, "msg", msg)
return return
} }
log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String()) log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
@ -82,22 +70,3 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
// msgFromMQ.MsgData, "conversationID", msgFromMQ.ConversationID) // msgFromMQ.MsgData, "conversationID", msgFromMQ.ConversationID)
//} //}
} }
func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (*OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }
func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
for msg := range claim.Messages() {
ctx := mc.historyConsumerGroup.GetContextFromMsg(msg)
if len(msg.Value) != 0 {
mc.handleChatWs2Mongo(ctx, msg, string(msg.Key), sess)
} else {
log.ZError(ctx, "mongo msg get from kafka but is nil", nil, "conversationID", msg.Key)
}
sess.MarkMessage(msg, "")
}
return nil
}

View File

@ -17,6 +17,7 @@ package push
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct" "github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"

View File

@ -3,7 +3,6 @@ package push
import ( import (
"context" "context"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@ -12,40 +11,21 @@ import (
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/jsonutil" "github.com/openimsdk/tools/utils/jsonutil"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
) )
type OfflinePushConsumerHandler struct { type OfflinePushConsumerHandler struct {
OfflinePushConsumerGroup *kafka.MConsumerGroup offlinePusher offlinepush.OfflinePusher
offlinePusher offlinepush.OfflinePusher
} }
func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) { func NewOfflinePushConsumerHandler(offlinePusher offlinepush.OfflinePusher) *OfflinePushConsumerHandler {
var offlinePushConsumerHandler OfflinePushConsumerHandler return &OfflinePushConsumerHandler{
var err error offlinePusher: offlinePusher,
offlinePushConsumerHandler.offlinePusher = offlinePusher
offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID,
[]string{config.KafkaConfig.ToOfflinePushTopic}, true)
if err != nil {
return nil, err
} }
return &offlinePushConsumerHandler, nil
} }
func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil } func (o *OfflinePushConsumerHandler) HandleMsg2OfflinePush(ctx context.Context, msg []byte) {
func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for msg := range claim.Messages() {
ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg)
o.handleMsg2OfflinePush(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
}
func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) {
offlinePushMsg := pbpush.PushMsgReq{} offlinePushMsg := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil { if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil {
log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg)) log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg))

View File

@ -2,7 +2,7 @@ package push
import ( import (
"context" "context"
"errors" "fmt"
"sync" "sync"
"github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/msggateway"
@ -11,6 +11,7 @@ import (
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/runtimeenv"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -30,37 +31,36 @@ func newEmptyOnlinePusher() *emptyOnlinePusher {
return &emptyOnlinePusher{} return &emptyOnlinePusher{}
} }
func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil) log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
return nil, nil return nil, nil
} }
func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil) log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
return nil return nil
} }
func NewOnlinePusher(disCov discovery.SvcDiscoveryRegistry, config *Config) OnlinePusher { func NewOnlinePusher(disCov discovery.Conn, config *Config) (OnlinePusher, error) {
if conf.Standalone() {
if config.runTimeEnv == conf.KUBERNETES { return NewDefaultAllNode(disCov, config), nil
return NewDefaultAllNode(disCov, config) }
if runtimeenv.RuntimeEnvironment() == conf.KUBERNETES {
return NewDefaultAllNode(disCov, config), nil
} }
switch config.Discovery.Enable { switch config.Discovery.Enable {
case conf.ETCD: case conf.ETCD:
return NewDefaultAllNode(disCov, config) return NewDefaultAllNode(disCov, config), nil
default: default:
log.ZError(context.Background(), "NewOnlinePusher is error", errs.Wrap(errors.New("unsupported discovery type")), "type", config.Discovery.Enable) return nil, errs.New(fmt.Sprintf("unsupported discovery type %s", config.Discovery.Enable))
return nil
} }
} }
type DefaultAllNode struct { type DefaultAllNode struct {
disCov discovery.SvcDiscoveryRegistry disCov discovery.Conn
config *Config config *Config
} }
func NewDefaultAllNode(disCov discovery.SvcDiscoveryRegistry, config *Config) *DefaultAllNode { func NewDefaultAllNode(disCov discovery.Conn, config *Config) *DefaultAllNode {
return &DefaultAllNode{disCov: disCov, config: config} return &DefaultAllNode{disCov: disCov, config: config}
} }
@ -166,7 +166,7 @@ func (k *K8sStaticConsistentHash) GetConnsAndOnlinePush(ctx context.Context, msg
} }
} }
log.ZDebug(ctx, "genUsers send hosts struct:", "usersHost", usersHost) log.ZDebug(ctx, "genUsers send hosts struct:", "usersHost", usersHost)
var usersConns = make(map[*grpc.ClientConn][]string) var usersConns = make(map[grpc.ClientConnInterface][]string)
for host, userIds := range usersHost { for host, userIds := range usersHost {
tconn, _ := k.disCov.GetConn(ctx, host) tconn, _ := k.disCov.GetConn(ctx, host)
usersConns[tconn] = userIds usersConns[tconn] = userIds

View File

@ -2,39 +2,43 @@ package push
import ( import (
"context" "context"
"math/rand"
"strconv"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
pbpush "github.com/openimsdk/protocol/push" pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/utils/runtimeenv" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
type pushServer struct { type pushServer struct {
pbpush.UnimplementedPushMsgServiceServer pbpush.UnimplementedPushMsgServiceServer
database controller.PushDatabase database controller.PushDatabase
disCov discovery.SvcDiscoveryRegistry disCov discovery.Conn
offlinePusher offlinepush.OfflinePusher offlinePusher offlinepush.OfflinePusher
pushCh *ConsumerHandler
offlinePushCh *OfflinePushConsumerHandler
} }
type Config struct { type Config struct {
RpcConfig config.Push RpcConfig config.Push
RedisConfig config.Redis RedisConfig config.Redis
MongoConfig config.Mongo
KafkaConfig config.Kafka KafkaConfig config.Kafka
NotificationConfig config.Notification NotificationConfig config.Notification
Share config.Share Share config.Share
WebhooksConfig config.Webhooks WebhooksConfig config.Webhooks
LocalCacheConfig config.LocalCache LocalCacheConfig config.LocalCache
Discovery config.Discovery Discovery config.Discovery
FcmConfigPath string FcmConfigPath config.Path
runTimeEnv string
} }
func (p pushServer) DelUserPushToken(ctx context.Context, func (p pushServer) DelUserPushToken(ctx context.Context,
@ -45,42 +49,90 @@ func (p pushServer) DelUserPushToken(ctx context.Context,
return &pbpush.DelUserPushTokenResp{}, nil return &pbpush.DelUserPushTokenResp{}, nil
} }
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
config.runTimeEnv = runtimeenv.PrintRuntimeEnvironment() dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig)
rdb, err := dbb.Redis(ctx)
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
if err != nil { if err != nil {
return err return err
} }
cacheModel := redis.NewThirdCache(rdb) var cacheModel cache.ThirdCache
offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, config.FcmConfigPath) if rdb == nil {
mdb, err := dbb.Mongo(ctx)
if err != nil {
return err
}
mc, err := mgo.NewCacheMgo(mdb.GetDB())
if err != nil {
return err
}
cacheModel = mcache.NewThirdCache(mc)
} else {
cacheModel = redis.NewThirdCache(rdb)
}
offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, string(config.FcmConfigPath))
if err != nil {
return err
}
builder := mqbuild.NewBuilder(&config.KafkaConfig)
offlinePushProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToOfflinePushTopic)
if err != nil {
return err
}
database := controller.NewPushDatabase(cacheModel, offlinePushProducer)
pushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToPushTopic)
if err != nil {
return err
}
offlinePushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToOfflinePushTopic)
if err != nil { if err != nil {
return err return err
} }
database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig) pushHandler, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client)
consumer, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client)
if err != nil { if err != nil {
return err return err
} }
offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher) offlineHandler := NewOfflinePushConsumerHandler(offlinePusher)
if err != nil {
return err
}
pbpush.RegisterPushMsgServiceServer(server, &pushServer{ pbpush.RegisterPushMsgServiceServer(server, &pushServer{
database: database, database: database,
disCov: client, disCov: client,
offlinePusher: offlinePusher, offlinePusher: offlinePusher,
pushCh: consumer,
offlinePushCh: offlinePushConsumer,
}) })
go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer) go func() {
pushHandler.WaitCache()
fn := func(ctx context.Context, key string, value []byte) error {
pushHandler.HandleMs2PsChat(ctx, value)
return nil
}
consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
log.ZInfo(consumerCtx, "begin consume messages")
for {
if err := pushConsumer.Subscribe(consumerCtx, fn); err != nil {
log.ZError(consumerCtx, "subscribe err", err)
return
}
}
}()
go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer) go func() {
fn := func(ctx context.Context, key string, value []byte) error {
offlineHandler.HandleMsg2OfflinePush(ctx, value)
return nil
}
consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
log.ZInfo(consumerCtx, "begin consume messages")
for {
if err := offlinePushConsumer.Subscribe(consumerCtx, fn); err != nil {
log.ZError(consumerCtx, "subscribe err", err)
return
}
}
}()
return nil return nil
} }

View File

@ -3,13 +3,8 @@ package push
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"math/rand"
"strconv"
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options" "github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@ -17,6 +12,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil" "github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway" "github.com/openimsdk/protocol/msggateway"
@ -25,7 +21,6 @@ import (
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil" "github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/timeutil" "github.com/openimsdk/tools/utils/timeutil"
@ -34,7 +29,7 @@ import (
) )
type ConsumerHandler struct { type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup //pushConsumerGroup mq.Consumer
offlinePusher offlinepush.OfflinePusher offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher onlinePusher OnlinePusher
pushDatabase controller.PushDatabase pushDatabase controller.PushDatabase
@ -49,15 +44,7 @@ type ConsumerHandler struct {
conversationClient *rpcli.ConversationClient conversationClient *rpcli.ConversationClient
} }
func NewConsumerHandler(ctx context.Context, config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, func NewConsumerHandler(ctx context.Context, config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, client discovery.Conn) (*ConsumerHandler, error) {
client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) {
var consumerHandler ConsumerHandler
var err error
consumerHandler.pushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToPushGroupID,
[]string{config.KafkaConfig.ToPushTopic}, true)
if err != nil {
return nil, err
}
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User) userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil { if err != nil {
return nil, err return nil, err
@ -74,13 +61,18 @@ func NewConsumerHandler(ctx context.Context, config *Config, database controller
if err != nil { if err != nil {
return nil, err return nil, err
} }
onlinePusher, err := NewOnlinePusher(client, config)
if err != nil {
return nil, err
}
var consumerHandler ConsumerHandler
consumerHandler.userClient = rpcli.NewUserClient(userConn) consumerHandler.userClient = rpcli.NewUserClient(userConn)
consumerHandler.groupClient = rpcli.NewGroupClient(groupConn) consumerHandler.groupClient = rpcli.NewGroupClient(groupConn)
consumerHandler.msgClient = rpcli.NewMsgClient(msgConn) consumerHandler.msgClient = rpcli.NewMsgClient(msgConn)
consumerHandler.conversationClient = rpcli.NewConversationClient(conversationConn) consumerHandler.conversationClient = rpcli.NewConversationClient(conversationConn)
consumerHandler.offlinePusher = offlinePusher consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config) consumerHandler.onlinePusher = onlinePusher
consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupClient, &config.LocalCacheConfig, rdb) consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupClient, &config.LocalCacheConfig, rdb)
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationClient, &config.LocalCacheConfig, rdb) consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL) consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
@ -93,7 +85,7 @@ func NewConsumerHandler(ctx context.Context, config *Config, database controller
return &consumerHandler, nil return &consumerHandler, nil
} }
func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) { func (c *ConsumerHandler) HandleMs2PsChat(ctx context.Context, msg []byte) {
msgFromMQ := pbpush.PushMsgReq{} msgFromMQ := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &msgFromMQ); err != nil { if err := proto.Unmarshal(msg, &msgFromMQ); err != nil {
log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg)) log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg))
@ -127,25 +119,12 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
} }
} }
func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil } func (c *ConsumerHandler) WaitCache() {
func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
c.onlineCache.Lock.Lock() c.onlineCache.Lock.Lock()
for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver { for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver {
c.onlineCache.Cond.Wait() c.onlineCache.Cond.Wait()
} }
c.onlineCache.Lock.Unlock() c.onlineCache.Lock.Unlock()
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
log.ZInfo(ctx, "begin consume messages")
for msg := range claim.Messages() {
ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
c.handleMs2PsChat(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
} }
// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType. // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.

View File

@ -18,11 +18,14 @@ import (
"context" "context"
"errors" "errors"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
@ -43,7 +46,7 @@ import (
type authServer struct { type authServer struct {
pbauth.UnimplementedAuthServer pbauth.UnimplementedAuthServer
authDatabase controller.AuthDatabase authDatabase controller.AuthDatabase
RegisterCenter discovery.SvcDiscoveryRegistry RegisterCenter discovery.Conn
config *Config config *Config
userClient *rpcli.UserClient userClient *rpcli.UserClient
} }
@ -51,15 +54,31 @@ type authServer struct {
type Config struct { type Config struct {
RpcConfig config.Auth RpcConfig config.Auth
RedisConfig config.Redis RedisConfig config.Redis
MongoConfig config.Mongo
Share config.Share Share config.Share
Discovery config.Discovery Discovery config.Discovery
} }
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig)
rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
var token cache.TokenModel
if rdb == nil {
mdb, err := dbb.Mongo(ctx)
if err != nil {
return err
}
mc, err := mgo.NewCacheMgo(mdb.GetDB())
if err != nil {
return err
}
token = mcache.NewTokenCacheModel(mc, config.RpcConfig.TokenPolicy.Expire)
} else {
token = redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire)
}
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User) userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil { if err != nil {
return err return err
@ -67,7 +86,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
pbauth.RegisterAuthServer(server, &authServer{ pbauth.RegisterAuthServer(server, &authServer{
RegisterCenter: client, RegisterCenter: client,
authDatabase: controller.NewAuthDatabase( authDatabase: controller.NewAuthDatabase(
redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire), token,
config.Share.Secret, config.Share.Secret,
config.RpcConfig.TokenPolicy.Expire, config.RpcConfig.TokenPolicy.Expire,
config.Share.MultiLogin, config.Share.MultiLogin,
@ -192,7 +211,7 @@ func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID
return err return err
} }
for _, v := range conns { for _, v := range conns {
log.ZDebug(ctx, "forceKickOff", "conn", v.Target()) log.ZDebug(ctx, "forceKickOff", "userID", userID, "platformID", platformID)
client := msggateway.NewMsgGatewayClient(v) client := msggateway.NewMsgGatewayClient(v)
kickReq := &msggateway.KickUserOfflineReq{KickUserIDList: []string{userID}, PlatformID: platformID} kickReq := &msggateway.KickUserOfflineReq{KickUserIDList: []string{userID}, PlatformID: platformID}
_, err := client.KickUserOffline(ctx, kickReq) _, err := client.KickUserOffline(ctx, kickReq)

View File

@ -19,24 +19,22 @@ import (
"sort" "sort"
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
dbModel "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" dbModel "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
pbconversation "github.com/openimsdk/protocol/conversation" pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
@ -66,12 +64,13 @@ type Config struct {
Discovery config.Discovery Discovery config.Discovery
} }
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -96,7 +95,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
pbconversation.RegisterConversationServer(server, &conversationServer{ pbconversation.RegisterConversationServer(server, &conversationServer{
conversationNotificationSender: NewConversationNotificationSender(&config.NotificationConfig, msgClient), conversationNotificationSender: NewConversationNotificationSender(&config.NotificationConfig, msgClient),
conversationDatabase: controller.NewConversationDatabase(conversationDB, conversationDatabase: controller.NewConversationDatabase(conversationDB,
redis.NewConversationRedis(rdb, &config.LocalCacheConfig, redis.GetRocksCacheOptions(), conversationDB), mgocli.GetTx()), redis.NewConversationRedis(rdb, &config.LocalCacheConfig, conversationDB), mgocli.GetTx()),
userClient: rpcli.NewUserClient(userConn), userClient: rpcli.NewUserClient(userConn),
groupClient: rpcli.NewGroupClient(groupConn), groupClient: rpcli.NewGroupClient(groupConn),
msgClient: msgClient, msgClient: msgClient,

View File

@ -17,13 +17,15 @@ package group
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"math/big" "math/big"
"math/rand" "math/rand"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct" "github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
@ -42,8 +44,6 @@ import (
pbgroup "github.com/openimsdk/protocol/group" pbgroup "github.com/openimsdk/protocol/group"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/wrapperspb" "github.com/openimsdk/protocol/wrapperspb"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
@ -76,12 +76,13 @@ type Config struct {
Discovery config.Discovery Discovery config.Discovery
} }
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -97,11 +98,6 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil { if err != nil {
return err return err
} }
//userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
//msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
//conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User) userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil { if err != nil {
return err return err

View File

@ -16,22 +16,24 @@ package msg
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/notification" "github.com/openimsdk/open-im-server/v3/pkg/notification"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache" "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/conversation" "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -56,8 +58,8 @@ type Config struct {
// MsgServer encapsulates dependencies required for message handling. // MsgServer encapsulates dependencies required for message handling.
type msgServer struct { type msgServer struct {
msg.UnimplementedMsgServer msg.UnimplementedMsgServer
RegisterCenter discovery.SvcDiscoveryRegistry // Service discovery registry for service registration. RegisterCenter discovery.Conn // Service discovery registry for service registration.
MsgDatabase controller.CommonMsgDatabase // Interface for message database operations. MsgDatabase controller.CommonMsgDatabase // Interface for message database operations.
StreamMsgDatabase controller.StreamMsgDatabase StreamMsgDatabase controller.StreamMsgDatabase
UserLocalCache *rpccache.UserLocalCache // Local cache for user data. UserLocalCache *rpccache.UserLocalCache // Local cache for user data.
FriendLocalCache *rpccache.FriendLocalCache // Local cache for friend data. FriendLocalCache *rpccache.FriendLocalCache // Local cache for friend data.
@ -76,12 +78,18 @@ func (m *msgServer) addInterceptorHandler(interceptorFunc ...MessageInterceptorF
} }
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) builder := mqbuild.NewBuilder(&config.KafkaConfig)
redisProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToRedisTopic)
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -89,7 +97,16 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil { if err != nil {
return err return err
} }
msgModel := redis.NewMsgCache(rdb, msgDocModel) var msgModel cache.MsgCache
if rdb == nil {
cm, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
msgModel = mcache.NewMsgCache(cm, msgDocModel)
} else {
msgModel = redis.NewMsgCache(rdb, msgDocModel)
}
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB()) seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil { if err != nil {
return err return err
@ -104,10 +121,6 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err return err
} }
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser) seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User) userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil { if err != nil {
return err return err
@ -125,6 +138,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err return err
} }
conversationClient := rpcli.NewConversationClient(conversationConn) conversationClient := rpcli.NewConversationClient(conversationConn)
msgDatabase := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, redisProducer)
s := &msgServer{ s := &msgServer{
MsgDatabase: msgDatabase, MsgDatabase: msgDatabase,
StreamMsgDatabase: controller.NewStreamMsgDatabase(streamMsg), StreamMsgDatabase: controller.NewStreamMsgDatabase(streamMsg),

View File

@ -16,26 +16,25 @@ package relation
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/mq/memamq" "github.com/openimsdk/tools/mq/memamq"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/relation" "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
@ -47,7 +46,7 @@ type friendServer struct {
db controller.FriendDatabase db controller.FriendDatabase
blackDatabase controller.BlackDatabase blackDatabase controller.BlackDatabase
notificationSender *FriendNotificationSender notificationSender *FriendNotificationSender
RegisterCenter discovery.SvcDiscoveryRegistry RegisterCenter discovery.Conn
config *Config config *Config
webhookClient *webhook.Client webhookClient *webhook.Client
queue *memamq.MemoryQueue queue *memamq.MemoryQueue
@ -66,12 +65,13 @@ type Config struct {
Discovery config.Discovery Discovery config.Discovery
} }
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -114,12 +114,12 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
db: controller.NewFriendDatabase( db: controller.NewFriendDatabase(
friendMongoDB, friendMongoDB,
friendRequestMongoDB, friendRequestMongoDB,
redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB, redis.GetRocksCacheOptions()), redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB),
mgocli.GetTx(), mgocli.GetTx(),
), ),
blackDatabase: controller.NewBlackDatabase( blackDatabase: controller.NewBlackDatabase(
blackMongoDB, blackMongoDB,
redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB, redis.GetRocksCacheOptions()), redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB),
), ),
notificationSender: notificationSender, notificationSender: notificationSender,
RegisterCenter: client, RegisterCenter: client,

View File

@ -19,11 +19,12 @@ import (
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"path" "path"
"strconv" "strconv"
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@ -37,7 +38,10 @@ import (
) )
func (t *thirdServer) PartLimit(ctx context.Context, req *third.PartLimitReq) (*third.PartLimitResp, error) { func (t *thirdServer) PartLimit(ctx context.Context, req *third.PartLimitReq) (*third.PartLimitResp, error) {
limit := t.s3dataBase.PartLimit() limit, err := t.s3dataBase.PartLimit()
if err != nil {
return nil, err
}
return &third.PartLimitResp{ return &third.PartLimitResp{
MinPartSize: limit.MinPartSize, MinPartSize: limit.MinPartSize,
MaxPartSize: limit.MaxPartSize, MaxPartSize: limit.MaxPartSize,

View File

@ -17,9 +17,14 @@ package third
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/s3/disable"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
@ -29,8 +34,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/third" "github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/s3" "github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cos" "github.com/openimsdk/tools/s3/cos"
@ -60,15 +63,17 @@ type Config struct {
Discovery config.Discovery Discovery config.Discovery
} }
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
logdb, err := mgo.NewLogMongo(mgocli.GetDB()) logdb, err := mgo.NewLogMongo(mgocli.GetDB())
if err != nil { if err != nil {
return err return err
@ -77,15 +82,31 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil { if err != nil {
return err return err
} }
var thirdCache cache.ThirdCache
if rdb == nil {
tc, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
thirdCache = mcache.NewThirdCache(tc)
} else {
thirdCache = redis.NewThirdCache(rdb)
}
// Select the oss method according to the profile policy // Select the oss method according to the profile policy
enable := config.RpcConfig.Object.Enable var o s3.Interface
var ( switch enable := config.RpcConfig.Object.Enable; enable {
o s3.Interface
)
switch enable {
case "minio": case "minio":
o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build()) var minioCache minio.Cache
if rdb == nil {
mc, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
minioCache = mcache.NewMinioCache(mc)
} else {
minioCache = redis.NewMinioCache(rdb)
}
o, err = minio.NewMinio(ctx, minioCache, *config.MinioConfig.Build())
case "cos": case "cos":
o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build()) o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
case "oss": case "oss":
@ -94,6 +115,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
o, err = kodo.NewKodo(*config.RpcConfig.Object.Kodo.Build()) o, err = kodo.NewKodo(*config.RpcConfig.Object.Kodo.Build())
case "aws": case "aws":
o, err = aws.NewAws(*config.RpcConfig.Object.Aws.Build()) o, err = aws.NewAws(*config.RpcConfig.Object.Aws.Build())
case "":
o = disable.NewDisable()
default: default:
err = fmt.Errorf("invalid object enable: %s", enable) err = fmt.Errorf("invalid object enable: %s", enable)
} }
@ -106,7 +129,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
} }
localcache.InitLocalCache(&config.LocalCacheConfig) localcache.InitLocalCache(&config.LocalCacheConfig)
third.RegisterThirdServer(server, &thirdServer{ third.RegisterThirdServer(server, &thirdServer{
thirdDatabase: controller.NewThirdDatabase(redis.NewThirdCache(rdb), logdb), thirdDatabase: controller.NewThirdDatabase(thirdCache, logdb),
s3dataBase: controller.NewS3Database(rdb, o, s3db), s3dataBase: controller.NewS3Database(rdb, o, s3db),
defaultExpire: time.Hour * 24 * 7, defaultExpire: time.Hour * 24 * 7,
config: config, config: config,

View File

@ -23,29 +23,27 @@ import (
"time" "time"
"github.com/openimsdk/open-im-server/v3/internal/rpc/relation" "github.com/openimsdk/open-im-server/v3/internal/rpc/relation"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/group" "github.com/openimsdk/protocol/group"
friendpb "github.com/openimsdk/protocol/relation" friendpb "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
pbuser "github.com/openimsdk/protocol/user" pbuser "github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/pagination" "github.com/openimsdk/tools/db/pagination"
registry "github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -57,7 +55,7 @@ type userServer struct {
db controller.UserDatabase db controller.UserDatabase
friendNotificationSender *relation.FriendNotificationSender friendNotificationSender *relation.FriendNotificationSender
userNotificationSender *UserNotificationSender userNotificationSender *UserNotificationSender
RegisterCenter registry.SvcDiscoveryRegistry RegisterCenter discovery.Conn
config *Config config *Config
webhookClient *webhook.Client webhookClient *webhook.Client
groupClient *rpcli.GroupClient groupClient *rpcli.GroupClient
@ -76,15 +74,17 @@ type Config struct {
Discovery config.Discovery Discovery config.Discovery
} }
func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegistry, server *grpc.Server) error { func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil { if err != nil {
return err return err
} }
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) rdb, err := dbb.Redis(ctx)
if err != nil { if err != nil {
return err return err
} }
users := make([]*tablerelation.User, 0) users := make([]*tablerelation.User, 0)
for _, v := range config.Share.IMAdminUserID { for _, v := range config.Share.IMAdminUserID {

View File

@ -1,47 +1,37 @@
package tools package cron
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd" disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
pbconversation "github.com/openimsdk/protocol/conversation" pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/third" "github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery/etcd" "github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/utils/runtimeenv"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/runtimeenv"
"github.com/robfig/cron/v3" "github.com/robfig/cron/v3"
"google.golang.org/grpc"
) )
type CronTaskConfig struct { type Config struct {
CronTask config.CronTask CronTask config.CronTask
Share config.Share Share config.Share
Discovery config.Discovery Discovery config.Discovery
runTimeEnv string
} }
func Start(ctx context.Context, conf *CronTaskConfig) error { func Start(ctx context.Context, conf *Config, client discovery.Conn, service grpc.ServiceRegistrar) error {
conf.runTimeEnv = runtimeenv.PrintRuntimeEnvironment() log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", runtimeenv.RuntimeEnvironment(), "chatRecordsClearTime", conf.CronTask.CronExecuteTime, "msgDestructTime", conf.CronTask.RetainChatRecords)
log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", conf.runTimeEnv, "chatRecordsClearTime", conf.CronTask.CronExecuteTime, "msgDestructTime", conf.CronTask.RetainChatRecords)
if conf.CronTask.RetainChatRecords < 1 { if conf.CronTask.RetainChatRecords < 1 {
return errs.New("msg destruct time must be greater than 1").Wrap() log.ZInfo(ctx, "disable cron")
<-ctx.Done()
return nil
} }
client, err := kdisc.NewDiscoveryRegister(&conf.Discovery, conf.runTimeEnv, nil)
if err != nil {
return errs.WrapMsg(err, "failed to register discovery service")
}
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()))
ctx = mcontext.SetOpUserID(ctx, conf.Share.IMAdminUserID[0]) ctx = mcontext.SetOpUserID(ctx, conf.Share.IMAdminUserID[0])
msgConn, err := client.GetConn(ctx, conf.Discovery.RpcService.Msg) msgConn, err := client.GetConn(ctx, conf.Discovery.RpcService.Msg)
@ -88,13 +78,15 @@ func Start(ctx context.Context, conf *CronTaskConfig) error {
} }
log.ZDebug(ctx, "start cron task", "CronExecuteTime", conf.CronTask.CronExecuteTime) log.ZDebug(ctx, "start cron task", "CronExecuteTime", conf.CronTask.CronExecuteTime)
srv.cron.Start() srv.cron.Start()
log.ZDebug(ctx, "cron task server is running")
<-ctx.Done() <-ctx.Done()
log.ZDebug(ctx, "cron task server is shutting down")
return nil return nil
} }
type cronServer struct { type cronServer struct {
ctx context.Context ctx context.Context
config *CronTaskConfig config *Config
cron *cron.Cron cron *cron.Cron
msgClient msg.MsgClient msgClient msg.MsgClient
conversationClient pbconversation.ConversationClient conversationClient pbconversation.ConversationClient

View File

@ -1,4 +1,4 @@
package tools package cron
import ( import (
"context" "context"
@ -24,7 +24,7 @@ func TestName(t *testing.T) {
Address: []string{"localhost:12379"}, Address: []string{"localhost:12379"},
}, },
} }
client, err := kdisc.NewDiscoveryRegister(conf, "source") client, err := kdisc.NewDiscoveryRegister(conf, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -1,12 +1,13 @@
package tools package cron
import ( import (
"fmt" "fmt"
"os"
"time"
"github.com/openimsdk/protocol/msg" "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"os"
"time"
) )
func (c *cronServer) deleteMsg() { func (c *cronServer) deleteMsg() {

View File

@ -1,12 +1,13 @@
package tools package cron
import ( import (
"fmt" "fmt"
"os"
"time"
"github.com/openimsdk/protocol/third" "github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"os"
"time"
) )
func (c *cronServer) clearS3() { func (c *cronServer) clearS3() {

View File

@ -1,12 +1,13 @@
package tools package cron
import ( import (
"fmt" "fmt"
"os"
"time"
pbconversation "github.com/openimsdk/protocol/conversation" pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext" "github.com/openimsdk/tools/mcontext"
"os"
"time"
) )
func (c *cronServer) clearUserMsg() { func (c *cronServer) clearUserMsg() {

View File

@ -19,6 +19,7 @@ import (
"github.com/openimsdk/open-im-server/v3/internal/api" "github.com/openimsdk/open-im-server/v3/internal/api"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/open-im-server/v3/version" "github.com/openimsdk/open-im-server/v3/version"
"github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/system/program"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -32,7 +33,7 @@ type ApiCmd struct {
} }
func NewApiCmd() *ApiCmd { func NewApiCmd() *ApiCmd {
apiConfig := api.Config{AllConfig: &config.AllConfig{}} var apiConfig api.Config
ret := &ApiCmd{apiConfig: &apiConfig} ret := &ApiCmd{apiConfig: &apiConfig}
ret.configMap = map[string]any{ ret.configMap = map[string]any{
config.DiscoveryConfigFilename: &apiConfig.Discovery, config.DiscoveryConfigFilename: &apiConfig.Discovery,
@ -61,7 +62,7 @@ func NewApiCmd() *ApiCmd {
ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap)) ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
ret.ctx = context.WithValue(context.Background(), "version", version.Version) ret.ctx = context.WithValue(context.Background(), "version", version.Version)
ret.Command.RunE = func(cmd *cobra.Command, args []string) error { ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
apiConfig.ConfigPath = ret.configPath apiConfig.ConfigPath = config.Path(ret.configPath)
return ret.runE() return ret.runE()
} }
return ret return ret
@ -72,5 +73,22 @@ func (a *ApiCmd) Exec() error {
} }
func (a *ApiCmd) runE() error { func (a *ApiCmd) runE() error {
return api.Start(a.ctx, a.Index(), a.apiConfig) a.apiConfig.Index = config.Index(a.Index())
prometheus := config.Prometheus{
Enable: a.apiConfig.API.Prometheus.Enable,
Ports: a.apiConfig.API.Prometheus.Ports,
}
return startrpc.Start(
a.ctx, &a.apiConfig.Discovery,
&prometheus,
a.apiConfig.API.Api.ListenIP, "",
a.apiConfig.API.Prometheus.AutoSetPorts,
nil, int(a.apiConfig.Index),
a.apiConfig.Discovery.RpcService.MessageGateway,
&a.apiConfig.Notification,
a.apiConfig,
[]string{},
[]string{},
api.Start,
)
} }

View File

@ -38,6 +38,7 @@ func NewAuthRpcCmd() *AuthRpcCmd {
ret.configMap = map[string]any{ ret.configMap = map[string]any{
config.OpenIMRPCAuthCfgFileName: &authConfig.RpcConfig, config.OpenIMRPCAuthCfgFileName: &authConfig.RpcConfig,
config.RedisConfigFileName: &authConfig.RedisConfig, config.RedisConfigFileName: &authConfig.RedisConfig,
config.MongodbConfigFileName: &authConfig.MongoConfig,
config.ShareFileName: &authConfig.Share, config.ShareFileName: &authConfig.Share,
config.DiscoveryConfigFilename: &authConfig.Discovery, config.DiscoveryConfigFilename: &authConfig.Discovery,
} }

View File

@ -17,8 +17,9 @@ package cmd
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/internal/tools" "github.com/openimsdk/open-im-server/v3/internal/tools/cron"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/open-im-server/v3/version" "github.com/openimsdk/open-im-server/v3/version"
"github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/system/program"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -28,11 +29,11 @@ type CronTaskCmd struct {
*RootCmd *RootCmd
ctx context.Context ctx context.Context
configMap map[string]any configMap map[string]any
cronTaskConfig *tools.CronTaskConfig cronTaskConfig *cron.Config
} }
func NewCronTaskCmd() *CronTaskCmd { func NewCronTaskCmd() *CronTaskCmd {
var cronTaskConfig tools.CronTaskConfig var cronTaskConfig cron.Config
ret := &CronTaskCmd{cronTaskConfig: &cronTaskConfig} ret := &CronTaskCmd{cronTaskConfig: &cronTaskConfig}
ret.configMap = map[string]any{ ret.configMap = map[string]any{
config.OpenIMCronTaskCfgFileName: &cronTaskConfig.CronTask, config.OpenIMCronTaskCfgFileName: &cronTaskConfig.CronTask,
@ -52,5 +53,18 @@ func (a *CronTaskCmd) Exec() error {
} }
func (a *CronTaskCmd) runE() error { func (a *CronTaskCmd) runE() error {
return tools.Start(a.ctx, a.cronTaskConfig) var prometheus config.Prometheus
return startrpc.Start(
a.ctx, &a.cronTaskConfig.Discovery,
&prometheus,
"", "",
true,
nil, 0,
"",
nil,
a.cronTaskConfig,
[]string{},
[]string{},
cron.Start,
)
} }

View File

@ -19,6 +19,7 @@ import (
"github.com/openimsdk/open-im-server/v3/internal/msggateway" "github.com/openimsdk/open-im-server/v3/internal/msggateway"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/open-im-server/v3/version" "github.com/openimsdk/open-im-server/v3/version"
"github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/system/program"
@ -55,5 +56,20 @@ func (m *MsgGatewayCmd) Exec() error {
} }
func (m *MsgGatewayCmd) runE() error { func (m *MsgGatewayCmd) runE() error {
return msggateway.Start(m.ctx, m.Index(), m.msgGatewayConfig) m.msgGatewayConfig.Index = config.Index(m.Index())
rpc := m.msgGatewayConfig.MsgGateway.RPC
var prometheus config.Prometheus
return startrpc.Start(
m.ctx, &m.msgGatewayConfig.Discovery,
&prometheus,
rpc.ListenIP, rpc.RegisterIP,
rpc.AutoSetPorts,
rpc.Ports, int(m.msgGatewayConfig.Index),
m.msgGatewayConfig.Discovery.RpcService.MessageGateway,
nil,
m.msgGatewayConfig,
[]string{},
[]string{},
msggateway.Start,
)
} }

View File

@ -19,6 +19,7 @@ import (
"github.com/openimsdk/open-im-server/v3/internal/msgtransfer" "github.com/openimsdk/open-im-server/v3/internal/msgtransfer"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/open-im-server/v3/version" "github.com/openimsdk/open-im-server/v3/version"
"github.com/openimsdk/tools/system/program" "github.com/openimsdk/tools/system/program"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -56,5 +57,19 @@ func (m *MsgTransferCmd) Exec() error {
} }
func (m *MsgTransferCmd) runE() error { func (m *MsgTransferCmd) runE() error {
return msgtransfer.Start(m.ctx, m.Index(), m.msgTransferConfig) m.msgTransferConfig.Index = config.Index(m.Index())
var prometheus config.Prometheus
return startrpc.Start(
m.ctx, &m.msgTransferConfig.Discovery,
&prometheus,
"", "",
true,
nil, int(m.msgTransferConfig.Index),
"",
nil,
m.msgTransferConfig,
[]string{},
[]string{},
msgtransfer.Start,
)
} }

View File

@ -38,6 +38,7 @@ func NewPushRpcCmd() *PushRpcCmd {
ret.configMap = map[string]any{ ret.configMap = map[string]any{
config.OpenIMPushCfgFileName: &pushConfig.RpcConfig, config.OpenIMPushCfgFileName: &pushConfig.RpcConfig,
config.RedisConfigFileName: &pushConfig.RedisConfig, config.RedisConfigFileName: &pushConfig.RedisConfig,
config.MongodbConfigFileName: &pushConfig.MongoConfig,
config.KafkaConfigFileName: &pushConfig.KafkaConfig, config.KafkaConfigFileName: &pushConfig.KafkaConfig,
config.ShareFileName: &pushConfig.Share, config.ShareFileName: &pushConfig.Share,
config.NotificationFileName: &pushConfig.NotificationConfig, config.NotificationFileName: &pushConfig.NotificationConfig,
@ -48,7 +49,7 @@ func NewPushRpcCmd() *PushRpcCmd {
ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap)) ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
ret.ctx = context.WithValue(context.Background(), "version", version.Version) ret.ctx = context.WithValue(context.Background(), "version", version.Version)
ret.Command.RunE = func(cmd *cobra.Command, args []string) error { ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
ret.pushConfig.FcmConfigPath = ret.ConfigPath() ret.pushConfig.FcmConfigPath = config.Path(ret.ConfigPath())
return ret.runE() return ret.runE()
} }
return ret return ret

View File

@ -12,7 +12,6 @@ import (
"github.com/openimsdk/tools/discovery/etcd" "github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/runtimeenv"
"github.com/spf13/cobra" "github.com/spf13/cobra"
clientv3 "go.etcd.io/etcd/client/v3" clientv3 "go.etcd.io/etcd/client/v3"
) )
@ -86,14 +85,12 @@ func (r *RootCmd) initEtcd() error {
return err return err
} }
disConfig := config.Discovery{} disConfig := config.Discovery{}
env := runtimeenv.PrintRuntimeEnvironment() err = config.Load(configDirectory, config.DiscoveryConfigFilename, config.EnvPrefixMap[config.DiscoveryConfigFilename], &disConfig)
err = config.Load(configDirectory, config.DiscoveryConfigFilename, config.EnvPrefixMap[config.DiscoveryConfigFilename],
env, &disConfig)
if err != nil { if err != nil {
return err return err
} }
if disConfig.Enable == config.ETCD { if disConfig.Enable == config.ETCD {
discov, _ := kdisc.NewDiscoveryRegister(&disConfig, env, nil) discov, _ := kdisc.NewDiscoveryRegister(&disConfig, nil)
r.etcdClient = discov.(*etcd.SvcDiscoveryRegistryImpl).GetClient() r.etcdClient = discov.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
} }
return nil return nil
@ -125,18 +122,16 @@ func (r *RootCmd) initializeConfiguration(cmd *cobra.Command, opts *CmdOpts) err
return err return err
} }
runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
// Load common configuration file // Load common configuration file
//opts.configMap[ShareFileName] = StructEnvPrefix{EnvPrefix: shareEnvPrefix, ConfigStruct: &r.share} //opts.configMap[ShareFileName] = StructEnvPrefix{EnvPrefix: shareEnvPrefix, ConfigStruct: &r.share}
for configFileName, configStruct := range opts.configMap { for configFileName, configStruct := range opts.configMap {
err := config.Load(configDirectory, configFileName, config.EnvPrefixMap[configFileName], runtimeEnv, configStruct) err := config.Load(configDirectory, configFileName, config.EnvPrefixMap[configFileName], configStruct)
if err != nil { if err != nil {
return err return err
} }
} }
// Load common log configuration file // Load common log configuration file
return config.Load(configDirectory, config.LogConfigFileName, config.EnvPrefixMap[config.LogConfigFileName], runtimeEnv, &r.log) return config.Load(configDirectory, config.LogConfigFileName, config.EnvPrefixMap[config.LogConfigFileName], &r.log)
} }
func (r *RootCmd) updateConfigFromEtcd(opts *CmdOpts) error { func (r *RootCmd) updateConfigFromEtcd(opts *CmdOpts) error {
@ -208,7 +203,6 @@ func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error { func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
err := log.InitLoggerFromConfig( err := log.InitLoggerFromConfig(
cmdOpts.loggerPrefixName, cmdOpts.loggerPrefixName,
r.processName, r.processName,
"", "", "", "",

View File

@ -28,378 +28,348 @@ import (
"github.com/openimsdk/tools/s3/oss" "github.com/openimsdk/tools/s3/oss"
) )
const StructTagName = "yaml"
type Path string
type Index int
type CacheConfig struct { type CacheConfig struct {
Topic string `mapstructure:"topic"` Topic string `yaml:"topic"`
SlotNum int `mapstructure:"slotNum"` SlotNum int `yaml:"slotNum"`
SlotSize int `mapstructure:"slotSize"` SlotSize int `yaml:"slotSize"`
SuccessExpire int `mapstructure:"successExpire"` SuccessExpire int `yaml:"successExpire"`
FailedExpire int `mapstructure:"failedExpire"` FailedExpire int `yaml:"failedExpire"`
} }
type LocalCache struct { type LocalCache struct {
User CacheConfig `mapstructure:"user"` User CacheConfig `yaml:"user"`
Group CacheConfig `mapstructure:"group"` Group CacheConfig `yaml:"group"`
Friend CacheConfig `mapstructure:"friend"` Friend CacheConfig `yaml:"friend"`
Conversation CacheConfig `mapstructure:"conversation"` Conversation CacheConfig `yaml:"conversation"`
} }
type Log struct { type Log struct {
StorageLocation string `mapstructure:"storageLocation"` StorageLocation string `yaml:"storageLocation"`
RotationTime uint `mapstructure:"rotationTime"` RotationTime uint `yaml:"rotationTime"`
RemainRotationCount uint `mapstructure:"remainRotationCount"` RemainRotationCount uint `yaml:"remainRotationCount"`
RemainLogLevel int `mapstructure:"remainLogLevel"` RemainLogLevel int `yaml:"remainLogLevel"`
IsStdout bool `mapstructure:"isStdout"` IsStdout bool `yaml:"isStdout"`
IsJson bool `mapstructure:"isJson"` IsJson bool `yaml:"isJson"`
IsSimplify bool `mapstructure:"isSimplify"` IsSimplify bool `yaml:"isSimplify"`
WithStack bool `mapstructure:"withStack"` WithStack bool `yaml:"withStack"`
} }
type Minio struct { type Minio struct {
Bucket string `mapstructure:"bucket"` Bucket string `yaml:"bucket"`
AccessKeyID string `mapstructure:"accessKeyID"` AccessKeyID string `yaml:"accessKeyID"`
SecretAccessKey string `mapstructure:"secretAccessKey"` SecretAccessKey string `yaml:"secretAccessKey"`
SessionToken string `mapstructure:"sessionToken"` SessionToken string `yaml:"sessionToken"`
InternalAddress string `mapstructure:"internalAddress"` InternalAddress string `yaml:"internalAddress"`
ExternalAddress string `mapstructure:"externalAddress"` ExternalAddress string `yaml:"externalAddress"`
PublicRead bool `mapstructure:"publicRead"` PublicRead bool `yaml:"publicRead"`
} }
type Mongo struct { type Mongo struct {
URI string `mapstructure:"uri"` URI string `yaml:"uri"`
Address []string `mapstructure:"address"` Address []string `yaml:"address"`
Database string `mapstructure:"database"` Database string `yaml:"database"`
Username string `mapstructure:"username"` Username string `yaml:"username"`
Password string `mapstructure:"password"` Password string `yaml:"password"`
AuthSource string `mapstructure:"authSource"` AuthSource string `yaml:"authSource"`
MaxPoolSize int `mapstructure:"maxPoolSize"` MaxPoolSize int `yaml:"maxPoolSize"`
MaxRetry int `mapstructure:"maxRetry"` MaxRetry int `yaml:"maxRetry"`
} }
type Kafka struct { type Kafka struct {
Username string `mapstructure:"username"` Username string `yaml:"username"`
Password string `mapstructure:"password"` Password string `yaml:"password"`
ProducerAck string `mapstructure:"producerAck"` ProducerAck string `yaml:"producerAck"`
CompressType string `mapstructure:"compressType"` CompressType string `yaml:"compressType"`
Address []string `mapstructure:"address"` Address []string `yaml:"address"`
ToRedisTopic string `mapstructure:"toRedisTopic"` ToRedisTopic string `yaml:"toRedisTopic"`
ToMongoTopic string `mapstructure:"toMongoTopic"` ToMongoTopic string `yaml:"toMongoTopic"`
ToPushTopic string `mapstructure:"toPushTopic"` ToPushTopic string `yaml:"toPushTopic"`
ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"` ToOfflinePushTopic string `yaml:"toOfflinePushTopic"`
ToRedisGroupID string `mapstructure:"toRedisGroupID"` ToRedisGroupID string `yaml:"toRedisGroupID"`
ToMongoGroupID string `mapstructure:"toMongoGroupID"` ToMongoGroupID string `yaml:"toMongoGroupID"`
ToPushGroupID string `mapstructure:"toPushGroupID"` ToPushGroupID string `yaml:"toPushGroupID"`
ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"` ToOfflineGroupID string `yaml:"toOfflinePushGroupID"`
Tls TLSConfig `mapstructure:"tls"` Tls TLSConfig `yaml:"tls"`
} }
type TLSConfig struct { type TLSConfig struct {
EnableTLS bool `mapstructure:"enableTLS"` EnableTLS bool `yaml:"enableTLS"`
CACrt string `mapstructure:"caCrt"` CACrt string `yaml:"caCrt"`
ClientCrt string `mapstructure:"clientCrt"` ClientCrt string `yaml:"clientCrt"`
ClientKey string `mapstructure:"clientKey"` ClientKey string `yaml:"clientKey"`
ClientKeyPwd string `mapstructure:"clientKeyPwd"` ClientKeyPwd string `yaml:"clientKeyPwd"`
InsecureSkipVerify bool `mapstructure:"insecureSkipVerify"` InsecureSkipVerify bool `yaml:"insecureSkipVerify"`
} }
type API struct { type API struct {
Api struct { Api struct {
ListenIP string `mapstructure:"listenIP"` ListenIP string `yaml:"listenIP"`
Ports []int `mapstructure:"ports"` Ports []int `yaml:"ports"`
CompressionLevel int `mapstructure:"compressionLevel"` CompressionLevel int `yaml:"compressionLevel"`
} `mapstructure:"api"` } `yaml:"api"`
Prometheus struct { Prometheus struct {
Enable bool `mapstructure:"enable"` Enable bool `yaml:"enable"`
AutoSetPorts bool `mapstructure:"autoSetPorts"` AutoSetPorts bool `yaml:"autoSetPorts"`
Ports []int `mapstructure:"ports"` Ports []int `yaml:"ports"`
GrafanaURL string `mapstructure:"grafanaURL"` GrafanaURL string `yaml:"grafanaURL"`
} `mapstructure:"prometheus"` } `yaml:"prometheus"`
} }
type CronTask struct { type CronTask struct {
CronExecuteTime string `mapstructure:"cronExecuteTime"` CronExecuteTime string `yaml:"cronExecuteTime"`
RetainChatRecords int `mapstructure:"retainChatRecords"` RetainChatRecords int `yaml:"retainChatRecords"`
FileExpireTime int `mapstructure:"fileExpireTime"` FileExpireTime int `yaml:"fileExpireTime"`
DeleteObjectType []string `mapstructure:"deleteObjectType"` DeleteObjectType []string `yaml:"deleteObjectType"`
} }
type OfflinePushConfig struct { type OfflinePushConfig struct {
Enable bool `mapstructure:"enable"` Enable bool `yaml:"enable"`
Title string `mapstructure:"title"` Title string `yaml:"title"`
Desc string `mapstructure:"desc"` Desc string `yaml:"desc"`
Ext string `mapstructure:"ext"` Ext string `yaml:"ext"`
} }
type NotificationConfig struct { type NotificationConfig struct {
IsSendMsg bool `mapstructure:"isSendMsg"` IsSendMsg bool `yaml:"isSendMsg"`
ReliabilityLevel int `mapstructure:"reliabilityLevel"` ReliabilityLevel int `yaml:"reliabilityLevel"`
UnreadCount bool `mapstructure:"unreadCount"` UnreadCount bool `yaml:"unreadCount"`
OfflinePush OfflinePushConfig `mapstructure:"offlinePush"` OfflinePush OfflinePushConfig `yaml:"offlinePush"`
} }
type Notification struct { type Notification struct {
GroupCreated NotificationConfig `mapstructure:"groupCreated"` GroupCreated NotificationConfig `yaml:"groupCreated"`
GroupInfoSet NotificationConfig `mapstructure:"groupInfoSet"` GroupInfoSet NotificationConfig `yaml:"groupInfoSet"`
JoinGroupApplication NotificationConfig `mapstructure:"joinGroupApplication"` JoinGroupApplication NotificationConfig `yaml:"joinGroupApplication"`
MemberQuit NotificationConfig `mapstructure:"memberQuit"` MemberQuit NotificationConfig `yaml:"memberQuit"`
GroupApplicationAccepted NotificationConfig `mapstructure:"groupApplicationAccepted"` GroupApplicationAccepted NotificationConfig `yaml:"groupApplicationAccepted"`
GroupApplicationRejected NotificationConfig `mapstructure:"groupApplicationRejected"` GroupApplicationRejected NotificationConfig `yaml:"groupApplicationRejected"`
GroupOwnerTransferred NotificationConfig `mapstructure:"groupOwnerTransferred"` GroupOwnerTransferred NotificationConfig `yaml:"groupOwnerTransferred"`
MemberKicked NotificationConfig `mapstructure:"memberKicked"` MemberKicked NotificationConfig `yaml:"memberKicked"`
MemberInvited NotificationConfig `mapstructure:"memberInvited"` MemberInvited NotificationConfig `yaml:"memberInvited"`
MemberEnter NotificationConfig `mapstructure:"memberEnter"` MemberEnter NotificationConfig `yaml:"memberEnter"`
GroupDismissed NotificationConfig `mapstructure:"groupDismissed"` GroupDismissed NotificationConfig `yaml:"groupDismissed"`
GroupMuted NotificationConfig `mapstructure:"groupMuted"` GroupMuted NotificationConfig `yaml:"groupMuted"`
GroupCancelMuted NotificationConfig `mapstructure:"groupCancelMuted"` GroupCancelMuted NotificationConfig `yaml:"groupCancelMuted"`
GroupMemberMuted NotificationConfig `mapstructure:"groupMemberMuted"` GroupMemberMuted NotificationConfig `yaml:"groupMemberMuted"`
GroupMemberCancelMuted NotificationConfig `mapstructure:"groupMemberCancelMuted"` GroupMemberCancelMuted NotificationConfig `yaml:"groupMemberCancelMuted"`
GroupMemberInfoSet NotificationConfig `mapstructure:"groupMemberInfoSet"` GroupMemberInfoSet NotificationConfig `yaml:"groupMemberInfoSet"`
GroupMemberSetToAdmin NotificationConfig `yaml:"groupMemberSetToAdmin"` GroupMemberSetToAdmin NotificationConfig `yaml:"groupMemberSetToAdmin"`
GroupMemberSetToOrdinary NotificationConfig `yaml:"groupMemberSetToOrdinaryUser"` GroupMemberSetToOrdinary NotificationConfig `yaml:"groupMemberSetToOrdinaryUser"`
GroupInfoSetAnnouncement NotificationConfig `mapstructure:"groupInfoSetAnnouncement"` GroupInfoSetAnnouncement NotificationConfig `yaml:"groupInfoSetAnnouncement"`
GroupInfoSetName NotificationConfig `mapstructure:"groupInfoSetName"` GroupInfoSetName NotificationConfig `yaml:"groupInfoSetName"`
FriendApplicationAdded NotificationConfig `mapstructure:"friendApplicationAdded"` FriendApplicationAdded NotificationConfig `yaml:"friendApplicationAdded"`
FriendApplicationApproved NotificationConfig `mapstructure:"friendApplicationApproved"` FriendApplicationApproved NotificationConfig `yaml:"friendApplicationApproved"`
FriendApplicationRejected NotificationConfig `mapstructure:"friendApplicationRejected"` FriendApplicationRejected NotificationConfig `yaml:"friendApplicationRejected"`
FriendAdded NotificationConfig `mapstructure:"friendAdded"` FriendAdded NotificationConfig `yaml:"friendAdded"`
FriendDeleted NotificationConfig `mapstructure:"friendDeleted"` FriendDeleted NotificationConfig `yaml:"friendDeleted"`
FriendRemarkSet NotificationConfig `mapstructure:"friendRemarkSet"` FriendRemarkSet NotificationConfig `yaml:"friendRemarkSet"`
BlackAdded NotificationConfig `mapstructure:"blackAdded"` BlackAdded NotificationConfig `yaml:"blackAdded"`
BlackDeleted NotificationConfig `mapstructure:"blackDeleted"` BlackDeleted NotificationConfig `yaml:"blackDeleted"`
FriendInfoUpdated NotificationConfig `mapstructure:"friendInfoUpdated"` FriendInfoUpdated NotificationConfig `yaml:"friendInfoUpdated"`
UserInfoUpdated NotificationConfig `mapstructure:"userInfoUpdated"` UserInfoUpdated NotificationConfig `yaml:"userInfoUpdated"`
UserStatusChanged NotificationConfig `mapstructure:"userStatusChanged"` UserStatusChanged NotificationConfig `yaml:"userStatusChanged"`
ConversationChanged NotificationConfig `mapstructure:"conversationChanged"` ConversationChanged NotificationConfig `yaml:"conversationChanged"`
ConversationSetPrivate NotificationConfig `mapstructure:"conversationSetPrivate"` ConversationSetPrivate NotificationConfig `yaml:"conversationSetPrivate"`
} }
type Prometheus struct { type Prometheus struct {
Enable bool `mapstructure:"enable"` Enable bool `yaml:"enable"`
Ports []int `mapstructure:"ports"` Ports []int `yaml:"ports"`
} }
type MsgGateway struct { type MsgGateway struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
AutoSetPorts bool `mapstructure:"autoSetPorts"` ListenIP string `yaml:"listenIP"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
ListenIP string `mapstructure:"listenIP"`
LongConnSvr struct { LongConnSvr struct {
Ports []int `mapstructure:"ports"` Ports []int `yaml:"ports"`
WebsocketMaxConnNum int `mapstructure:"websocketMaxConnNum"` WebsocketMaxConnNum int `yaml:"websocketMaxConnNum"`
WebsocketMaxMsgLen int `mapstructure:"websocketMaxMsgLen"` WebsocketMaxMsgLen int `yaml:"websocketMaxMsgLen"`
WebsocketTimeout int `mapstructure:"websocketTimeout"` WebsocketTimeout int `yaml:"websocketTimeout"`
} `mapstructure:"longConnSvr"` } `yaml:"longConnSvr"`
} }
type MsgTransfer struct { type MsgTransfer struct {
Prometheus struct { Prometheus struct {
Enable bool `mapstructure:"enable"` Enable bool `yaml:"enable"`
AutoSetPorts bool `mapstructure:"autoSetPorts"` AutoSetPorts bool `yaml:"autoSetPorts"`
Ports []int `mapstructure:"ports"` Ports []int `yaml:"ports"`
} `mapstructure:"prometheus"` } `yaml:"prometheus"`
} }
type Push struct { type Push struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"` MaxConcurrentWorkers int `yaml:"maxConcurrentWorkers"`
AutoSetPorts bool `mapstructure:"autoSetPorts"` Enable string `yaml:"enable"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
MaxConcurrentWorkers int `mapstructure:"maxConcurrentWorkers"`
Enable string `mapstructure:"enable"`
GeTui struct { GeTui struct {
PushUrl string `mapstructure:"pushUrl"` PushUrl string `yaml:"pushUrl"`
MasterSecret string `mapstructure:"masterSecret"` MasterSecret string `yaml:"masterSecret"`
AppKey string `mapstructure:"appKey"` AppKey string `yaml:"appKey"`
Intent string `mapstructure:"intent"` Intent string `yaml:"intent"`
ChannelID string `mapstructure:"channelID"` ChannelID string `yaml:"channelID"`
ChannelName string `mapstructure:"channelName"` ChannelName string `yaml:"channelName"`
} `mapstructure:"geTui"` } `yaml:"geTui"`
FCM struct { FCM struct {
FilePath string `mapstructure:"filePath"` FilePath string `yaml:"filePath"`
AuthURL string `mapstructure:"authURL"` AuthURL string `yaml:"authURL"`
} `mapstructure:"fcm"` } `yaml:"fcm"`
JPush struct { JPush struct {
AppKey string `mapstructure:"appKey"` AppKey string `yaml:"appKey"`
MasterSecret string `mapstructure:"masterSecret"` MasterSecret string `yaml:"masterSecret"`
PushURL string `mapstructure:"pushURL"` PushURL string `yaml:"pushURL"`
PushIntent string `mapstructure:"pushIntent"` PushIntent string `yaml:"pushIntent"`
} `mapstructure:"jpush"` } `yaml:"jpush"`
IOSPush struct { IOSPush struct {
PushSound string `mapstructure:"pushSound"` PushSound string `yaml:"pushSound"`
BadgeCount bool `mapstructure:"badgeCount"` BadgeCount bool `yaml:"badgeCount"`
Production bool `mapstructure:"production"` Production bool `yaml:"production"`
} `mapstructure:"iosPush"` } `yaml:"iosPush"`
FullUserCache bool `mapstructure:"fullUserCache"` FullUserCache bool `yaml:"fullUserCache"`
} }
type Auth struct { type Auth struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"`
AutoSetPorts bool `mapstructure:"autoSetPorts"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
TokenPolicy struct { TokenPolicy struct {
Expire int64 `mapstructure:"expire"` Expire int64 `yaml:"expire"`
} `mapstructure:"tokenPolicy"` } `yaml:"tokenPolicy"`
} }
type Conversation struct { type Conversation struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"`
AutoSetPorts bool `mapstructure:"autoSetPorts"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
} }
type Friend struct { type Friend struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"`
AutoSetPorts bool `mapstructure:"autoSetPorts"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
} }
type Group struct { type Group struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"` EnableHistoryForNewMembers bool `yaml:"enableHistoryForNewMembers"`
AutoSetPorts bool `mapstructure:"autoSetPorts"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
EnableHistoryForNewMembers bool `mapstructure:"enableHistoryForNewMembers"`
} }
type Msg struct { type Msg struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"` FriendVerify bool `yaml:"friendVerify"`
AutoSetPorts bool `mapstructure:"autoSetPorts"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
FriendVerify bool `mapstructure:"friendVerify"`
} }
type Third struct { type Third struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"`
AutoSetPorts bool `mapstructure:"autoSetPorts"`
Ports []int `mapstructure:"ports"`
} `mapstructure:"rpc"`
Prometheus Prometheus `mapstructure:"prometheus"`
Object struct { Object struct {
Enable string `mapstructure:"enable"` Enable string `yaml:"enable"`
Cos Cos `mapstructure:"cos"` Cos Cos `yaml:"cos"`
Oss Oss `mapstructure:"oss"` Oss Oss `yaml:"oss"`
Kodo Kodo `mapstructure:"kodo"` Kodo Kodo `yaml:"kodo"`
Aws Aws `mapstructure:"aws"` Aws Aws `yaml:"aws"`
} `mapstructure:"object"` } `yaml:"object"`
} }
type Cos struct { type Cos struct {
BucketURL string `mapstructure:"bucketURL"` BucketURL string `yaml:"bucketURL"`
SecretID string `mapstructure:"secretID"` SecretID string `yaml:"secretID"`
SecretKey string `mapstructure:"secretKey"` SecretKey string `yaml:"secretKey"`
SessionToken string `mapstructure:"sessionToken"` SessionToken string `yaml:"sessionToken"`
PublicRead bool `mapstructure:"publicRead"` PublicRead bool `yaml:"publicRead"`
} }
type Oss struct { type Oss struct {
Endpoint string `mapstructure:"endpoint"` Endpoint string `yaml:"endpoint"`
Bucket string `mapstructure:"bucket"` Bucket string `yaml:"bucket"`
BucketURL string `mapstructure:"bucketURL"` BucketURL string `yaml:"bucketURL"`
AccessKeyID string `mapstructure:"accessKeyID"` AccessKeyID string `yaml:"accessKeyID"`
AccessKeySecret string `mapstructure:"accessKeySecret"` AccessKeySecret string `yaml:"accessKeySecret"`
SessionToken string `mapstructure:"sessionToken"` SessionToken string `yaml:"sessionToken"`
PublicRead bool `mapstructure:"publicRead"` PublicRead bool `yaml:"publicRead"`
} }
type Kodo struct { type Kodo struct {
Endpoint string `mapstructure:"endpoint"` Endpoint string `yaml:"endpoint"`
Bucket string `mapstructure:"bucket"` Bucket string `yaml:"bucket"`
BucketURL string `mapstructure:"bucketURL"` BucketURL string `yaml:"bucketURL"`
AccessKeyID string `mapstructure:"accessKeyID"` AccessKeyID string `yaml:"accessKeyID"`
AccessKeySecret string `mapstructure:"accessKeySecret"` AccessKeySecret string `yaml:"accessKeySecret"`
SessionToken string `mapstructure:"sessionToken"` SessionToken string `yaml:"sessionToken"`
PublicRead bool `mapstructure:"publicRead"` PublicRead bool `yaml:"publicRead"`
} }
type Aws struct { type Aws struct {
Region string `mapstructure:"region"` Region string `yaml:"region"`
Bucket string `mapstructure:"bucket"` Bucket string `yaml:"bucket"`
AccessKeyID string `mapstructure:"accessKeyID"` AccessKeyID string `yaml:"accessKeyID"`
SecretAccessKey string `mapstructure:"secretAccessKey"` SecretAccessKey string `yaml:"secretAccessKey"`
SessionToken string `mapstructure:"sessionToken"` SessionToken string `yaml:"sessionToken"`
PublicRead bool `mapstructure:"publicRead"` PublicRead bool `yaml:"publicRead"`
} }
type User struct { type User struct {
RPC struct { RPC RPC `yaml:"rpc"`
RegisterIP string `mapstructure:"registerIP"` Prometheus Prometheus `yaml:"prometheus"`
ListenIP string `mapstructure:"listenIP"` }
AutoSetPorts bool `mapstructure:"autoSetPorts"`
Ports []int `mapstructure:"ports"` type RPC struct {
} `mapstructure:"rpc"` RegisterIP string `yaml:"registerIP"`
Prometheus Prometheus `mapstructure:"prometheus"` ListenIP string `yaml:"listenIP"`
AutoSetPorts bool `yaml:"autoSetPorts"`
Ports []int `yaml:"ports"`
} }
type Redis struct { type Redis struct {
Address []string `mapstructure:"address"` Disable bool `yaml:"-"`
Username string `mapstructure:"username"` Address []string `yaml:"address"`
Password string `mapstructure:"password"` Username string `yaml:"username"`
ClusterMode bool `mapstructure:"clusterMode"` Password string `yaml:"password"`
DB int `mapstructure:"storage"` ClusterMode bool `yaml:"clusterMode"`
MaxRetry int `mapstructure:"maxRetry"` DB int `yaml:"storage"`
PoolSize int `mapstructure:"poolSize"` MaxRetry int `yaml:"maxRetry"`
PoolSize int `yaml:"poolSize"`
} }
type BeforeConfig struct { type BeforeConfig struct {
Enable bool `mapstructure:"enable"` Enable bool `yaml:"enable"`
Timeout int `mapstructure:"timeout"` Timeout int `yaml:"timeout"`
FailedContinue bool `mapstructure:"failedContinue"` FailedContinue bool `yaml:"failedContinue"`
AllowedTypes []string `mapstructure:"allowedTypes"` AllowedTypes []string `yaml:"allowedTypes"`
DeniedTypes []string `mapstructure:"deniedTypes"` DeniedTypes []string `yaml:"deniedTypes"`
} }
type AfterConfig struct { type AfterConfig struct {
Enable bool `mapstructure:"enable"` Enable bool `yaml:"enable"`
Timeout int `mapstructure:"timeout"` Timeout int `yaml:"timeout"`
AttentionIds []string `mapstructure:"attentionIds"` AttentionIds []string `yaml:"attentionIds"`
AllowedTypes []string `mapstructure:"allowedTypes"` AllowedTypes []string `yaml:"allowedTypes"`
DeniedTypes []string `mapstructure:"deniedTypes"` DeniedTypes []string `yaml:"deniedTypes"`
} }
type Share struct { type Share struct {
Secret string `mapstructure:"secret"` Secret string `yaml:"secret"`
IMAdminUserID []string `mapstructure:"imAdminUserID"` IMAdminUserID []string `yaml:"imAdminUserID"`
MultiLogin MultiLogin `mapstructure:"multiLogin"` MultiLogin MultiLogin `yaml:"multiLogin"`
} }
type MultiLogin struct { type MultiLogin struct {
Policy int `mapstructure:"policy"` Policy int `yaml:"policy"`
MaxNumOneEnd int `mapstructure:"maxNumOneEnd"` MaxNumOneEnd int `yaml:"maxNumOneEnd"`
} }
type RpcService struct { type RpcService struct {
User string `mapstructure:"user"` User string `yaml:"user"`
Friend string `mapstructure:"friend"` Friend string `yaml:"friend"`
Msg string `mapstructure:"msg"` Msg string `yaml:"msg"`
Push string `mapstructure:"push"` Push string `yaml:"push"`
MessageGateway string `mapstructure:"messageGateway"` MessageGateway string `yaml:"messageGateway"`
Group string `mapstructure:"group"` Group string `yaml:"group"`
Auth string `mapstructure:"auth"` Auth string `yaml:"auth"`
Conversation string `mapstructure:"conversation"` Conversation string `yaml:"conversation"`
Third string `mapstructure:"third"` Third string `yaml:"third"`
} }
func (r *RpcService) GetServiceNames() []string { func (r *RpcService) GetServiceNames() []string {
@ -418,80 +388,80 @@ func (r *RpcService) GetServiceNames() []string {
// FullConfig stores all configurations for before and after events // FullConfig stores all configurations for before and after events
type Webhooks struct { type Webhooks struct {
URL string `mapstructure:"url"` URL string `yaml:"url"`
BeforeSendSingleMsg BeforeConfig `mapstructure:"beforeSendSingleMsg"` BeforeSendSingleMsg BeforeConfig `yaml:"beforeSendSingleMsg"`
BeforeUpdateUserInfoEx BeforeConfig `mapstructure:"beforeUpdateUserInfoEx"` BeforeUpdateUserInfoEx BeforeConfig `yaml:"beforeUpdateUserInfoEx"`
AfterUpdateUserInfoEx AfterConfig `mapstructure:"afterUpdateUserInfoEx"` AfterUpdateUserInfoEx AfterConfig `yaml:"afterUpdateUserInfoEx"`
AfterSendSingleMsg AfterConfig `mapstructure:"afterSendSingleMsg"` AfterSendSingleMsg AfterConfig `yaml:"afterSendSingleMsg"`
BeforeSendGroupMsg BeforeConfig `mapstructure:"beforeSendGroupMsg"` BeforeSendGroupMsg BeforeConfig `yaml:"beforeSendGroupMsg"`
BeforeMsgModify BeforeConfig `mapstructure:"beforeMsgModify"` BeforeMsgModify BeforeConfig `yaml:"beforeMsgModify"`
AfterSendGroupMsg AfterConfig `mapstructure:"afterSendGroupMsg"` AfterSendGroupMsg AfterConfig `yaml:"afterSendGroupMsg"`
AfterUserOnline AfterConfig `mapstructure:"afterUserOnline"` AfterUserOnline AfterConfig `yaml:"afterUserOnline"`
AfterUserOffline AfterConfig `mapstructure:"afterUserOffline"` AfterUserOffline AfterConfig `yaml:"afterUserOffline"`
AfterUserKickOff AfterConfig `mapstructure:"afterUserKickOff"` AfterUserKickOff AfterConfig `yaml:"afterUserKickOff"`
BeforeOfflinePush BeforeConfig `mapstructure:"beforeOfflinePush"` BeforeOfflinePush BeforeConfig `yaml:"beforeOfflinePush"`
BeforeOnlinePush BeforeConfig `mapstructure:"beforeOnlinePush"` BeforeOnlinePush BeforeConfig `yaml:"beforeOnlinePush"`
BeforeGroupOnlinePush BeforeConfig `mapstructure:"beforeGroupOnlinePush"` BeforeGroupOnlinePush BeforeConfig `yaml:"beforeGroupOnlinePush"`
BeforeAddFriend BeforeConfig `mapstructure:"beforeAddFriend"` BeforeAddFriend BeforeConfig `yaml:"beforeAddFriend"`
BeforeUpdateUserInfo BeforeConfig `mapstructure:"beforeUpdateUserInfo"` BeforeUpdateUserInfo BeforeConfig `yaml:"beforeUpdateUserInfo"`
AfterUpdateUserInfo AfterConfig `mapstructure:"afterUpdateUserInfo"` AfterUpdateUserInfo AfterConfig `yaml:"afterUpdateUserInfo"`
BeforeCreateGroup BeforeConfig `mapstructure:"beforeCreateGroup"` BeforeCreateGroup BeforeConfig `yaml:"beforeCreateGroup"`
AfterCreateGroup AfterConfig `mapstructure:"afterCreateGroup"` AfterCreateGroup AfterConfig `yaml:"afterCreateGroup"`
BeforeMemberJoinGroup BeforeConfig `mapstructure:"beforeMemberJoinGroup"` BeforeMemberJoinGroup BeforeConfig `yaml:"beforeMemberJoinGroup"`
BeforeSetGroupMemberInfo BeforeConfig `mapstructure:"beforeSetGroupMemberInfo"` BeforeSetGroupMemberInfo BeforeConfig `yaml:"beforeSetGroupMemberInfo"`
AfterSetGroupMemberInfo AfterConfig `mapstructure:"afterSetGroupMemberInfo"` AfterSetGroupMemberInfo AfterConfig `yaml:"afterSetGroupMemberInfo"`
AfterQuitGroup AfterConfig `mapstructure:"afterQuitGroup"` AfterQuitGroup AfterConfig `yaml:"afterQuitGroup"`
AfterKickGroupMember AfterConfig `mapstructure:"afterKickGroupMember"` AfterKickGroupMember AfterConfig `yaml:"afterKickGroupMember"`
AfterDismissGroup AfterConfig `mapstructure:"afterDismissGroup"` AfterDismissGroup AfterConfig `yaml:"afterDismissGroup"`
BeforeApplyJoinGroup BeforeConfig `mapstructure:"beforeApplyJoinGroup"` BeforeApplyJoinGroup BeforeConfig `yaml:"beforeApplyJoinGroup"`
AfterGroupMsgRead AfterConfig `mapstructure:"afterGroupMsgRead"` AfterGroupMsgRead AfterConfig `yaml:"afterGroupMsgRead"`
AfterSingleMsgRead AfterConfig `mapstructure:"afterSingleMsgRead"` AfterSingleMsgRead AfterConfig `yaml:"afterSingleMsgRead"`
BeforeUserRegister BeforeConfig `mapstructure:"beforeUserRegister"` BeforeUserRegister BeforeConfig `yaml:"beforeUserRegister"`
AfterUserRegister AfterConfig `mapstructure:"afterUserRegister"` AfterUserRegister AfterConfig `yaml:"afterUserRegister"`
AfterTransferGroupOwner AfterConfig `mapstructure:"afterTransferGroupOwner"` AfterTransferGroupOwner AfterConfig `yaml:"afterTransferGroupOwner"`
BeforeSetFriendRemark BeforeConfig `mapstructure:"beforeSetFriendRemark"` BeforeSetFriendRemark BeforeConfig `yaml:"beforeSetFriendRemark"`
AfterSetFriendRemark AfterConfig `mapstructure:"afterSetFriendRemark"` AfterSetFriendRemark AfterConfig `yaml:"afterSetFriendRemark"`
AfterGroupMsgRevoke AfterConfig `mapstructure:"afterGroupMsgRevoke"` AfterGroupMsgRevoke AfterConfig `yaml:"afterGroupMsgRevoke"`
AfterJoinGroup AfterConfig `mapstructure:"afterJoinGroup"` AfterJoinGroup AfterConfig `yaml:"afterJoinGroup"`
BeforeInviteUserToGroup BeforeConfig `mapstructure:"beforeInviteUserToGroup"` BeforeInviteUserToGroup BeforeConfig `yaml:"beforeInviteUserToGroup"`
AfterSetGroupInfo AfterConfig `mapstructure:"afterSetGroupInfo"` AfterSetGroupInfo AfterConfig `yaml:"afterSetGroupInfo"`
BeforeSetGroupInfo BeforeConfig `mapstructure:"beforeSetGroupInfo"` BeforeSetGroupInfo BeforeConfig `yaml:"beforeSetGroupInfo"`
AfterSetGroupInfoEx AfterConfig `mapstructure:"afterSetGroupInfoEx"` AfterSetGroupInfoEx AfterConfig `yaml:"afterSetGroupInfoEx"`
BeforeSetGroupInfoEx BeforeConfig `mapstructure:"beforeSetGroupInfoEx"` BeforeSetGroupInfoEx BeforeConfig `yaml:"beforeSetGroupInfoEx"`
AfterRevokeMsg AfterConfig `mapstructure:"afterRevokeMsg"` AfterRevokeMsg AfterConfig `yaml:"afterRevokeMsg"`
BeforeAddBlack BeforeConfig `mapstructure:"beforeAddBlack"` BeforeAddBlack BeforeConfig `yaml:"beforeAddBlack"`
AfterAddFriend AfterConfig `mapstructure:"afterAddFriend"` AfterAddFriend AfterConfig `yaml:"afterAddFriend"`
BeforeAddFriendAgree BeforeConfig `mapstructure:"beforeAddFriendAgree"` BeforeAddFriendAgree BeforeConfig `yaml:"beforeAddFriendAgree"`
AfterAddFriendAgree AfterConfig `mapstructure:"afterAddFriendAgree"` AfterAddFriendAgree AfterConfig `yaml:"afterAddFriendAgree"`
AfterDeleteFriend AfterConfig `mapstructure:"afterDeleteFriend"` AfterDeleteFriend AfterConfig `yaml:"afterDeleteFriend"`
BeforeImportFriends BeforeConfig `mapstructure:"beforeImportFriends"` BeforeImportFriends BeforeConfig `yaml:"beforeImportFriends"`
AfterImportFriends AfterConfig `mapstructure:"afterImportFriends"` AfterImportFriends AfterConfig `yaml:"afterImportFriends"`
AfterRemoveBlack AfterConfig `mapstructure:"afterRemoveBlack"` AfterRemoveBlack AfterConfig `yaml:"afterRemoveBlack"`
} }
type ZooKeeper struct { type ZooKeeper struct {
Schema string `mapstructure:"schema"` Schema string `yaml:"schema"`
Address []string `mapstructure:"address"` Address []string `yaml:"address"`
Username string `mapstructure:"username"` Username string `yaml:"username"`
Password string `mapstructure:"password"` Password string `yaml:"password"`
} }
type Discovery struct { type Discovery struct {
Enable string `mapstructure:"enable"` Enable string `yaml:"enable"`
Etcd Etcd `mapstructure:"etcd"` Etcd Etcd `yaml:"etcd"`
Kubernetes Kubernetes `mapstructure:"kubernetes"` Kubernetes Kubernetes `yaml:"kubernetes"`
RpcService RpcService `mapstructure:"rpcService"` RpcService RpcService `yaml:"rpcService"`
} }
type Kubernetes struct { type Kubernetes struct {
Namespace string `mapstructure:"namespace"` Namespace string `yaml:"namespace"`
} }
type Etcd struct { type Etcd struct {
RootDirectory string `mapstructure:"rootDirectory"` RootDirectory string `yaml:"rootDirectory"`
Address []string `mapstructure:"address"` Address []string `yaml:"address"`
Username string `mapstructure:"username"` Username string `yaml:"username"`
Password string `mapstructure:"password"` Password string `yaml:"password"`
} }
func (m *Mongo) Build() *mongoutil.Config { func (m *Mongo) Build() *mongoutil.Config {
@ -783,7 +753,7 @@ func (a *AllConfig) GetConfigNames() []string {
} }
} }
var ( const (
FileName = "config.yaml" FileName = "config.yaml"
DiscoveryConfigFilename = "discovery.yml" DiscoveryConfigFilename = "discovery.yml"
KafkaConfigFileName = "kafka.yml" KafkaConfigFileName = "kafka.yml"

View File

@ -14,13 +14,16 @@
package config package config
import "github.com/openimsdk/tools/utils/runtimeenv"
const ConfKey = "conf" const ConfKey = "conf"
const ( const (
MountConfigFilePath = "CONFIG_PATH" MountConfigFilePath = "CONFIG_PATH"
DeploymentType = "DEPLOYMENT_TYPE" DeploymentType = "DEPLOYMENT_TYPE"
KUBERNETES = "kubernetes" KUBERNETES = runtimeenv.Kubernetes
ETCD = "etcd" ETCD = "etcd"
//Standalone = "standalone"
) )
const ( const (

View File

@ -0,0 +1,11 @@
package config
var standalone bool
func SetStandalone() {
standalone = true
}
func Standalone() bool {
return standalone
}

View File

@ -7,11 +7,12 @@ import (
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/runtimeenv"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
func Load(configDirectory string, configFileName string, envPrefix string, runtimeEnv string, config any) error { func Load(configDirectory string, configFileName string, envPrefix string, config any) error {
if runtimeEnv == KUBERNETES { if runtimeenv.RuntimeEnvironment() == KUBERNETES {
mountPath := os.Getenv(MountConfigFilePath) mountPath := os.Getenv(MountConfigFilePath)
if mountPath == "" { if mountPath == "" {
return errs.ErrArgs.WrapMsg(MountConfigFilePath + " env is empty") return errs.ErrArgs.WrapMsg(MountConfigFilePath + " env is empty")
@ -35,7 +36,7 @@ func loadConfig(path string, envPrefix string, config any) error {
} }
if err := v.Unmarshal(config, func(config *mapstructure.DecoderConfig) { if err := v.Unmarshal(config, func(config *mapstructure.DecoderConfig) {
config.TagName = "mapstructure" config.TagName = StructTagName
}); err != nil { }); err != nil {
return errs.WrapMsg(err, "failed to unmarshal config", "path", path, "envPrefix", envPrefix) return errs.WrapMsg(err, "failed to unmarshal config", "path", path, "envPrefix", envPrefix)
} }

View File

@ -19,6 +19,8 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery/standalone"
"github.com/openimsdk/tools/utils/runtimeenv"
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/openimsdk/tools/discovery/kubernetes" "github.com/openimsdk/tools/discovery/kubernetes"
@ -28,8 +30,11 @@ import (
) )
// NewDiscoveryRegister creates a new service discovery and registry client based on the provided environment type. // NewDiscoveryRegister creates a new service discovery and registry client based on the provided environment type.
func NewDiscoveryRegister(discovery *config.Discovery, runtimeEnv string, watchNames []string) (discovery.SvcDiscoveryRegistry, error) { func NewDiscoveryRegister(discovery *config.Discovery, watchNames []string) (discovery.SvcDiscoveryRegistry, error) {
if runtimeEnv == config.KUBERNETES { if config.Standalone() {
return standalone.GetSvcDiscoveryRegistry(), nil
}
if runtimeenv.RuntimeEnvironment() == config.KUBERNETES {
return kubernetes.NewKubernetesConnManager(discovery.Kubernetes.Namespace, return kubernetes.NewKubernetesConnManager(discovery.Kubernetes.Namespace,
grpc.WithDefaultCallOptions( grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(1024*1024*20), grpc.MaxCallSendMsgSize(1024*1024*20),

View File

@ -1,10 +1,11 @@
package prommetrics package prommetrics
import ( import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"net" "net"
"strconv" "strconv"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
var ( var (
@ -24,6 +25,10 @@ var (
) )
) )
func RegistryApi() {
registry.MustRegister(apiCounter, httpCounter)
}
func ApiInit(listener net.Listener) error { func ApiInit(listener net.Listener) error {
apiRegistry := prometheus.NewRegistry() apiRegistry := prometheus.NewRegistry()
cs := append( cs := append(
@ -41,9 +46,3 @@ func APICall(path string, method string, apiCode int) {
func HttpCall(path string, method string, status int) { func HttpCall(path string, method string, status int) {
httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc() httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
} }
//func ApiHandler() http.Handler {
// return promhttp.InstrumentMetricHandler(
// apiRegistry, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}),
// )
//}

View File

@ -1,31 +0,0 @@
package prommetrics
import "fmt"
const (
APIKeyName = "api"
MessageTransferKeyName = "message-transfer"
)
type Target struct {
Target string `json:"target"`
Labels map[string]string `json:"labels"`
}
type RespTarget struct {
Targets []string `json:"targets"`
Labels map[string]string `json:"labels"`
}
func BuildDiscoveryKey(name string) string {
return fmt.Sprintf("%s/%s/%s", "openim", "prometheus_discovery", name)
}
func BuildDefaultTarget(host string, ip int) Target {
return Target{
Target: fmt.Sprintf("%s:%d", host, ip),
Labels: map[string]string{
"namespace": "default",
},
}
}

View File

@ -1,15 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prommetrics // import "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"

View File

@ -24,3 +24,7 @@ var (
Help: "The number of user login", Help: "The number of user login",
}) })
) )
func RegistryAuth() {
registry.MustRegister(UserLoginCounter)
}

View File

@ -36,3 +36,12 @@ var (
Help: "The number of group chat msg failed processed", Help: "The number of group chat msg failed processed",
}) })
) )
func RegistryMsg() {
registry.MustRegister(
SingleChatMsgProcessSuccessCounter,
SingleChatMsgProcessFailedCounter,
GroupChatMsgProcessSuccessCounter,
GroupChatMsgProcessFailedCounter,
)
}

View File

@ -24,3 +24,7 @@ var (
Help: "The number of online user num", Help: "The number of online user num",
}) })
) )
func RegistryMsgGateway() {
registry.MustRegister(OnlineUserGauge)
}

View File

@ -28,3 +28,10 @@ var (
Help: "The number of messages with a push time exceeding 10 seconds", Help: "The number of messages with a push time exceeding 10 seconds",
}) })
) )
func RegistryPush() {
registry.MustRegister(
MsgOfflinePushFailedCounter,
MsgLoneTimePushCounter,
)
}

View File

@ -8,3 +8,7 @@ var (
Help: "The number of user login", Help: "The number of user login",
}) })
) )
func RegistryUser() {
registry.MustRegister(UserRegisterCounter)
}

View File

@ -15,14 +15,42 @@
package prommetrics package prommetrics
import ( import (
"github.com/prometheus/client_golang/prometheus" "errors"
"github.com/prometheus/client_golang/prometheus/collectors" "fmt"
"net" "net"
"net/http" "net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
) )
const commonPath = "/metrics" const commonPath = "/metrics"
var registry = &prometheusRegistry{prometheus.NewRegistry()}
type prometheusRegistry struct {
*prometheus.Registry
}
func (x *prometheusRegistry) MustRegister(cs ...prometheus.Collector) {
for _, c := range cs {
if err := x.Registry.Register(c); err != nil {
if errors.As(err, &prometheus.AlreadyRegisteredError{}) {
continue
}
panic(err)
}
}
}
func init() {
registry.MustRegister(
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
collectors.NewGoCollector(),
)
}
var ( var (
baseCollector = []prometheus.Collector{ baseCollector = []prometheus.Collector{
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
@ -36,3 +64,48 @@ func Init(registry *prometheus.Registry, listener net.Listener, path string, han
srv.Handle(path, handler) srv.Handle(path, handler)
return http.Serve(listener, srv) return http.Serve(listener, srv)
} }
func RegistryAll() {
RegistryApi()
RegistryAuth()
RegistryMsg()
RegistryMsgGateway()
RegistryPush()
RegistryUser()
RegistryRpc()
RegistryTransfer()
}
func Start(listener net.Listener) error {
srv := http.NewServeMux()
srv.Handle(commonPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
return http.Serve(listener, srv)
}
const (
APIKeyName = "api"
MessageTransferKeyName = "message-transfer"
)
type Target struct {
Target string `json:"target"`
Labels map[string]string `json:"labels"`
}
type RespTarget struct {
Targets []string `json:"targets"`
Labels map[string]string `json:"labels"`
}
func BuildDiscoveryKey(name string) string {
return fmt.Sprintf("%s/%s/%s", "openim", "prometheus_discovery", name)
}
func BuildDefaultTarget(host string, ip int) Target {
return Target{
Target: fmt.Sprintf("%s:%d", host, ip),
Labels: map[string]string{
"namespace": "default",
},
}
}

View File

@ -14,6 +14,8 @@
package prommetrics package prommetrics
import "testing"
//func TestNewGrpcPromObj(t *testing.T) { //func TestNewGrpcPromObj(t *testing.T) {
// // Create a custom metric to pass into the NewGrpcPromObj function. // // Create a custom metric to pass into the NewGrpcPromObj function.
// customMetric := prometheus.NewCounter(prometheus.CounterOpts{ // customMetric := prometheus.NewCounter(prometheus.CounterOpts{
@ -67,3 +69,9 @@ package prommetrics
// }) // })
// } // }
//} //}
func TestName(t *testing.T) {
RegistryApi()
RegistryApi()
}

View File

@ -1,12 +1,13 @@
package prommetrics package prommetrics
import ( import (
"net"
"strconv"
gp "github.com/grpc-ecosystem/go-grpc-prometheus" gp "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"net"
"strconv"
) )
const rpcPath = commonPath const rpcPath = commonPath
@ -22,6 +23,10 @@ var (
) )
) )
func RegistryRpc() {
registry.MustRegister(rpcCounter)
}
func RpcInit(cs []prometheus.Collector, listener net.Listener) error { func RpcInit(cs []prometheus.Collector, listener net.Listener) error {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
cs = append(append( cs = append(append(

View File

@ -15,9 +15,10 @@
package prommetrics package prommetrics
import ( import (
"net"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"net"
) )
var ( var (
@ -43,6 +44,16 @@ var (
}) })
) )
func RegistryTransfer() {
registry.MustRegister(
MsgInsertRedisSuccessCounter,
MsgInsertRedisFailedCounter,
MsgInsertMongoSuccessCounter,
MsgInsertMongoFailedCounter,
SeqSetFailedCounter,
)
}
func TransferInit(listener net.Listener) error { func TransferInit(listener net.Listener) error {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
cs := append( cs := append(

View File

@ -1,15 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redispubsub // import "github.com/openimsdk/open-im-server/v3/pkg/common/redispubsub"

View File

@ -1,30 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redispubsub
import "github.com/redis/go-redis/v9"
type Publisher struct {
client redis.UniversalClient
channel string
}
func NewPublisher(client redis.UniversalClient, channel string) *Publisher {
return &Publisher{client: client, channel: channel}
}
func (p *Publisher) Publish(message string) error {
return p.client.Publish(ctx, p.channel, message).Err()
}

View File

@ -1,49 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redispubsub
import (
"context"
"github.com/redis/go-redis/v9"
)
var ctx = context.Background()
type Subscriber struct {
client redis.UniversalClient
channel string
}
func NewSubscriber(client redis.UniversalClient, channel string) *Subscriber {
return &Subscriber{client: client, channel: channel}
}
func (s *Subscriber) OnMessage(ctx context.Context, callback func(string)) error {
messageChannel := s.client.Subscribe(ctx, s.channel).Channel()
go func() {
for {
select {
case <-ctx.Done():
return
case msg := <-messageChannel:
callback(msg.Payload)
}
}
}()
return nil
}

View File

@ -1,15 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package startrpc // import "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"

View File

@ -19,7 +19,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"net" "net"
"net/http"
"os" "os"
"os/signal" "os/signal"
"strconv" "strconv"
@ -27,205 +26,186 @@ import (
"time" "time"
conf "github.com/openimsdk/open-im-server/v3/pkg/common/config" conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
"github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil" "github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/network"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"github.com/openimsdk/tools/utils/runtimeenv"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery" kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery" "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw" "github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/utils/network"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/insecure"
) )
// Start rpc server. func init() {
func Start[T any](ctx context.Context, discovery *conf.Discovery, prometheusConfig *conf.Prometheus, listenIP, prommetrics.RegistryAll()
}
func Start[T any](ctx context.Context, disc *conf.Discovery, prometheusConfig *conf.Prometheus, listenIP,
registerIP string, autoSetPorts bool, rpcPorts []int, index int, rpcRegisterName string, notification *conf.Notification, config T, registerIP string, autoSetPorts bool, rpcPorts []int, index int, rpcRegisterName string, notification *conf.Notification, config T,
watchConfigNames []string, watchServiceNames []string, watchConfigNames []string, watchServiceNames []string,
rpcFn func(ctx context.Context, config T, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error, rpcFn func(ctx context.Context, config T, client discovery.Conn, server grpc.ServiceRegistrar) error,
options ...grpc.ServerOption) error { options ...grpc.ServerOption) error {
watchConfigNames = append(watchConfigNames, conf.LogConfigFileName)
var (
rpcTcpAddr string
netDone = make(chan struct{}, 2)
netErr error
prometheusPort int
)
if notification != nil { if notification != nil {
conf.InitNotification(notification) conf.InitNotification(notification)
} }
options = append(options, mw.GrpcServer())
registerIP, err := network.GetRpcRegisterIP(registerIP) registerIP, err := network.GetRpcRegisterIP(registerIP)
if err != nil { if err != nil {
return err return err
} }
var prometheusListenAddr string
runTimeEnv := runtimeenv.PrintRuntimeEnvironment() if autoSetPorts {
prometheusListenAddr = net.JoinHostPort(listenIP, "0")
if !autoSetPorts { } else {
rpcPort, err := datautil.GetElemByIndex(rpcPorts, index) prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
if err != nil { if err != nil {
return err return err
} }
rpcTcpAddr = net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort)) prometheusListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(prometheusPort))
} else {
rpcTcpAddr = net.JoinHostPort(network.GetListenIP(listenIP), "0")
} }
getAutoPort := func() (net.Listener, int, error) { watchConfigNames = append(watchConfigNames, conf.LogConfigFileName)
listener, err := net.Listen("tcp", rpcTcpAddr)
if err != nil {
return nil, 0, errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
}
_, portStr, _ := net.SplitHostPort(listener.Addr().String())
port, _ := strconv.Atoi(portStr)
return listener, port, nil
}
if autoSetPorts && discovery.Enable != conf.ETCD { client, err := kdisc.NewDiscoveryRegister(disc, watchServiceNames)
return errs.New("only etcd support autoSetPorts", "rpcRegisterName", rpcRegisterName).Wrap()
}
client, err := kdisc.NewDiscoveryRegister(discovery, runTimeEnv, watchServiceNames)
if err != nil { if err != nil {
return err return err
} }
defer client.Close() defer client.Close()
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) client.AddOption(
mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")),
)
// var reg *prometheus.Registry ctx, cancel := context.WithCancelCause(ctx)
// var metric *grpcprometheus.ServerMetrics
if prometheusConfig.Enable { go func() {
// cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share) sigs := make(chan os.Signal, 1)
// reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics) signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)
// options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()), select {
// grpc.UnaryInterceptor(metric.UnaryServerInterceptor())) case <-ctx.Done():
return
case val := <-sigs:
log.ZDebug(ctx, "recv signal", "signal", val.String())
cancel(fmt.Errorf("signal %s", val.String()))
}
}()
if prometheusListenAddr != "" {
options = append( options = append(
options, mw.GrpcServer(), options,
prommetricsUnaryInterceptor(rpcRegisterName), prommetricsUnaryInterceptor(rpcRegisterName),
prommetricsStreamInterceptor(rpcRegisterName), prommetricsStreamInterceptor(rpcRegisterName),
) )
prometheusListener, prometheusPort, err := listenTCP(prometheusListenAddr)
var ( if err != nil {
listener net.Listener
)
if autoSetPorts {
listener, prometheusPort, err = getAutoPort()
if err != nil {
return err
}
etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
_, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(rpcRegisterName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
if err != nil {
return errs.WrapMsg(err, "etcd put err")
}
} else {
prometheusPort, err = datautil.GetElemByIndex(prometheusConfig.Ports, index)
if err != nil {
return err
}
listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
if err != nil {
return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
}
}
cs := prommetrics.GetGrpcCusMetrics(rpcRegisterName, discovery)
go func() {
if err := prommetrics.RpcInit(cs, listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, fmt.Sprintf("rpc %s prometheus start err: %d", rpcRegisterName, prometheusPort))
netDone <- struct{}{}
}
//metric.InitializeMetrics(srv)
// Create a HTTP server for prometheus.
// httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
// if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
// netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
// netDone <- struct{}{}
// }
}()
} else {
options = append(options, mw.GrpcServer())
}
listener, port, err := getAutoPort()
if err != nil {
return err
}
log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", port,
"prometheusPort", prometheusPort)
defer listener.Close()
srv := grpc.NewServer(options...)
err = rpcFn(ctx, config, client, srv)
if err != nil {
return err
}
err = client.Register(
rpcRegisterName,
registerIP,
port,
grpc.WithTransportCredentials(insecure.NewCredentials()),
)
if err != nil {
return err
}
go func() {
err := srv.Serve(listener)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, "rpc start err: ", rpcTcpAddr)
netDone <- struct{}{}
}
}()
if discovery.Enable == conf.ETCD {
cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), watchConfigNames)
cm.Watch(ctx)
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGTERM)
select {
case <-sigs:
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil {
return err return err
} }
return nil log.ZDebug(ctx, "prometheus start", "addr", prometheusListener.Addr(), "rpcRegisterName", rpcRegisterName)
case <-netDone: target, err := jsonutil.JsonMarshal(prommetrics.BuildDefaultTarget(registerIP, prometheusPort))
return netErr if err != nil {
return err
}
if err := client.SetKey(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), target); err != nil {
if !errors.Is(err, discovery.ErrNotSupportedKeyValue) {
return err
}
}
go func() {
err := prommetrics.Start(prometheusListener)
if err == nil {
err = fmt.Errorf("listener done")
}
cancel(fmt.Errorf("prommetrics %s %w", rpcRegisterName, err))
}()
} }
var (
rpcServer *grpc.Server
rpcGracefulStop chan struct{}
)
onGrpcServiceRegistrar := func(desc *grpc.ServiceDesc, impl any) {
if rpcServer != nil {
rpcServer.RegisterService(desc, impl)
return
}
var rpcListenAddr string
if autoSetPorts {
rpcListenAddr = net.JoinHostPort(listenIP, "0")
} else {
rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
if err != nil {
cancel(fmt.Errorf("rpcPorts index out of range %s %w", rpcRegisterName, err))
return
}
rpcListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(rpcPort))
}
rpcListener, err := net.Listen("tcp", rpcListenAddr)
if err != nil {
cancel(fmt.Errorf("listen rpc %s %s %w", rpcRegisterName, rpcListenAddr, err))
return
}
rpcServer = grpc.NewServer(options...)
rpcServer.RegisterService(desc, impl)
rpcGracefulStop = make(chan struct{})
rpcPort := rpcListener.Addr().(*net.TCPAddr).Port
log.ZDebug(ctx, "rpc start register", "rpcRegisterName", rpcRegisterName, "registerIP", registerIP, "rpcPort", rpcPort)
grpcOpt := grpc.WithTransportCredentials(insecure.NewCredentials())
rpcGracefulStop = make(chan struct{})
go func() {
<-ctx.Done()
rpcServer.GracefulStop()
close(rpcGracefulStop)
}()
if err := client.Register(ctx, rpcRegisterName, registerIP, rpcListener.Addr().(*net.TCPAddr).Port, grpcOpt); err != nil {
cancel(fmt.Errorf("rpc register %s %w", rpcRegisterName, err))
return
}
go func() {
err := rpcServer.Serve(rpcListener)
if err == nil {
err = fmt.Errorf("serve end")
}
cancel(fmt.Errorf("rpc %s %w", rpcRegisterName, err))
}()
}
err = rpcFn(ctx, config, client, &grpcServiceRegistrar{onRegisterService: onGrpcServiceRegistrar})
if err != nil {
return err
}
<-ctx.Done()
log.ZDebug(ctx, "cmd wait done", "err", context.Cause(ctx))
if rpcGracefulStop != nil {
timeout := time.NewTimer(time.Second * 15)
defer timeout.Stop()
select {
case <-timeout.C:
log.ZWarn(ctx, "rcp graceful stop timeout", nil)
case <-rpcGracefulStop:
log.ZDebug(ctx, "rcp graceful stop done")
}
}
return context.Cause(ctx)
} }
func gracefulStopWithCtx(ctx context.Context, f func()) error { func listenTCP(addr string) (net.Listener, int, error) {
done := make(chan struct{}, 1) listener, err := net.Listen("tcp", addr)
go func() { if err != nil {
f() return nil, 0, errs.WrapMsg(err, "listen err", "addr", addr)
close(done)
}()
select {
case <-ctx.Done():
return errs.New("timeout, ctx graceful stop")
case <-done:
return nil
} }
return listener, listener.Addr().(*net.TCPAddr).Port, nil
} }
func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption { func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
@ -249,3 +229,11 @@ func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption { func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption {
return grpc.ChainStreamInterceptor() return grpc.ChainStreamInterceptor()
} }
type grpcServiceRegistrar struct {
onRegisterService func(desc *grpc.ServiceDesc, impl any)
}
func (x *grpcServiceRegistrar) RegisterService(desc *grpc.ServiceDesc, impl any) {
x.onRegisterService(desc, impl)
}

View File

@ -0,0 +1,50 @@
package mcache
import (
"context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/s3/minio"
)
func NewMinioCache(cache database.Cache) minio.Cache {
return &minioCache{
cache: cache,
expireTime: time.Hour * 24 * 7,
}
}
type minioCache struct {
cache database.Cache
expireTime time.Duration
}
func (g *minioCache) getObjectImageInfoKey(key string) string {
return cachekey.GetObjectImageInfoKey(key)
}
func (g *minioCache) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
}
func (g *minioCache) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
ks := make([]string, 0, len(keys))
for _, key := range keys {
ks = append(ks, g.getObjectImageInfoKey(key))
}
return g.cache.Del(ctx, ks)
}
func (g *minioCache) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
return g.cache.Del(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
}
func (g *minioCache) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
return getCache[*minio.ImageInfo](ctx, g.cache, g.getObjectImageInfoKey(key), g.expireTime, fn)
}
func (g *minioCache) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
return getCache[string](ctx, g.cache, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}

View File

@ -0,0 +1,132 @@
package mcache
import (
"context"
"strconv"
"sync"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"
)
var (
memMsgCache lru.LRU[string, *model.MsgInfoModel]
initMemMsgCache sync.Once
)
func NewMsgCache(cache database.Cache, msgDocDatabase database.Msg) cache.MsgCache {
initMemMsgCache.Do(func() {
memMsgCache = lru.NewLayLRU[string, *model.MsgInfoModel](1024*8, time.Hour, time.Second*10, localcache.EmptyTarget{}, nil)
})
return &msgCache{
cache: cache,
msgDocDatabase: msgDocDatabase,
memMsgCache: memMsgCache,
}
}
type msgCache struct {
cache database.Cache
msgDocDatabase database.Msg
memMsgCache lru.LRU[string, *model.MsgInfoModel]
}
// getSendMsgKey returns the cache key recording the send status of message id.
func (x *msgCache) getSendMsgKey(id string) string {
	return cachekey.GetSendMsgKey(id)
}

// SetSendMsgStatus records the send status of message id for 24 hours.
func (x *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
	return x.cache.Set(ctx, x.getSendMsgKey(id), strconv.Itoa(int(status)), time.Hour*24)
}
// GetSendMsgStatus reads the previously stored send status of message id.
// A missing entry is reported as a wrapped redis.Nil so callers can tell
// "never recorded" apart from a real cache failure.
func (x *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
	key := x.getSendMsgKey(id)
	values, err := x.cache.Get(ctx, []string{key})
	if err != nil {
		return 0, err
	}
	raw, found := values[key]
	if !found {
		return 0, errs.Wrap(redis.Nil)
	}
	parsed, err := strconv.Atoi(raw)
	if err != nil {
		return 0, errs.WrapMsg(err, "GetSendMsgStatus strconv.Atoi error", "val", raw)
	}
	return int32(parsed), nil
}
// getMsgCacheKey returns the LRU key for one (conversationID, seq) message.
func (x *msgCache) getMsgCacheKey(conversationID string, seq int64) string {
	return cachekey.GetMsgCacheKey(conversationID, seq)
}
// GetMessageBySeqs returns the messages of conversationID at the given seqs.
// Hits are served from the shared in-memory LRU; misses are loaded in one
// batch from the message document database and cached. Messages that cannot
// be found (or carry no valid seq) are simply absent from the result.
func (x *msgCache) GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) {
	if len(seqs) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(seqs))
	keySeq := make(map[string]int64, len(seqs))
	for _, seq := range seqs {
		key := x.getMsgCacheKey(conversationID, seq)
		keys = append(keys, key)
		keySeq[key] = seq
	}
	res, err := x.memMsgCache.GetBatch(keys, func(keys []string) (map[string]*model.MsgInfoModel, error) {
		findSeqs := make([]int64, 0, len(keys))
		for _, key := range keys {
			seq, ok := keySeq[key]
			if !ok {
				continue
			}
			findSeqs = append(findSeqs, seq)
		}
		// Bug fix: only fetch the seqs that missed the LRU (findSeqs);
		// previously the full request (seqs) was queried, ignoring the
		// miss set computed above.
		res, err := x.msgDocDatabase.FindSeqs(ctx, conversationID, findSeqs)
		if err != nil {
			return nil, err
		}
		kv := make(map[string]*model.MsgInfoModel)
		for i := range res {
			msg := res[i]
			if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
				continue
			}
			key := x.getMsgCacheKey(conversationID, msg.Msg.Seq)
			kv[key] = msg
		}
		return kv, nil
	})
	if err != nil {
		return nil, err
	}
	return datautil.Values(res), nil
}
// DelMessageBySeqs evicts the given seqs of conversationID from the
// in-memory message LRU. It always succeeds; the error return exists only to
// satisfy the cache.MsgCache contract.
// Consistency fix: pointer receiver, matching every other msgCache method
// (the original used a value receiver, copying the struct on each call).
func (x *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
	if len(seqs) == 0 {
		return nil
	}
	for _, seq := range seqs {
		x.memMsgCache.Del(x.getMsgCacheKey(conversationID, seq))
	}
	return nil
}
// SetMessageBySeqs stores the given messages of conversationID into the
// in-memory LRU, skipping nil entries and entries without a positive seq.
// Always returns nil.
func (x *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error {
	for _, msg := range msgs {
		if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
			continue
		}
		x.memMsgCache.Set(x.getMsgCacheKey(conversationID, msg.Msg.Seq), msg)
	}
	return nil
}

View File

@ -0,0 +1,82 @@
package mcache
import (
"context"
"sync"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)
var (
	// globalOnlineCache is the process-wide singleton returned by
	// NewOnlineCache; globalOnlineOnce guards its one-time construction.
	globalOnlineCache cache.OnlineCache
	globalOnlineOnce  sync.Once
)
// NewOnlineCache returns the process-wide in-memory OnlineCache singleton,
// creating it on first use.
func NewOnlineCache() cache.OnlineCache {
	globalOnlineOnce.Do(func() {
		c := &onlineCache{user: make(map[string]map[int32]struct{})}
		globalOnlineCache = c
	})
	return globalOnlineCache
}
// onlineCache keeps each user's set of online platform IDs in memory,
// guarded by lock for concurrent access.
type onlineCache struct {
	lock sync.RWMutex
	// user maps userID -> set of platform IDs currently online.
	user map[string]map[int32]struct{}
}
// GetOnline reports the platform IDs on which userID is currently online.
// An unknown user yields an empty result and no error.
func (x *onlineCache) GetOnline(ctx context.Context, userID string) ([]int32, error) {
	x.lock.RLock()
	defer x.lock.RUnlock()
	platforms, ok := x.user[userID]
	if !ok {
		return nil, nil
	}
	out := make([]int32, 0, len(platforms))
	for p := range platforms {
		out = append(out, p)
	}
	return out, nil
}
// SetUserOnline applies platform transitions for userID: platforms listed in
// offline are removed, platforms listed in online are added, and the user's
// entry is dropped entirely once no platform remains online.
func (x *onlineCache) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	x.lock.Lock()
	defer x.lock.Unlock()
	platforms, exists := x.user[userID]
	if exists {
		for _, p := range offline {
			delete(platforms, p)
		}
	}
	if len(online) > 0 {
		if !exists {
			platforms = make(map[int32]struct{})
			x.user[userID] = platforms
		}
		for _, p := range online {
			platforms[p] = struct{}{}
		}
	}
	// A nil map has length 0, so a user that never existed is a no-op here.
	if len(platforms) == 0 {
		delete(x.user, userID)
	}
	return nil
}
// GetAllOnlineUsers returns a snapshot of every online user and their
// platform IDs. The in-memory backend does not paginate: cursor 0 yields the
// full snapshot and any other cursor yields an empty page, always with a
// next-cursor of 0.
func (x *onlineCache) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
	if cursor != 0 {
		return nil, 0, nil
	}
	x.lock.RLock()
	defer x.lock.RUnlock()
	snapshot := make(map[string][]int32, len(x.user))
	for userID, platforms := range x.user {
		ids := make([]int32, 0, len(platforms))
		for p := range platforms {
			ids = append(ids, p)
		}
		snapshot[userID] = ids
	}
	return snapshot, 0, nil
}

View File

@ -0,0 +1,79 @@
package mcache
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
)
// NewSeqConversationCache wraps a database.SeqConversation store as a
// cache.SeqConversationCache. This mcache variant adds no caching layer; it
// delegates every call straight to the store.
func NewSeqConversationCache(sc database.SeqConversation) cache.SeqConversationCache {
	return &seqConversationCache{
		sc: sc,
	}
}

// seqConversationCache is a pass-through SeqConversationCache implementation
// backed directly by the SeqConversation store.
type seqConversationCache struct {
	sc database.SeqConversation
}
// Malloc reserves size seqs for conversationID by delegating to the
// underlying store and returns the store's result.
func (x *seqConversationCache) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	return x.sc.Malloc(ctx, conversationID, size)
}

// SetMinSeq stores the min seq of conversationID.
func (x *seqConversationCache) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
	return x.sc.SetMinSeq(ctx, conversationID, seq)
}

// GetMinSeq returns the min seq of conversationID.
func (x *seqConversationCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return x.sc.GetMinSeq(ctx, conversationID)
}
// GetMaxSeqs returns the max seq of each conversation in conversationIDs,
// stopping at the first store error.
func (x *seqConversationCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	res := make(map[string]int64, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		// Bug fix: this previously called GetMinSeq, returning min seqs
		// from a method whose contract is max seqs.
		seq, err := x.GetMaxSeq(ctx, conversationID)
		if err != nil {
			return nil, err
		}
		res[conversationID] = seq
	}
	return res, nil
}
// GetMaxSeqsWithTime returns the max seq of each conversation wrapped in a
// SeqTime. The time component is left zero because the mcache backend does
// not track seq timestamps.
func (x *seqConversationCache) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
	res := make(map[string]database.SeqTime, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		// Bug fix: this previously called GetMinSeq despite the max-seq
		// contract of this method.
		seq, err := x.GetMaxSeq(ctx, conversationID)
		if err != nil {
			return nil, err
		}
		res[conversationID] = database.SeqTime{Seq: seq}
	}
	return res, nil
}
// GetMaxSeq returns the max seq of conversationID from the underlying store.
func (x *seqConversationCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return x.sc.GetMaxSeq(ctx, conversationID)
}
// GetMaxSeqWithTime returns the max seq of conversationID wrapped in a
// SeqTime. The time component is left zero because the mcache backend does
// not track seq timestamps.
func (x *seqConversationCache) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) {
	// Bug fix: previously fetched the min seq despite the max-seq contract.
	seq, err := x.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return database.SeqTime{}, err
	}
	return database.SeqTime{Seq: seq}, nil
}
// SetMinSeqs persists each conversation's min seq, stopping at the first
// store failure.
func (x *seqConversationCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	for conversationID, minSeq := range seqs {
		if err := x.sc.SetMinSeq(ctx, conversationID, minSeq); err != nil {
			return err
		}
	}
	return nil
}
// GetCacheMaxSeqWithTime mirrors GetMaxSeqsWithTime: this backend has no
// separate cache layer, so the "cached" and "authoritative" reads are the
// same source.
func (x *seqConversationCache) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
	return x.GetMaxSeqsWithTime(ctx, conversationIDs)
}

View File

@ -0,0 +1,98 @@
package mcache
import (
"context"
"strconv"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/errs"
"github.com/redis/go-redis/v9"
)
// NewThirdCache wraps a generic KV cache as a cache.ThirdCache for
// third-party push credentials (FCM, Getui) and badge counters.
func NewThirdCache(cache database.Cache) cache.ThirdCache {
	return &thirdCache{
		cache: cache,
	}
}

// thirdCache stores third-party push state as plain string entries in the
// generic KV cache.
type thirdCache struct {
	cache database.Cache
}
// getGetuiTokenKey returns the key under which the Getui token is stored.
func (c *thirdCache) getGetuiTokenKey() string {
	return cachekey.GetGetuiTokenKey()
}

// getGetuiTaskIDKey returns the key under which the Getui task ID is stored.
func (c *thirdCache) getGetuiTaskIDKey() string {
	return cachekey.GetGetuiTaskIDKey()
}

// getUserBadgeUnreadCountSumKey returns the key of the user's badge
// unread-count sum.
func (c *thirdCache) getUserBadgeUnreadCountSumKey(userID string) string {
	return cachekey.GetUserBadgeUnreadCountSumKey(userID)
}

// getFcmAccountTokenKey returns the key of the FCM token for one
// (account, platformID) pair.
func (c *thirdCache) getFcmAccountTokenKey(account string, platformID int) string {
	return cachekey.GetFcmAccountTokenKey(account, platformID)
}
// get fetches a single string value by key, translating a missing entry into
// a wrapped redis.Nil so callers behave the same as with the Redis-backed
// implementation.
func (c *thirdCache) get(ctx context.Context, key string) (string, error) {
	values, err := c.cache.Get(ctx, []string{key})
	if err != nil {
		return "", err
	}
	val, ok := values[key]
	if !ok {
		return "", errs.Wrap(redis.Nil)
	}
	return val, nil
}
// SetFcmToken stores the FCM token of (account, platformID) with an expiry
// of expireTime seconds.
func (c *thirdCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
	return errs.Wrap(c.cache.Set(ctx, c.getFcmAccountTokenKey(account, platformID), fcmToken, time.Duration(expireTime)*time.Second))
}

// GetFcmToken returns the FCM token of (account, platformID); a missing
// entry surfaces as a wrapped redis.Nil from get.
func (c *thirdCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
	return c.get(ctx, c.getFcmAccountTokenKey(account, platformID))
}

// DelFcmToken removes the FCM token of (account, platformID).
func (c *thirdCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
	return c.cache.Del(ctx, []string{c.getFcmAccountTokenKey(account, platformID)})
}

// IncrUserBadgeUnreadCountSum increments the user's badge unread-count sum
// by 1 via the cache's Incr and returns the new value.
func (c *thirdCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
	return c.cache.Incr(ctx, c.getUserBadgeUnreadCountSumKey(userID), 1)
}

// SetUserBadgeUnreadCountSum stores the user's badge unread-count sum with
// no expiry (0).
func (c *thirdCache) SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error {
	return c.cache.Set(ctx, c.getUserBadgeUnreadCountSumKey(userID), strconv.Itoa(value), 0)
}
// GetUserBadgeUnreadCountSum returns the stored badge unread-count sum for
// userID, or a wrapped redis.Nil (via get) when none has been set.
func (c *thirdCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
	raw, err := c.get(ctx, c.getUserBadgeUnreadCountSumKey(userID))
	if err != nil {
		return 0, err
	}
	sum, err := strconv.Atoi(raw)
	if err != nil {
		return 0, errs.WrapMsg(err, "strconv.Atoi", "str", raw)
	}
	return sum, nil
}
// SetGetuiToken stores the Getui auth token with an expiry of expireTime
// seconds.
func (c *thirdCache) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
	return c.cache.Set(ctx, c.getGetuiTokenKey(), token, time.Duration(expireTime)*time.Second)
}

// GetGetuiToken returns the stored Getui auth token; a missing entry
// surfaces as a wrapped redis.Nil from get.
func (c *thirdCache) GetGetuiToken(ctx context.Context) (string, error) {
	return c.get(ctx, c.getGetuiTokenKey())
}

// SetGetuiTaskID stores the Getui task ID with an expiry of expireTime
// seconds.
func (c *thirdCache) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
	return c.cache.Set(ctx, c.getGetuiTaskIDKey(), taskID, time.Duration(expireTime)*time.Second)
}

// GetGetuiTaskID returns the stored Getui task ID; a missing entry surfaces
// as a wrapped redis.Nil from get.
func (c *thirdCache) GetGetuiTaskID(ctx context.Context) (string, error) {
	return c.get(ctx, c.getGetuiTaskIDKey())
}

130
pkg/common/storage/cache/mcache/token.go vendored Normal file
View File

@ -0,0 +1,130 @@
package mcache
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
)
// NewTokenCacheModel builds a TokenModel on top of the generic KV cache.
// accessExpire is expressed in days and converted via getExpireTime.
func NewTokenCacheModel(cache database.Cache, accessExpire int64) cache.TokenModel {
	c := &tokenCache{cache: cache}
	c.accessExpire = c.getExpireTime(accessExpire)
	return c
}

// tokenCache implements cache.TokenModel by storing one KV entry per token,
// keyed "<uid-pid-key>:<token>" with the flag (an int) as the string value.
type tokenCache struct {
	cache        database.Cache
	accessExpire time.Duration
}
// getTokenKey builds the storage key for one token:
// cachekey.GetTokenKey(userID, platformID) + ":" + token.
func (x *tokenCache) getTokenKey(userID string, platformID int, token string) string {
	return cachekey.GetTokenKey(userID, platformID) + ":" + token
}

// SetTokenFlag stores the flag of one token with the standard access expiry.
func (x *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
	return x.cache.Set(ctx, x.getTokenKey(userID, platformID, token), strconv.Itoa(flag), x.accessExpire)
}

// SetTokenFlagEx set token and flag with expire time. In this backend the
// expiry is always x.accessExpire, so it simply delegates to SetTokenFlag.
func (x *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error {
	return x.SetTokenFlag(ctx, userID, platformID, token, flag)
}
// GetTokensWithoutError returns token -> flag for every token of the user on
// the given platform. Entries whose stored flag is not an integer are logged
// and skipped rather than failing the whole call.
func (x *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) {
	prefix := x.getTokenKey(userID, platformID, "")
	stored, err := x.cache.Prefix(ctx, prefix)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	tokens := make(map[string]int, len(stored))
	for key, raw := range stored {
		flag, convErr := strconv.Atoi(raw)
		if convErr != nil {
			log.ZError(ctx, "token value is not int", convErr, "value", raw, "userID", userID, "platformID", platformID)
			continue
		}
		tokens[strings.TrimPrefix(key, prefix)] = flag
	}
	return tokens, nil
}
// GetAllTokensWithoutError returns platformID -> token -> flag for every
// token of userID across all platforms. Malformed entries are logged and
// skipped instead of failing the whole call.
func (x *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) {
	prefix := cachekey.UidPidToken + userID + ":"
	tokens, err := x.cache.Prefix(ctx, prefix)
	if err != nil {
		return nil, err
	}
	res := make(map[int]map[string]int)
	for key, flagStr := range tokens {
		flag, err := strconv.Atoi(flagStr)
		if err != nil {
			log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID)
			continue
		}
		// Key layout after the prefix is "<platformID>:<token>".
		arr := strings.SplitN(strings.TrimPrefix(key, prefix), ":", 2)
		if len(arr) != 2 {
			// Bug fix: this branch (and the two below) previously reused the
			// "token value is not int" message — here with a nil error — which
			// misdescribed the actual failure.
			log.ZError(ctx, "token key format error", errs.New("missing platform separator"), "key", key, "value", flagStr, "userID", userID)
			continue
		}
		platformID, err := strconv.Atoi(arr[0])
		if err != nil {
			log.ZError(ctx, "token platform is not int", err, "key", key, "value", flagStr, "userID", userID)
			continue
		}
		token := arr[1]
		if token == "" {
			log.ZError(ctx, "token is empty", errs.New("empty token in key"), "key", key, "value", flagStr, "userID", userID)
			continue
		}
		tk, ok := res[platformID]
		if !ok {
			tk = make(map[string]int)
			res[platformID] = tk
		}
		tk[token] = flag
	}
	return res, nil
}
// SetTokenMapByUidPid stores every token -> flag pair of the user/platform,
// stopping at the first failure.
func (x *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error {
	for token, flag := range m {
		if err := x.SetTokenFlag(ctx, userID, platformID, token, flag); err != nil {
			return err
		}
	}
	return nil
}
// BatchSetTokenMapByUidPid stores token flags grouped by an already-built
// uid/pid key prefix: each token is appended to its prefix with ":" and the
// flag is stored with the standard access expiry. Stops at the first failure.
func (x *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error {
	for prefix, flags := range tokens {
		for token, flag := range flags {
			key := prefix + ":" + token
			if err := x.cache.Set(ctx, key, fmt.Sprintf("%v", flag), x.accessExpire); err != nil {
				return err
			}
		}
	}
	return nil
}
// DeleteTokenByUidPid removes the given tokens of the user/platform in one
// batched delete.
func (x *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error {
	keys := make([]string, len(fields))
	for i, token := range fields {
		keys[i] = x.getTokenKey(userID, platformID, token)
	}
	return x.cache.Del(ctx, keys)
}
// getExpireTime converts an expiry expressed in days into a time.Duration.
func (x *tokenCache) getExpireTime(t int64) time.Duration {
	return time.Hour * 24 * time.Duration(t)
}

View File

@ -0,0 +1,63 @@
package mcache
import (
"context"
"encoding/json"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/log"
)
// getCache returns the JSON-decoded value of key from cache, computing it
// with fn on a miss. A lock around the recompute limits a thundering herd:
// after acquiring the lock the cache is re-read before fn runs. Freshly
// computed values are JSON-encoded and stored with expireTime.
func getCache[V any](ctx context.Context, cache database.Cache, key string, expireTime time.Duration, fn func(ctx context.Context) (V, error)) (V, error) {
	// getDB reads the current cache entry; the bool reports a usable hit.
	getDB := func() (V, bool, error) {
		var val V
		res, err := cache.Get(ctx, []string{key})
		if err != nil {
			return val, false, err
		}
		str, ok := res[key]
		if !ok {
			return val, false, nil
		}
		if err := json.Unmarshal([]byte(str), &val); err != nil {
			// Bug fix: the unmarshal error was previously discarded (the
			// stale, nil outer err was returned) along with a possibly
			// half-populated value. Treat a corrupt entry as a clean miss
			// so it is recomputed and rewritten below.
			var zero V
			return zero, false, nil
		}
		return val, true, nil
	}
	dbVal, ok, err := getDB()
	if err != nil {
		return dbVal, err
	}
	if ok {
		return dbVal, nil
	}
	lockValue, err := cache.Lock(ctx, key, time.Minute)
	if err != nil {
		return dbVal, err
	}
	defer func() {
		if err := cache.Unlock(ctx, key, lockValue); err != nil {
			log.ZError(ctx, "unlock cache key", err, "key", key, "value", lockValue)
		}
	}()
	// Another process may have filled the cache while we waited for the lock.
	dbVal, ok, err = getDB()
	if err != nil {
		return dbVal, err
	}
	if ok {
		return dbVal, nil
	}
	val, err := fn(ctx)
	if err != nil {
		return val, err
	}
	data, err := json.Marshal(val)
	if err != nil {
		return val, err
	}
	if err := cache.Set(ctx, key, string(data), expireTime); err != nil {
		return val, err
	}
	return val, nil
}

View File

@ -3,28 +3,65 @@ package redis
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"time"
"github.com/dtm-labs/rockscache" "github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"golang.org/x/sync/singleflight"
"time"
"unsafe"
) )
func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient { // GetRocksCacheOptions returns the default configuration options for RocksCache.
type Client struct { func GetRocksCacheOptions() *rockscache.Options {
rdb redis.UniversalClient opts := rockscache.NewDefaultOptions()
_ rockscache.Options opts.LockExpire = rocksCacheTimeout
_ singleflight.Group opts.WaitReplicasTimeout = rocksCacheTimeout
} opts.StrongConsistency = true
return (*Client)(unsafe.Pointer(cli)).rdb opts.RandomExpireAdjustment = 0.2
return &opts
} }
func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) { func newRocksCacheClient(rdb redis.UniversalClient) *rocksCacheClient {
if rdb == nil {
return &rocksCacheClient{}
}
rc := &rocksCacheClient{
rdb: rdb,
client: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
}
return rc
}
type rocksCacheClient struct {
rdb redis.UniversalClient
client *rockscache.Client
}
func (x *rocksCacheClient) GetClient() *rockscache.Client {
return x.client
}
func (x *rocksCacheClient) Disable() bool {
return x.client == nil
}
func (x *rocksCacheClient) GetRedis() redis.UniversalClient {
return x.rdb
}
func (x *rocksCacheClient) GetBatchDeleter(topics ...string) cache.BatchDeleter {
return NewBatchDeleterRedis(x, topics)
}
func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rocksCacheClient, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
if len(ids) == 0 { if len(ids) == 0 {
return nil, nil return nil, nil
} }
if rcClient.Disable() {
return fn(ctx, ids)
}
findKeys := make([]string, 0, len(ids)) findKeys := make([]string, 0, len(ids))
keyId := make(map[string]K) keyId := make(map[string]K)
for _, id := range ids { for _, id := range ids {
@ -35,13 +72,13 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
keyId[key] = id keyId[key] = id
findKeys = append(findKeys, key) findKeys = append(findKeys, key)
} }
slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys) slotKeys, err := groupKeysBySlot(ctx, rcClient.GetRedis(), findKeys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
result := make([]*V, 0, len(findKeys)) result := make([]*V, 0, len(findKeys))
for _, keys := range slotKeys { for _, keys := range slotKeys {
indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) { indexCache, err := rcClient.GetClient().FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
queryIds := make([]K, 0, len(idx)) queryIds := make([]K, 0, len(idx))
idIndex := make(map[K]int) idIndex := make(map[K]int)
for _, index := range idx { for _, index := range idx {

View File

@ -1,23 +1,11 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"time"
"github.com/dtm-labs/rockscache" "github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/localcache"
@ -25,7 +13,6 @@ import (
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"time"
) )
const ( const (
@ -41,10 +28,10 @@ type BatchDeleterRedis struct {
} }
// NewBatchDeleterRedis creates a new BatchDeleterRedis instance. // NewBatchDeleterRedis creates a new BatchDeleterRedis instance.
func NewBatchDeleterRedis(redisClient redis.UniversalClient, options *rockscache.Options, redisPubTopics []string) *BatchDeleterRedis { func NewBatchDeleterRedis(rcClient *rocksCacheClient, redisPubTopics []string) *BatchDeleterRedis {
return &BatchDeleterRedis{ return &BatchDeleterRedis{
redisClient: redisClient, redisClient: rcClient.GetRedis(),
rocksClient: rockscache.NewClient(redisClient, *options), rocksClient: rcClient.GetClient(),
redisPubTopics: redisPubTopics, redisPubTopics: redisPubTopics,
} }
} }
@ -107,21 +94,29 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) {
c.keys = append(c.keys, keys...) c.keys = append(c.keys, keys...)
} }
// GetRocksCacheOptions returns the default configuration options for RocksCache. type disableBatchDeleter struct{}
func GetRocksCacheOptions() *rockscache.Options {
opts := rockscache.NewDefaultOptions()
opts.LockExpire = rocksCacheTimeout
opts.WaitReplicasTimeout = rocksCacheTimeout
opts.StrongConsistency = true
opts.RandomExpireAdjustment = 0.2
return &opts func (x disableBatchDeleter) ChainExecDel(ctx context.Context) error {
return nil
} }
func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) { func (x disableBatchDeleter) ExecDelWithKeys(ctx context.Context, keys []string) error {
return nil
}
func (x disableBatchDeleter) Clone() cache.BatchDeleter {
return x
}
func (x disableBatchDeleter) AddKeys(keys ...string) {}
func getCache[T any](ctx context.Context, rcClient *rocksCacheClient, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
if rcClient.Disable() {
return fn(ctx)
}
var t T var t T
var write bool var write bool
v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) { v, err := rcClient.GetClient().Fetch2(ctx, key, expire, func() (s string, err error) {
t, err = fn(ctx) t, err = fn(ctx)
if err != nil { if err != nil {
//log.ZError(ctx, "getCache query database failed", err, "key", key) //log.ZError(ctx, "getCache query database failed", err, "key", key)
@ -152,31 +147,3 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
return t, nil return t, nil
} }
//func batchGetCache[T any, K comparable](
// ctx context.Context,
// rcClient *rockscache.Client,
// expire time.Duration,
// keys []K,
// keyFn func(key K) string,
// fns func(ctx context.Context, key K) (T, error),
//) ([]T, error) {
// if len(keys) == 0 {
// return nil, nil
// }
// res := make([]T, 0, len(keys))
// for _, key := range keys {
// val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
// return fns(ctx, key)
// })
// if err != nil {
// if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
// continue
// }
// return nil, errs.Wrap(err)
// }
// res = append(res, val)
// }
//
// return res, nil
//}

View File

@ -1,29 +1,14 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
"context" "context"
"github.com/dtm-labs/rockscache" "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"time"
) )
const ( const (
@ -33,18 +18,16 @@ const (
type BlackCacheRedis struct { type BlackCacheRedis struct {
cache.BatchDeleter cache.BatchDeleter
expireTime time.Duration expireTime time.Duration
rcClient *rockscache.Client rcClient *rocksCacheClient
blackDB database.Black blackDB database.Black
} }
func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB database.Black, options *rockscache.Options) cache.BlackCache { func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB database.Black) cache.BlackCache {
batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.Friend.Topic}) rc := newRocksCacheClient(rdb)
b := localCache.Friend
log.ZDebug(context.Background(), "black local cache init", "Topic", b.Topic, "SlotNum", b.SlotNum, "SlotSize", b.SlotSize, "enable", b.Enable())
return &BlackCacheRedis{ return &BlackCacheRedis{
BatchDeleter: batchHandler, BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
expireTime: blackExpireTime, expireTime: blackExpireTime,
rcClient: rockscache.NewClient(rdb, *options), rcClient: rc,
blackDB: blackDB, blackDB: blackDB,
} }
} }

View File

@ -1,47 +1,30 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
"context" "context"
"github.com/dtm-labs/rockscache" "math/big"
"strings"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/encrypt" "github.com/openimsdk/tools/utils/encrypt"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"math/big"
"strings"
"time"
) )
const ( const (
conversationExpireTime = time.Second * 60 * 60 * 12 conversationExpireTime = time.Second * 60 * 60 * 12
) )
func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, opts *rockscache.Options, db database.Conversation) cache.ConversationCache { func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, db database.Conversation) cache.ConversationCache {
batchHandler := NewBatchDeleterRedis(rdb, opts, []string{localCache.Conversation.Topic}) rc := newRocksCacheClient(rdb)
c := localCache.Conversation
log.ZDebug(context.Background(), "conversation local cache init", "Topic", c.Topic, "SlotNum", c.SlotNum, "SlotSize", c.SlotSize, "enable", c.Enable())
return &ConversationRedisCache{ return &ConversationRedisCache{
BatchDeleter: batchHandler, BatchDeleter: rc.GetBatchDeleter(localCache.Conversation.Topic),
rcClient: rockscache.NewClient(rdb, *opts), rcClient: rc,
conversationDB: db, conversationDB: db,
expireTime: conversationExpireTime, expireTime: conversationExpireTime,
} }
@ -49,7 +32,7 @@ func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCac
type ConversationRedisCache struct { type ConversationRedisCache struct {
cache.BatchDeleter cache.BatchDeleter
rcClient *rockscache.Client rcClient *rocksCacheClient
conversationDB database.Conversation conversationDB database.Conversation
expireTime time.Duration expireTime time.Duration
} }

View File

@ -1,15 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis

View File

@ -1,30 +1,14 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
"context" "context"
"time" "time"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
) )
@ -38,21 +22,18 @@ type FriendCacheRedis struct {
cache.BatchDeleter cache.BatchDeleter
friendDB database.Friend friendDB database.Friend
expireTime time.Duration expireTime time.Duration
rcClient *rockscache.Client rcClient *rocksCacheClient
syncCount int syncCount int
} }
// NewFriendCacheRedis creates a new instance of FriendCacheRedis. // NewFriendCacheRedis creates a new instance of FriendCacheRedis.
func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB database.Friend, func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB database.Friend) cache.FriendCache {
options *rockscache.Options) cache.FriendCache { rc := newRocksCacheClient(rdb)
batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.Friend.Topic})
f := localCache.Friend
log.ZDebug(context.Background(), "friend local cache init", "Topic", f.Topic, "SlotNum", f.SlotNum, "SlotSize", f.SlotSize, "enable", f.Enable())
return &FriendCacheRedis{ return &FriendCacheRedis{
BatchDeleter: batchHandler, BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
friendDB: friendDB, friendDB: friendDB,
expireTime: friendExpireTime, expireTime: friendExpireTime,
rcClient: rockscache.NewClient(rdb, *options), rcClient: rc,
} }
} }

View File

@ -1,17 +1,3 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
@ -19,7 +5,6 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
@ -36,34 +21,21 @@ const (
groupExpireTime = time.Second * 60 * 60 * 12 groupExpireTime = time.Second * 60 * 60 * 12
) )
var errIndex = errs.New("err index")
type GroupCacheRedis struct { type GroupCacheRedis struct {
cache.BatchDeleter cache.BatchDeleter
groupDB database.Group groupDB database.Group
groupMemberDB database.GroupMember groupMemberDB database.GroupMember
groupRequestDB database.GroupRequest groupRequestDB database.GroupRequest
expireTime time.Duration expireTime time.Duration
rcClient *rockscache.Client rcClient *rocksCacheClient
groupHash cache.GroupHash groupHash cache.GroupHash
} }
func NewGroupCacheRedis( func NewGroupCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, groupDB database.Group, groupMemberDB database.GroupMember, groupRequestDB database.GroupRequest, hashCode cache.GroupHash) cache.GroupCache {
rdb redis.UniversalClient, rc := newRocksCacheClient(rdb)
localCache *config.LocalCache,
groupDB database.Group,
groupMemberDB database.GroupMember,
groupRequestDB database.GroupRequest,
hashCode cache.GroupHash,
opts *rockscache.Options,
) cache.GroupCache {
batchHandler := NewBatchDeleterRedis(rdb, opts, []string{localCache.Group.Topic})
g := localCache.Group
log.ZDebug(context.Background(), "group local cache init", "Topic", g.Topic, "SlotNum", g.SlotNum, "SlotSize", g.SlotSize, "enable", g.Enable())
return &GroupCacheRedis{ return &GroupCacheRedis{
BatchDeleter: batchHandler, BatchDeleter: rc.GetBatchDeleter(localCache.Group.Topic),
rcClient: rockscache.NewClient(rdb, *opts), rcClient: rc,
expireTime: groupExpireTime, expireTime: groupExpireTime,
groupDB: groupDB, groupDB: groupDB,
groupMemberDB: groupMemberDB, groupMemberDB: groupMemberDB,

59
pkg/common/storage/cache/redis/minio.go vendored Normal file
View File

@ -0,0 +1,59 @@
package redis
import (
"context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/tools/s3/minio"
"github.com/redis/go-redis/v9"
)
func NewMinioCache(rdb redis.UniversalClient) minio.Cache {
rc := newRocksCacheClient(rdb)
return &minioCacheRedis{
BatchDeleter: rc.GetBatchDeleter(),
rcClient: rc,
expireTime: time.Hour * 24 * 7,
}
}
type minioCacheRedis struct {
cache.BatchDeleter
rcClient *rocksCacheClient
expireTime time.Duration
}
func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
return cachekey.GetObjectImageInfoKey(key)
}
func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
}
func (g *minioCacheRedis) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
ks := make([]string, 0, len(keys))
for _, key := range keys {
ks = append(ks, g.getObjectImageInfoKey(key))
}
return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
}
func (g *minioCacheRedis) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
return g.BatchDeleter.ExecDelWithKeys(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
}
func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
if err != nil {
return nil, err
}
return info, nil
}
func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}

View File

@ -3,7 +3,8 @@ package redis
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"github.com/dtm-labs/rockscache" "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
@ -11,7 +12,6 @@ import (
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"time"
) // ) //
// msgCacheTimeout is expiration time of message cache, 86400 seconds // msgCacheTimeout is expiration time of message cache, 86400 seconds
@ -19,15 +19,13 @@ const msgCacheTimeout = time.Hour * 24
func NewMsgCache(client redis.UniversalClient, db database.Msg) cache.MsgCache { func NewMsgCache(client redis.UniversalClient, db database.Msg) cache.MsgCache {
return &msgCache{ return &msgCache{
rdb: client, rcClient: newRocksCacheClient(client),
rcClient: rockscache.NewClient(client, *GetRocksCacheOptions()),
msgDocDatabase: db, msgDocDatabase: db,
} }
} }
type msgCache struct { type msgCache struct {
rdb redis.UniversalClient rcClient *rocksCacheClient
rcClient *rockscache.Client
msgDocDatabase database.Msg msgDocDatabase database.Msg
} }
@ -36,11 +34,11 @@ func (c *msgCache) getSendMsgKey(id string) string {
} }
func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error { func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
return errs.Wrap(c.rdb.Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err()) return errs.Wrap(c.rcClient.GetRedis().Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err())
} }
func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) { func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
result, err := c.rdb.Get(ctx, c.getSendMsgKey(id)).Int() result, err := c.rcClient.GetRedis().Get(ctx, c.getSendMsgKey(id)).Int()
return int32(result), errs.Wrap(err) return int32(result), errs.Wrap(err)
} }
@ -67,12 +65,12 @@ func (c *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string,
keys := datautil.Slice(seqs, func(seq int64) string { keys := datautil.Slice(seqs, func(seq int64) string {
return cachekey.GetMsgCacheKey(conversationID, seq) return cachekey.GetMsgCacheKey(conversationID, seq)
}) })
slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(c.rcClient), keys) slotKeys, err := groupKeysBySlot(ctx, c.rcClient.GetRedis(), keys)
if err != nil { if err != nil {
return err return err
} }
for _, keys := range slotKeys { for _, keys := range slotKeys {
if err := c.rcClient.TagAsDeletedBatch2(ctx, keys); err != nil { if err := c.rcClient.GetClient().TagAsDeletedBatch2(ctx, keys); err != nil {
return err return err
} }
} }
@ -88,7 +86,7 @@ func (c *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string,
if err != nil { if err != nil {
return err return err
} }
if err := c.rcClient.RawSet(ctx, cachekey.GetMsgCacheKey(conversationID, msg.Msg.Seq), string(data), msgCacheTimeout); err != nil { if err := c.rcClient.GetClient().RawSet(ctx, cachekey.GetMsgCacheKey(conversationID, msg.Msg.Seq), string(data), msgCacheTimeout); err != nil {
return err return err
} }
} }

View File

@ -3,18 +3,24 @@ package redis
import ( import (
"context" "context"
"fmt" "fmt"
"strconv"
"strings"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"strconv"
"strings"
"time"
) )
func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache { func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
if rdb == nil || config.Standalone() {
return mcache.NewOnlineCache()
}
return &userOnline{ return &userOnline{
rdb: rdb, rdb: rdb,
expire: cachekey.OnlineExpire, expire: cachekey.OnlineExpire,

View File

@ -2,7 +2,7 @@ package redis
import ( import (
"context" "context"
"github.com/dtm-labs/rockscache"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
@ -28,83 +28,83 @@ type Config struct {
// Option is a function type for configuring Config // Option is a function type for configuring Config
type Option func(c *Config) type Option func(c *Config)
// NewRedisShardManager creates a new RedisShardManager instance //// NewRedisShardManager creates a new RedisShardManager instance
func NewRedisShardManager(redisClient redis.UniversalClient, opts ...Option) *RedisShardManager { //func NewRedisShardManager(redisClient redis.UniversalClient, opts ...Option) *RedisShardManager {
config := &Config{ // config := &Config{
batchSize: defaultBatchSize, // Default batch size is 50 keys // batchSize: defaultBatchSize, // Default batch size is 50 keys
continueOnError: false, // continueOnError: false,
concurrentLimit: defaultConcurrentLimit, // Default concurrent limit is 3 // concurrentLimit: defaultConcurrentLimit, // Default concurrent limit is 3
} // }
for _, opt := range opts { // for _, opt := range opts {
opt(config) // opt(config)
} // }
rsm := &RedisShardManager{ // rsm := &RedisShardManager{
redisClient: redisClient, // redisClient: redisClient,
config: config, // config: config,
} // }
return rsm // return rsm
} //}
//
// WithBatchSize sets the number of keys to process per batch //// WithBatchSize sets the number of keys to process per batch
func WithBatchSize(size int) Option { //func WithBatchSize(size int) Option {
return func(c *Config) { // return func(c *Config) {
c.batchSize = size // c.batchSize = size
} // }
} //}
//
// WithContinueOnError sets whether to continue processing on error //// WithContinueOnError sets whether to continue processing on error
func WithContinueOnError(continueOnError bool) Option { //func WithContinueOnError(continueOnError bool) Option {
return func(c *Config) { // return func(c *Config) {
c.continueOnError = continueOnError // c.continueOnError = continueOnError
} // }
} //}
//
// WithConcurrentLimit sets the concurrency limit //// WithConcurrentLimit sets the concurrency limit
func WithConcurrentLimit(limit int) Option { //func WithConcurrentLimit(limit int) Option {
return func(c *Config) { // return func(c *Config) {
c.concurrentLimit = limit // c.concurrentLimit = limit
} // }
} //}
//
// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function. //// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
func (rsm *RedisShardManager) ProcessKeysBySlot( //func (rsm *RedisShardManager) ProcessKeysBySlot(
ctx context.Context, // ctx context.Context,
keys []string, // keys []string,
processFunc func(ctx context.Context, slot int64, keys []string) error, // processFunc func(ctx context.Context, slot int64, keys []string) error,
) error { //) error {
//
// Group keys by slot // // Group keys by slot
slots, err := groupKeysBySlot(ctx, rsm.redisClient, keys) // slots, err := groupKeysBySlot(ctx, rsm.redisClient, keys)
if err != nil { // if err != nil {
return err // return err
} // }
//
g, ctx := errgroup.WithContext(ctx) // g, ctx := errgroup.WithContext(ctx)
g.SetLimit(rsm.config.concurrentLimit) // g.SetLimit(rsm.config.concurrentLimit)
//
// Process keys in each slot using the provided function // // Process keys in each slot using the provided function
for slot, singleSlotKeys := range slots { // for slot, singleSlotKeys := range slots {
batches := splitIntoBatches(singleSlotKeys, rsm.config.batchSize) // batches := splitIntoBatches(singleSlotKeys, rsm.config.batchSize)
for _, batch := range batches { // for _, batch := range batches {
slot, batch := slot, batch // Avoid closure capture issue // slot, batch := slot, batch // Avoid closure capture issue
g.Go(func() error { // g.Go(func() error {
err := processFunc(ctx, slot, batch) // err := processFunc(ctx, slot, batch)
if err != nil { // if err != nil {
log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch) // log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
if !rsm.config.continueOnError { // if !rsm.config.continueOnError {
return err // return err
} // }
} // }
return nil // return nil
}) // })
} // }
} // }
//
if err := g.Wait(); err != nil { // if err := g.Wait(); err != nil {
return err // return err
} // }
return nil // return nil
} //}
// groupKeysBySlot groups keys by their Redis cluster hash slots. // groupKeysBySlot groups keys by their Redis cluster hash slots.
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) { func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
@ -197,15 +197,15 @@ func ProcessKeysBySlot(
return nil return nil
} }
func DeleteCacheBySlot(ctx context.Context, rcClient *rockscache.Client, keys []string) error { func DeleteCacheBySlot(ctx context.Context, rcClient *rocksCacheClient, keys []string) error {
switch len(keys) { switch len(keys) {
case 0: case 0:
return nil return nil
case 1: case 1:
return rcClient.TagAsDeletedBatch2(ctx, keys) return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
default: default:
return ProcessKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), keys, func(ctx context.Context, slot int64, keys []string) error { return ProcessKeysBySlot(ctx, rcClient.GetRedis(), keys, func(ctx context.Context, slot int64, keys []string) error {
return rcClient.TagAsDeletedBatch2(ctx, keys) return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
}) })
} }
} }

View File

@ -1,39 +1,23 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
"context" "context"
"github.com/dtm-labs/rockscache" "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/s3" "github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cont" "github.com/openimsdk/tools/s3/cont"
"github.com/openimsdk/tools/s3/minio"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"time"
) )
func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) cache.ObjectCache { func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) cache.ObjectCache {
opts := rockscache.NewDefaultOptions() rc := newRocksCacheClient(rdb)
batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
return &objectCacheRedis{ return &objectCacheRedis{
BatchDeleter: batchHandler, BatchDeleter: rc.GetBatchDeleter(),
rcClient: rockscache.NewClient(rdb, opts), rcClient: rc,
expireTime: time.Hour * 12, expireTime: time.Hour * 12,
objDB: objDB, objDB: objDB,
} }
@ -42,7 +26,7 @@ func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) c
type objectCacheRedis struct { type objectCacheRedis struct {
cache.BatchDeleter cache.BatchDeleter
objDB database.ObjectInfo objDB database.ObjectInfo
rcClient *rockscache.Client rcClient *rocksCacheClient
expireTime time.Duration expireTime time.Duration
} }
@ -76,11 +60,10 @@ func (g *objectCacheRedis) GetName(ctx context.Context, engine string, name stri
} }
func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache { func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache {
opts := rockscache.NewDefaultOptions() rc := newRocksCacheClient(rdb)
batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
return &s3CacheRedis{ return &s3CacheRedis{
BatchDeleter: batchHandler, BatchDeleter: rc.GetBatchDeleter(),
rcClient: rockscache.NewClient(rdb, opts), rcClient: rc,
expireTime: time.Hour * 12, expireTime: time.Hour * 12,
s3: s3, s3: s3,
} }
@ -89,7 +72,7 @@ func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache {
type s3CacheRedis struct { type s3CacheRedis struct {
cache.BatchDeleter cache.BatchDeleter
s3 s3.Interface s3 s3.Interface
rcClient *rockscache.Client rcClient *rocksCacheClient
expireTime time.Duration expireTime time.Duration
} }
@ -110,52 +93,3 @@ func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (
return g.s3.StatObject(ctx, name) return g.s3.StatObject(ctx, name)
}) })
} }
func NewMinioCache(rdb redis.UniversalClient) minio.Cache {
opts := rockscache.NewDefaultOptions()
batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
return &minioCacheRedis{
BatchDeleter: batchHandler,
rcClient: rockscache.NewClient(rdb, opts),
expireTime: time.Hour * 24 * 7,
}
}
type minioCacheRedis struct {
cache.BatchDeleter
rcClient *rockscache.Client
expireTime time.Duration
}
func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
return cachekey.GetObjectImageInfoKey(key)
}
func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
}
func (g *minioCacheRedis) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
ks := make([]string, 0, len(keys))
for _, key := range keys {
ks = append(ks, g.getObjectImageInfoKey(key))
}
return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
}
func (g *minioCacheRedis) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
return g.BatchDeleter.ExecDelWithKeys(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
}
func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
if err != nil {
return nil, err
}
return info, nil
}
func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}

View File

@ -4,33 +4,35 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"github.com/dtm-labs/rockscache" "strconv"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor" "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"strconv"
"time"
) )
func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache { func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
if rdb == nil {
return mcache.NewSeqConversationCache(mgo)
}
return &seqConversationCacheRedis{ return &seqConversationCacheRedis{
rdb: rdb,
mgo: mgo, mgo: mgo,
lockTime: time.Second * 3, lockTime: time.Second * 3,
dataTime: time.Hour * 24 * 365, dataTime: time.Hour * 24 * 365,
minSeqExpireTime: time.Hour, minSeqExpireTime: time.Hour,
rocks: rockscache.NewClient(rdb, *GetRocksCacheOptions()), rcClient: newRocksCacheClient(rdb),
} }
} }
type seqConversationCacheRedis struct { type seqConversationCacheRedis struct {
rdb redis.UniversalClient
mgo database.SeqConversation mgo database.SeqConversation
rocks *rockscache.Client rcClient *rocksCacheClient
lockTime time.Duration lockTime time.Duration
dataTime time.Duration dataTime time.Duration
minSeqExpireTime time.Duration minSeqExpireTime time.Duration
@ -45,7 +47,7 @@ func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationI
} }
func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) { func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
return getCache(ctx, s.rocks, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) { return getCache(ctx, s.rcClient, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
return s.mgo.GetMinSeq(ctx, conversationID) return s.mgo.GetMinSeq(ctx, conversationID)
}) })
} }
@ -68,7 +70,7 @@ func (s *seqConversationCacheRedis) getSingleMaxSeqWithTime(ctx context.Context,
func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error { func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
result := make([]*redis.StringCmd, len(keys)) result := make([]*redis.StringCmd, len(keys))
pipe := s.rdb.Pipeline() pipe := s.rcClient.GetRedis().Pipeline()
for i, key := range keys { for i, key := range keys {
result[i] = pipe.HGet(ctx, key, "CURR") result[i] = pipe.HGet(ctx, key, "CURR")
} }
@ -99,7 +101,7 @@ func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []s
func (s *seqConversationCacheRedis) batchGetMaxSeqWithTime(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]database.SeqTime) error { func (s *seqConversationCacheRedis) batchGetMaxSeqWithTime(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]database.SeqTime) error {
result := make([]*redis.SliceCmd, len(keys)) result := make([]*redis.SliceCmd, len(keys))
pipe := s.rdb.Pipeline() pipe := s.rcClient.GetRedis().Pipeline()
for i, key := range keys { for i, key := range keys {
result[i] = pipe.HMGet(ctx, key, "CURR", "TIME") result[i] = pipe.HMGet(ctx, key, "CURR", "TIME")
} }
@ -157,7 +159,7 @@ func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversation
if len(keys) == 1 { if len(keys) == 1 {
return s.getSingleMaxSeq(ctx, conversationIDs[0]) return s.getSingleMaxSeq(ctx, conversationIDs[0])
} }
slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys) slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -190,7 +192,7 @@ func (s *seqConversationCacheRedis) GetMaxSeqsWithTime(ctx context.Context, conv
if len(keys) == 1 { if len(keys) == 1 {
return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0]) return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0])
} }
slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys) slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -234,7 +236,7 @@ redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime)
redis.call("EXPIRE", key, dataSecond) redis.call("EXPIRE", key, dataSecond)
return 0 return 0
` `
result, err := s.rdb.Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq, mill).Int64() result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq, mill).Int64()
if err != nil { if err != nil {
return 0, errs.Wrap(err) return 0, errs.Wrap(err)
} }
@ -305,7 +307,7 @@ table.insert(result, last_seq)
table.insert(result, mallocTime) table.insert(result, mallocTime)
return result return result
` `
result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second), time.Now().UnixMilli()).Int64Slice() result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second), time.Now().UnixMilli()).Int64Slice()
if err != nil { if err != nil {
return nil, errs.Wrap(err) return nil, errs.Wrap(err)
} }
@ -438,7 +440,7 @@ func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[str
return err return err
} }
} }
return DeleteCacheBySlot(ctx, s.rocks, keys) return DeleteCacheBySlot(ctx, s.rcClient, keys)
} }
// GetCacheMaxSeqWithTime only get the existing cache, if there is no cache, no cache will be generated // GetCacheMaxSeqWithTime only get the existing cache, if there is no cache, no cache will be generated
@ -456,7 +458,7 @@ func (s *seqConversationCacheRedis) GetCacheMaxSeqWithTime(ctx context.Context,
key2conversationID[key] = conversationID key2conversationID[key] = conversationID
keys = append(keys, key) keys = append(keys, key)
} }
slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys) slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -465,7 +467,7 @@ func (s *seqConversationCacheRedis) GetCacheMaxSeqWithTime(ctx context.Context,
if len(keys) == 0 { if len(keys) == 0 {
continue continue
} }
pipe := s.rdb.Pipeline() pipe := s.rcClient.GetRedis().Pipeline()
cmds := make([]*redis.SliceCmd, 0, len(keys)) cmds := make([]*redis.SliceCmd, 0, len(keys))
for _, key := range keys { for _, key := range keys {
cmds = append(cmds, pipe.HMGet(ctx, key, "CURR", "TIME")) cmds = append(cmds, pipe.HMGet(ctx, key, "CURR", "TIME"))

View File

@ -2,31 +2,29 @@ package redis
import ( import (
"context" "context"
"github.com/dtm-labs/rockscache" "strconv"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"strconv"
"time"
) )
func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser { func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
return &seqUserCacheRedis{ return &seqUserCacheRedis{
rdb: rdb,
mgo: mgo, mgo: mgo,
readSeqWriteRatio: 100, readSeqWriteRatio: 100,
expireTime: time.Hour * 24 * 7, expireTime: time.Hour * 24 * 7,
readExpireTime: time.Hour * 24 * 30, readExpireTime: time.Hour * 24 * 30,
rocks: rockscache.NewClient(rdb, *GetRocksCacheOptions()), rocks: newRocksCacheClient(rdb),
} }
} }
type seqUserCacheRedis struct { type seqUserCacheRedis struct {
rdb redis.UniversalClient
mgo database.SeqUser mgo database.SeqUser
rocks *rockscache.Client rocks *rocksCacheClient
expireTime time.Duration expireTime time.Duration
readExpireTime time.Duration readExpireTime time.Duration
readSeqWriteRatio int64 readSeqWriteRatio int64
@ -54,7 +52,7 @@ func (s *seqUserCacheRedis) SetUserMaxSeq(ctx context.Context, conversationID st
if err := s.mgo.SetUserMaxSeq(ctx, conversationID, userID, seq); err != nil { if err := s.mgo.SetUserMaxSeq(ctx, conversationID, userID, seq); err != nil {
return err return err
} }
return s.rocks.TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID)) return s.rocks.GetClient().TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
} }
func (s *seqUserCacheRedis) GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) { func (s *seqUserCacheRedis) GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
@ -74,12 +72,15 @@ func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID s
} }
func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error { func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
if s.rocks.GetRedis() == nil {
return s.SetUserReadSeqToDB(ctx, conversationID, userID, seq)
}
dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID) dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID)
if err != nil { if err != nil {
return err return err
} }
if dbSeq < seq { if dbSeq < seq {
if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil { if err := s.rocks.GetClient().RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
return errs.Wrap(err) return errs.Wrap(err)
} }
} }
@ -109,12 +110,12 @@ func (s *seqUserCacheRedis) setUserRedisReadSeqs(ctx context.Context, userID str
keys = append(keys, key) keys = append(keys, key)
keySeq[key] = seq keySeq[key] = seq
} }
slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys) slotKeys, err := groupKeysBySlot(ctx, s.rocks.GetRedis(), keys)
if err != nil { if err != nil {
return err return err
} }
for _, keys := range slotKeys { for _, keys := range slotKeys {
pipe := s.rdb.Pipeline() pipe := s.rocks.GetRedis().Pipeline()
for _, key := range keys { for _, key := range keys {
pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10)) pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
pipe.Expire(ctx, key, s.readExpireTime) pipe.Expire(ctx, key, s.readExpireTime)

View File

@ -1,26 +1,13 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
"context" "context"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"time"
) )
func NewThirdCache(rdb redis.UniversalClient) cache.ThirdCache { func NewThirdCache(rdb redis.UniversalClient) cache.ThirdCache {

View File

@ -1,30 +1,16 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis package redis
import ( import (
"context" "context"
"time"
"github.com/dtm-labs/rockscache" "github.com/dtm-labs/rockscache"
"github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/log"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"time"
) )
const ( const (
@ -38,19 +24,17 @@ type UserCacheRedis struct {
rdb redis.UniversalClient rdb redis.UniversalClient
userDB database.User userDB database.User
expireTime time.Duration expireTime time.Duration
rcClient *rockscache.Client rcClient *rocksCacheClient
} }
func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, userDB database.User, options *rockscache.Options) cache.UserCache { func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, userDB database.User, options *rockscache.Options) cache.UserCache {
batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.User.Topic}) rc := newRocksCacheClient(rdb)
u := localCache.User
log.ZDebug(context.Background(), "user local cache init", "Topic", u.Topic, "SlotNum", u.SlotNum, "SlotSize", u.SlotSize, "enable", u.Enable())
return &UserCacheRedis{ return &UserCacheRedis{
BatchDeleter: batchHandler, BatchDeleter: rc.GetBatchDeleter(localCache.User.Topic),
rdb: rdb, rdb: rdb,
userDB: userDB, userDB: userDB,
expireTime: userExpireTime, expireTime: userExpireTime,
rcClient: rockscache.NewClient(rdb, *options), rcClient: rc,
} }
} }

View File

@ -140,7 +140,7 @@ func NewGroupDatabase(
groupMemberDB: groupMemberDB, groupMemberDB: groupMemberDB,
groupRequestDB: groupRequestDB, groupRequestDB: groupRequestDB,
ctxTx: ctxTx, ctxTx: ctxTx,
cache: redis2.NewGroupCacheRedis(rdb, localCache, groupDB, groupMemberDB, groupRequestDB, groupHash, redis2.GetRocksCacheOptions()), cache: redis2.NewGroupCacheRedis(rdb, localCache, groupDB, groupMemberDB, groupRequestDB, groupHash),
} }
} }

View File

@ -18,19 +18,21 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"github.com/openimsdk/tools/mq"
"github.com/openimsdk/tools/utils/jsonutil"
"google.golang.org/protobuf/proto"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
@ -38,7 +40,6 @@ import (
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
) )
@ -102,22 +103,14 @@ type CommonMsgDatabase interface {
GetLastMessage(ctx context.Context, conversationIDS []string, userID string) (map[string]*sdkws.MsgData, error) GetLastMessage(ctx context.Context, conversationIDS []string, userID string) (map[string]*sdkws.MsgData, error)
} }
func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) { func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, producer mq.Producer) CommonMsgDatabase {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil, err
}
producerToRedis, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToRedisTopic)
if err != nil {
return nil, err
}
return &commonMsgDatabase{ return &commonMsgDatabase{
msgDocDatabase: msgDocModel, msgDocDatabase: msgDocModel,
msgCache: msg, msgCache: msg,
seqUser: seqUser, seqUser: seqUser,
seqConversation: seqConversation, seqConversation: seqConversation,
producer: producerToRedis, producer: producer,
}, nil }
} }
type commonMsgDatabase struct { type commonMsgDatabase struct {
@ -126,12 +119,15 @@ type commonMsgDatabase struct {
msgCache cache.MsgCache msgCache cache.MsgCache
seqConversation cache.SeqConversationCache seqConversation cache.SeqConversationCache
seqUser cache.SeqUser seqUser cache.SeqUser
producer *kafka.Producer producer mq.Producer
} }
func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error { func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error {
_, _, err := db.producer.SendMessage(ctx, key, msg2mq) data, err := proto.Marshal(msg2mq)
return err if err != nil {
return err
}
return db.producer.SendMessage(ctx, key, data)
} }
func (db *commonMsgDatabase) batchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error { func (db *commonMsgDatabase) batchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error {

View File

@ -2,11 +2,13 @@ package controller
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/constant"
"github.com/openimsdk/tools/mq"
"github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/datautil"
"google.golang.org/protobuf/proto"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@ -14,7 +16,6 @@ import (
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo"
) )
@ -32,30 +33,30 @@ type MsgTransferDatabase interface {
SetHasReadSeqToDB(ctx context.Context, conversationID string, userSeqMap map[string]int64) error SetHasReadSeqToDB(ctx context.Context, conversationID string, userSeqMap map[string]int64) error
// to mq // to mq
MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) error
MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error
} }
func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (MsgTransferDatabase, error) { func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, mongoProducer, pushProducer mq.Producer) (MsgTransferDatabase, error) {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build()) //conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil { //if err != nil {
return nil, err // return nil, err
} //}
producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic) //producerToMongo, err := kafka.NewKafkaProducerV2(conf, kafkaConf.Address, kafkaConf.ToMongoTopic)
if err != nil { //if err != nil {
return nil, err // return nil, err
} //}
producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic) //producerToPush, err := kafka.NewKafkaProducerV2(conf, kafkaConf.Address, kafkaConf.ToPushTopic)
if err != nil { //if err != nil {
return nil, err // return nil, err
} //}
return &msgTransferDatabase{ return &msgTransferDatabase{
msgDocDatabase: msgDocModel, msgDocDatabase: msgDocModel,
msgCache: msg, msgCache: msg,
seqUser: seqUser, seqUser: seqUser,
seqConversation: seqConversation, seqConversation: seqConversation,
producerToMongo: producerToMongo, producerToMongo: mongoProducer,
producerToPush: producerToPush, producerToPush: pushProducer,
}, nil }, nil
} }
@ -65,8 +66,8 @@ type msgTransferDatabase struct {
msgCache cache.MsgCache msgCache cache.MsgCache
seqConversation cache.SeqConversationCache seqConversation cache.SeqConversationCache
seqUser cache.SeqUser seqUser cache.SeqUser
producerToMongo *kafka.Producer producerToMongo mq.Producer
producerToPush *kafka.Producer producerToPush mq.Producer
} }
func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error { func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error {
@ -281,19 +282,25 @@ func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, conversati
return nil return nil
} }
func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) { func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) error {
partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID}) data, err := proto.Marshal(&pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
if err != nil { if err != nil {
log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq) return err
return 0, 0, err
} }
return partition, offset, nil if err := db.producerToPush.SendMessage(ctx, key, data); err != nil {
log.ZError(ctx, "MsgToPushMQ", err, "key", key, "conversationID", conversationID)
return err
}
return nil
} }
func (db *msgTransferDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error { func (db *msgTransferDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error {
if len(messages) > 0 { if len(messages) > 0 {
_, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages}) data, err := proto.Marshal(&pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
if err != nil { if err != nil {
return err
}
if err := db.producerToMongo.SendMessage(ctx, key, data); err != nil {
log.ZError(ctx, "MsgToMongoMQ", err, "key", key, "conversationID", conversationID, "lastSeq", lastSeq) log.ZError(ctx, "MsgToMongoMQ", err, "key", key, "conversationID", conversationID, "lastSeq", lastSeq)
return err return err
} }

View File

@ -17,12 +17,12 @@ package controller
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/protocol/push" "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws" "github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/log" "github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka" "github.com/openimsdk/tools/mq"
"google.golang.org/protobuf/proto"
) )
type PushDatabase interface { type PushDatabase interface {
@ -32,21 +32,13 @@ type PushDatabase interface {
type pushDataBase struct { type pushDataBase struct {
cache cache.ThirdCache cache cache.ThirdCache
producerToOfflinePush *kafka.Producer producerToOfflinePush mq.Producer
} }
func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase { func NewPushDatabase(cache cache.ThirdCache, offlinePushProducer mq.Producer) PushDatabase {
conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
if err != nil {
return nil
}
producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
if err != nil {
return nil
}
return &pushDataBase{ return &pushDataBase{
cache: cache, cache: cache,
producerToOfflinePush: producerToOfflinePush, producerToOfflinePush: offlinePushProducer,
} }
} }
@ -55,7 +47,12 @@ func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformI
} }
func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error { func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error {
_, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs}) data, err := proto.Marshal(&push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
log.ZInfo(ctx, "message is push to offlinePush topic", "key", key, "userIDs", userIDs, "msg", msg2mq.String()) if err != nil {
return err
}
if err := p.producerToOfflinePush.SendMessage(ctx, key, data); err != nil {
log.ZError(ctx, "message is push to offlinePush topic", err, "key", key, "userIDs", userIDs, "msg", msg2mq.String())
}
return err return err
} }

View File

@ -30,7 +30,7 @@ import (
) )
type S3Database interface { type S3Database interface {
PartLimit() *s3.PartLimit PartLimit() (*s3.PartLimit, error)
PartSize(ctx context.Context, size int64) (int64, error) PartSize(ctx context.Context, size int64) (int64, error)
AuthSign(ctx context.Context, uploadID string, partNumbers []int) (*s3.AuthSignResult, error) AuthSign(ctx context.Context, uploadID string, partNumbers []int) (*s3.AuthSignResult, error)
InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error) InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error)
@ -65,7 +65,7 @@ func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
return s.s3.PartSize(ctx, size) return s.s3.PartSize(ctx, size)
} }
func (s *s3Database) PartLimit() *s3.PartLimit { func (s *s3Database) PartLimit() (*s3.PartLimit, error) {
return s.s3.PartLimit() return s.s3.PartLimit()
} }

View File

@ -16,6 +16,7 @@ package database
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/pagination" "github.com/openimsdk/tools/db/pagination"
) )
@ -29,3 +30,85 @@ type Black interface {
FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error) FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error)
FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error) FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error)
} }
var (
	// Compile-time assertions that both planned backends satisfy Black.
	_ Black = (*mgoImpl)(nil)
	_ Black = (*redisImpl)(nil)
)

// mgoImpl is intended to be the MongoDB-backed implementation of Black.
// NOTE(review): every method is an unimplemented stub that panics — confirm
// this type is not reachable from any production code path yet.
type mgoImpl struct {
}

func (m *mgoImpl) Create(ctx context.Context, blacks []*model.Black) (err error) {
	//TODO implement me
	panic("implement me")
}

func (m *mgoImpl) Delete(ctx context.Context, blacks []*model.Black) (err error) {
	//TODO implement me
	panic("implement me")
}

func (m *mgoImpl) Find(ctx context.Context, blacks []*model.Black) (blackList []*model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (m *mgoImpl) Take(ctx context.Context, ownerUserID, blockUserID string) (black *model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (m *mgoImpl) FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (m *mgoImpl) FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (m *mgoImpl) FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error) {
	//TODO implement me
	panic("implement me")
}
// redisImpl is intended to be the Redis-backed implementation of Black.
// NOTE(review): every method is an unimplemented stub that panics — confirm
// this type is not reachable from any production code path yet.
type redisImpl struct {
}

func (r *redisImpl) Create(ctx context.Context, blacks []*model.Black) (err error) {
	//TODO implement me
	panic("implement me")
}

func (r *redisImpl) Delete(ctx context.Context, blacks []*model.Black) (err error) {
	//TODO implement me
	panic("implement me")
}

func (r *redisImpl) Find(ctx context.Context, blacks []*model.Black) (blackList []*model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (r *redisImpl) Take(ctx context.Context, ownerUserID, blockUserID string) (black *model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (r *redisImpl) FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (r *redisImpl) FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error) {
	//TODO implement me
	panic("implement me")
}

func (r *redisImpl) FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error) {
	//TODO implement me
	panic("implement me")
}

View File

@ -0,0 +1,16 @@
package database
import (
"context"
"time"
)
// Cache is a minimal key/value store with TTL and lock support. It is the
// storage-layer abstraction behind the MongoDB-backed cache used when Redis
// is not available (e.g. standalone deployments).
type Cache interface {
	// Get returns the values for the given keys; missing or expired keys are
	// simply absent from the result map.
	Get(ctx context.Context, key []string) (map[string]string, error)
	// Prefix returns all unexpired entries whose key starts with prefix.
	Prefix(ctx context.Context, prefix string) (map[string]string, error)
	// Set stores key=value; expireAt <= 0 means the entry never expires.
	Set(ctx context.Context, key string, value string, expireAt time.Duration) error
	// Incr atomically adds value to the integer stored at key and returns the
	// new total.
	Incr(ctx context.Context, key string, value int) (int, error)
	// Del removes the given keys; an empty slice is a no-op.
	Del(ctx context.Context, key []string) error
	// Lock acquires a named mutex held for at most duration and returns an
	// opaque token to pass to Unlock.
	Lock(ctx context.Context, key string, duration time.Duration) (string, error)
	// Unlock releases a lock previously acquired with Lock; the token must
	// match or the lock is left untouched.
	Unlock(ctx context.Context, key string, value string) error
}

View File

@ -0,0 +1,183 @@
package mgo
import (
	"context"
	"regexp"
	"strconv"
	"time"

	"github.com/google/uuid"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/openimsdk/tools/errs"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)
// NewCacheMgo opens the cache collection and ensures its two indexes: a
// unique index on "key" (one document per key, also backing the lock
// semantics) and a TTL index on "expire_at" so MongoDB reaps expired entries.
func NewCacheMgo(db *mongo.Database) (*CacheMgo, error) {
	coll := db.Collection(database.CacheName)
	indexes := []mongo.IndexModel{
		{
			Keys:    bson.D{{Key: "key", Value: 1}},
			Options: options.Index().SetUnique(true),
		},
		{
			Keys:    bson.D{{Key: "expire_at", Value: 1}},
			Options: options.Index().SetExpireAfterSeconds(0),
		},
	}
	if _, err := coll.Indexes().CreateMany(context.Background(), indexes); err != nil {
		return nil, errs.Wrap(err)
	}
	return &CacheMgo{coll: coll}, nil
}
// CacheMgo implements the database.Cache interface on top of a single
// MongoDB collection.
type CacheMgo struct {
	coll *mongo.Collection
}
// findToMap converts fetched cache documents into a key→value map, dropping
// any entry whose expiration time is already in the past relative to now.
func (x *CacheMgo) findToMap(res []model.Cache, now time.Time) map[string]string {
	out := make(map[string]string, len(res))
	for i := range res {
		doc := &res[i]
		if doc.ExpireAt != nil && doc.ExpireAt.Before(now) {
			continue
		}
		out[doc.Key] = doc.Value
	}
	return out
}
// Get returns the values for the given keys. Keys that are missing or whose
// expire_at has passed are omitted; an empty key slice returns (nil, nil).
func (x *CacheMgo) Get(ctx context.Context, key []string) (map[string]string, error) {
	if len(key) == 0 {
		return nil, nil
	}
	now := time.Now()
	// Match requested keys that are either non-expiring or not yet expired.
	filter := bson.M{
		"key": bson.M{"$in": key},
		"$or": []bson.M{
			{"expire_at": bson.M{"$gt": now}},
			{"expire_at": nil},
		},
	}
	docs, err := mongoutil.Find[model.Cache](ctx, x.coll, filter)
	if err != nil {
		return nil, err
	}
	return x.findToMap(docs, now), nil
}
// Prefix returns all unexpired cache entries whose key begins with prefix.
//
// The prefix is escaped with regexp.QuoteMeta so keys containing regex
// metacharacters ('.', '*', '$', ...) are matched literally; the previous
// raw concatenation let such characters be interpreted as a pattern.
func (x *CacheMgo) Prefix(ctx context.Context, prefix string) (map[string]string, error) {
	now := time.Now()
	filter := bson.M{
		"key": bson.M{"$regex": "^" + regexp.QuoteMeta(prefix)},
		"$or": []bson.M{
			{"expire_at": bson.M{"$gt": now}},
			{"expire_at": nil},
		},
	}
	res, err := mongoutil.Find[model.Cache](ctx, x.coll, filter)
	if err != nil {
		return nil, err
	}
	return x.findToMap(res, now), nil
}
// Set upserts key=value. A positive expireAt schedules expiration at
// now+expireAt; zero or negative leaves ExpireAt nil, i.e. no expiration.
func (x *CacheMgo) Set(ctx context.Context, key string, value string, expireAt time.Duration) error {
	doc := &model.Cache{
		Key:   key,
		Value: value,
	}
	if expireAt > 0 {
		deadline := time.Now().Add(expireAt)
		doc.ExpireAt = &deadline
	}
	upsert := options.Update().SetUpsert(true)
	return mongoutil.UpdateOne(ctx, x.coll, bson.M{"key": key}, bson.M{"$set": doc}, false, upsert)
}
// Incr atomically adds value to the integer stored (as a string) at key and
// returns the new total. The whole read-add-write happens server-side in a
// single FindOneAndUpdate with an aggregation-pipeline update, so concurrent
// callers cannot lose increments.
// NOTE(review): if the key does not exist, or its value is not numeric, the
// error from the driver/$toInt is returned — confirm callers pre-create the
// counter with Set.
func (x *CacheMgo) Incr(ctx context.Context, key string, value int) (int, error) {
	// $toInt parses the stored string, $add applies the delta, and $toString
	// writes it back in the collection's uniform string representation.
	pipeline := mongo.Pipeline{
		{
			{"$set", bson.M{
				"value": bson.M{
					"$toString": bson.M{
						"$add": bson.A{
							bson.M{"$toInt": "$value"},
							value,
						},
					},
				},
			}},
		},
	}
	// ReturnDocument(After) makes the driver hand back the post-update value.
	opt := options.FindOneAndUpdate().SetReturnDocument(options.After)
	res, err := mongoutil.FindOneAndUpdate[model.Cache](ctx, x.coll, bson.M{"key": key}, pipeline, opt)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(res.Value)
}
// Del removes the given keys in one DeleteMany; a nil/empty slice is a no-op.
// The driver error is wrapped with errs.Wrap for a stack trace, matching the
// error-handling convention used elsewhere in this file (see NewCacheMgo).
func (x *CacheMgo) Del(ctx context.Context, key []string) error {
	if len(key) == 0 {
		return nil
	}
	if _, err := x.coll.DeleteMany(ctx, bson.M{"key": bson.M{"$in": key}}); err != nil {
		return errs.Wrap(err)
	}
	return nil
}
// lockKey maps a user-visible lock name to the document key under which the
// lock is stored, keeping locks in a distinct namespace from plain entries.
func (x *CacheMgo) lockKey(key string) string {
	const lockPrefix = "LOCK_"
	return lockPrefix + key
}
// Lock acquires a distributed mutex named key, retrying with a 100ms backoff
// until the lock document can be inserted or the 30s acquisition window (or
// the caller's ctx) expires. It returns an opaque token that must be passed
// to Unlock. duration bounds how long the lock may be held before it is
// considered stale; values outside (0, 10m] are clamped to 10 minutes.
func (x *CacheMgo) Lock(ctx context.Context, key string, duration time.Duration) (string, error) {
	token, err := uuid.NewUUID()
	if err != nil {
		return "", err
	}
	if duration <= 0 || duration > time.Minute*10 {
		duration = time.Minute * 10
	}
	lockKey := x.lockKey(key)
	cv := &model.Cache{
		Key:      lockKey,
		Value:    token.String(),
		ExpireAt: nil,
	}
	// Bound the total time spent trying to acquire the lock.
	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	wait := func() error {
		timeout := time.NewTimer(time.Millisecond * 100)
		defer timeout.Stop()
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timeout.C:
			return nil
		}
	}
	for {
		// Remove a stale lock left behind by a crashed holder. This filter must
		// use the LOCK_-prefixed key under which the lock document is inserted
		// below; the previous code filtered on the raw key, so stale locks were
		// never cleaned up here and Lock spun until MongoDB's background TTL
		// monitor (which only runs periodically) eventually removed them.
		if err := mongoutil.DeleteOne(ctx, x.coll, bson.M{"key": lockKey, "expire_at": bson.M{"$lt": time.Now()}}); err != nil {
			return "", err
		}
		expireAt := time.Now().Add(duration)
		cv.ExpireAt = &expireAt
		if err := mongoutil.InsertMany[*model.Cache](ctx, x.coll, []*model.Cache{cv}); err != nil {
			if mongo.IsDuplicateKeyError(err) {
				// Another holder owns the lock: back off and retry.
				if err := wait(); err != nil {
					return "", err
				}
				continue
			}
			return "", err
		}
		return cv.Value, nil
	}
}
// Unlock releases the lock named key, but only if value matches the token
// returned by Lock — a stale holder cannot release someone else's lock.
func (x *CacheMgo) Unlock(ctx context.Context, key string, value string) error {
	filter := bson.M{"key": x.lockKey(key), "value": value}
	return mongoutil.DeleteOne(ctx, x.coll, filter)
}

View File

@ -0,0 +1,133 @@
package mgo
import (
"context"
"strings"
"sync"
"testing"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/mongoutil"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// TestName1111 manually exercises the $toInt/$add/$toString increment
// pipeline against a live MongoDB instance (developer integration test; it
// requires the Mongodb() helper's database to be reachable).
func TestName1111(t *testing.T) {
	coll := Mongodb().Collection("temp")
	incr := mongo.Pipeline{
		{
			{"$set", bson.M{
				"value": bson.M{
					"$toString": bson.M{
						"$add": bson.A{
							bson.M{"$toInt": "$value"},
							1,
						},
					},
				},
			}},
		},
	}
	opt := options.FindOneAndUpdate().SetUpsert(false).SetReturnDocument(options.After)
	res, err := mongoutil.FindOneAndUpdate[model.Cache](context.Background(), coll, bson.M{"key": "123456"}, incr, opt)
	if err != nil {
		panic(err)
	}
	t.Log(res)
}
// TestName33333 smoke-tests Set, Get and Prefix against a live MongoDB
// instance (developer integration test).
func TestName33333(t *testing.T) {
	ctx := context.Background()
	cm, err := NewCacheMgo(Mongodb())
	if err != nil {
		panic(err)
	}
	for _, kv := range []string{"123456", "123666"} {
		if err := cm.Set(ctx, kv, kv, time.Hour); err != nil {
			panic(err)
		}
	}
	byKey, err := cm.Get(ctx, []string{"123456"})
	if err != nil {
		panic(err)
	}
	t.Log(byKey)
	byPrefix, err := cm.Prefix(ctx, "123")
	if err != nil {
		panic(err)
	}
	t.Log(byPrefix)
}
// TestName1111aa stress-tests Lock/Unlock from 32 goroutines, each doing 100
// lock-protected increments of a shared counter (developer integration test
// against live MongoDB). The initial held lock causes workers to contend.
func TestName1111aa(t *testing.T) {
	cm, err := NewCacheMgo(Mongodb())
	if err != nil {
		panic(err)
	}
	const key = "123456"
	var count int
	increment := func() {
		token, err := cm.Lock(context.Background(), key, time.Second*30)
		if err != nil {
			t.Log("Lock error", err)
			return
		}
		// The read-modify-write is only safe because the lock is held.
		tmp := count
		tmp++
		count = tmp
		t.Log("count", tmp)
		if err := cm.Unlock(context.Background(), key, token); err != nil {
			t.Log("Unlock error", err)
			return
		}
	}
	if _, err := cm.Lock(context.Background(), key, time.Second*10); err != nil {
		t.Log(err)
		return
	}
	var wg sync.WaitGroup
	for i := 0; i < 32; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 100; j++ {
				increment()
			}
		}()
	}
	wg.Wait()
}
// TestName111111a is a scratch check of SplitN's "split at most once"
// behavior: the second separator stays inside the final element.
func TestName111111a(t *testing.T) {
	parts := strings.SplitN("1:testkakskdask:1111", ":", 2)
	t.Log(parts)
}

View File

@ -2,16 +2,17 @@ package mgo
import ( import (
"context" "context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/mongoutil"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"math" "math"
"math/rand" "math/rand"
"strconv" "strconv"
"testing" "testing"
"time" "time"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/tools/db/mongoutil"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
) )
func TestName1(t *testing.T) { func TestName1(t *testing.T) {
@ -93,7 +94,7 @@ func TestName3(t *testing.T) {
func TestName4(t *testing.T) { func TestName4(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*300) ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
defer cancel() defer cancel()
cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.66:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))) cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
msg, err := NewMsgMongo(cli.Database("openim_v3")) msg, err := NewMsgMongo(cli.Database("openim_v3"))
if err != nil { if err != nil {
@ -109,6 +110,41 @@ func TestName4(t *testing.T) {
} }
func TestName5(t *testing.T) { func TestName5(t *testing.T) {
var v time.Time ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
t.Log(v.UnixMilli()) defer cancel()
cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
tmp, err := NewMsgMongo(cli.Database("openim_v3"))
if err != nil {
panic(err)
}
msg := tmp.(*MsgMgo)
ts := time.Now().Add(-time.Hour * 24 * 5).UnixMilli()
t.Log(ts)
var seqs []int64
for i := 1; i < 256; i++ {
seqs = append(seqs, int64(i))
}
res, err := msg.FindSeqs(ctx, "si_4924054191_9511766539", seqs)
if err != nil {
panic(err)
}
t.Log(res)
} }
//func TestName6(t *testing.T) {
// ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
// defer cancel()
// cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
//
// tmp, err := NewMsgMongo(cli.Database("openim_v3"))
// if err != nil {
// panic(err)
// }
// msg := tmp.(*MsgMgo)
// seq, sendTime, err := msg.findBeforeSendTime(ctx, "si_4924054191_9511766539", 1144)
// if err != nil {
// panic(err)
// }
// t.Log(seq, sendTime)
//}

View File

@ -2,10 +2,11 @@ package mgo
import ( import (
"context" "context"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"testing" "testing"
"time" "time"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
) )
func Result[V any](val V, err error) V { func Result[V any](val V, err error) V {
@ -19,7 +20,7 @@ func Mongodb() *mongo.Database {
return Result( return Result(
mongo.Connect(context.Background(), mongo.Connect(context.Background(),
options.Client(). options.Client().
ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100"). ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").
SetConnectTimeout(5*time.Second)), SetConnectTimeout(5*time.Second)),
).Database("openim_v3") ).Database("openim_v3")
} }

View File

@ -18,4 +18,5 @@ const (
SeqConversationName = "seq" SeqConversationName = "seq"
SeqUserName = "seq_user" SeqUserName = "seq_user"
StreamMsgName = "stream_msg" StreamMsgName = "stream_msg"
CacheName = "cache"
) )

View File

@ -0,0 +1,9 @@
package model
import "time"
// Cache is one key/value document in the "cache" collection. A nil ExpireAt
// means the entry never expires; otherwise the TTL index on expire_at lets
// MongoDB delete the document after that time.
type Cache struct {
	Key string `bson:"key"`
	Value string `bson:"value"`
	ExpireAt *time.Time `bson:"expire_at"`
}

25
pkg/dbbuild/builder.go Normal file
View File

@ -0,0 +1,25 @@
package dbbuild
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/redis/go-redis/v9"
)
// Builder abstracts how database connections are obtained, so the same code
// can run either as separate microservices (one connection set per process)
// or in single-process standalone mode (shared, lazily created connections).
type Builder interface {
	// Mongo returns a MongoDB client.
	Mongo(ctx context.Context) (*mongoutil.Client, error)
	// Redis returns a Redis client; implementations may return (nil, nil)
	// when Redis is disabled in the configuration.
	Redis(ctx context.Context) (redis.UniversalClient, error)
}
// NewBuilder picks the Builder implementation for the current deployment
// mode: the process-wide shared standalone builder when running in single
// process mode, otherwise a per-instance builder that dials independently.
func NewBuilder(mongoConf *config.Mongo, redisConf *config.Redis) Builder {
	if !config.Standalone() {
		return &microservices{mongo: mongoConf, redis: redisConf}
	}
	globalStandalone.setConfig(mongoConf, redisConf)
	return globalStandalone
}

View File

@ -0,0 +1,26 @@
package dbbuild
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/redis/go-redis/v9"
)
// microservices is the Builder used when services run as separate processes:
// each call builds a fresh client from the raw configuration (no caching).
type microservices struct {
	mongo *config.Mongo // MongoDB connection settings
	redis *config.Redis // Redis connection settings
}
// Mongo dials a new MongoDB client from the configured settings.
func (x *microservices) Mongo(ctx context.Context) (*mongoutil.Client, error) {
	conf := x.mongo.Build()
	return mongoutil.NewMongoDB(ctx, conf)
}
// Redis dials a new Redis client, or returns (nil, nil) when Redis is
// disabled in the configuration.
func (x *microservices) Redis(ctx context.Context) (redis.UniversalClient, error) {
	if !x.redis.Disable {
		return redisutil.NewRedisClient(ctx, x.redis.Build())
	}
	return nil, nil
}

76
pkg/dbbuild/standalone.go Normal file
View File

@ -0,0 +1,76 @@
package dbbuild
import (
"context"
"sync"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/redis/go-redis/v9"
)
// Keys into standalone.conn identifying which cached connection an entry holds.
const (
	standaloneMongo = "mongo"
	standaloneRedis = "redis"
)

// globalStandalone is the process-wide builder shared in single-process
// (standalone) mode so all services reuse the same connections.
var globalStandalone = &standalone{}
// standaloneConn memoizes the outcome of a single dial attempt — either the
// established connection or the error — so it can be replayed to later callers.
type standaloneConn[C any] struct {
	Conn C     // the connection; zero value when Err is non-nil
	Err  error // the dial error, if any
}

// result unpacks the memoized (connection, error) pair.
func (x *standaloneConn[C]) result() (C, error) {
	return x.Conn, x.Err
}
// standalone is the Builder for single-process mode: connections are created
// lazily on first use and cached — including any dial error — for the
// lifetime of the process.
type standalone struct {
	lock  sync.Mutex     // guards all fields below
	mongo *config.Mongo  // MongoDB settings, installed via setConfig
	redis *config.Redis  // Redis settings, installed via setConfig
	conn  map[string]any // lazily created; values are *standaloneConn[...]
}
// setConfig installs the connection settings consumed by later Mongo/Redis calls.
func (x *standalone) setConfig(mongoConf *config.Mongo, redisConf *config.Redis) {
	x.lock.Lock()
	x.mongo, x.redis = mongoConf, redisConf
	x.lock.Unlock()
}
// Mongo returns the shared MongoDB client, dialing it on first use. The
// outcome of the first attempt is cached and replayed to every later caller
// (NOTE(review): a failed dial is therefore never retried — confirm intended).
func (x *standalone) Mongo(ctx context.Context) (*mongoutil.Client, error) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.conn == nil {
		x.conn = make(map[string]any)
	}
	if cached, ok := x.conn[standaloneMongo]; ok {
		return cached.(*standaloneConn[*mongoutil.Client]).result()
	}
	conn := &standaloneConn[*mongoutil.Client]{}
	conn.Conn, conn.Err = mongoutil.NewMongoDB(ctx, x.mongo.Build())
	x.conn[standaloneMongo] = conn
	return conn.result()
}
// Redis returns the shared Redis client, dialing it on first use, or
// (nil, nil) when Redis is disabled. The first dial outcome — success or
// failure — is cached and replayed to later callers.
func (x *standalone) Redis(ctx context.Context) (redis.UniversalClient, error) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.redis.Disable {
		return nil, nil
	}
	if x.conn == nil {
		x.conn = make(map[string]any)
	}
	if cached, ok := x.conn[standaloneRedis]; ok {
		return cached.(*standaloneConn[redis.UniversalClient]).result()
	}
	conn := &standaloneConn[redis.UniversalClient]{}
	conn.Conn, conn.Err = redisutil.NewRedisClient(ctx, x.redis.Build())
	x.conn[standaloneRedis] = conn
	return conn.result()
}

Some files were not shown because too many files have changed in this diff Show More