mirror of https://github.com/openimsdk/open-im-server.git
feat: optimize code and support running in single process mode (#3142)
* pb
* fix: Modifying other fields while setting IsPrivateChat does not take effect
* fix: quote message error revoke
* refactoring scheduled tasks
* upgrading pkg tools
* fix
* optimize log output
* feat: support GetLastMessage
* feat: s3 switch
* fix: GetUsersOnline
* feat: SendBusinessNotification supported configuration parameters
* feat: seq conversion failed without exiting
* monolithic
* fix: DeleteDoc crash
* fix: monolithic
* fix: fill send time
* fix: crash caused by withdrawing messages from users who have left the group
* fix: mq
* fix: user msg timestamp
* seq read config
* fix: the source message of the reference is withdrawn, and the referenced message is deleted
This commit is contained in:
parent e37ea50b94
commit 9ed6200e45
cmd/main.go (new file, 419 lines)
@@ -0,0 +1,419 @@
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"net"
	"os"
	"os/signal"
	"path"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/openimsdk/open-im-server/v3/internal/api"
	"github.com/openimsdk/open-im-server/v3/internal/msggateway"
	"github.com/openimsdk/open-im-server/v3/internal/msgtransfer"
	"github.com/openimsdk/open-im-server/v3/internal/push"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/auth"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/conversation"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/group"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/msg"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/relation"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/third"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/user"
	"github.com/openimsdk/open-im-server/v3/internal/tools/cron"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
	"github.com/openimsdk/open-im-server/v3/version"
	"github.com/openimsdk/tools/discovery"
	"github.com/openimsdk/tools/discovery/standalone"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/system/program"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/openimsdk/tools/utils/network"
	"github.com/spf13/viper"
	"google.golang.org/grpc"
)

func init() {
	config.SetStandalone()
	prommetrics.RegistryAll()
}

func main() {
	var configPath string
	flag.StringVar(&configPath, "c", "", "config path")
	flag.Parse()
	if configPath == "" {
		_, _ = fmt.Fprintln(os.Stderr, "config path is empty")
		os.Exit(1)
		return
	}
	cmd := newCmds(configPath)
	putCmd(cmd, false, auth.Start)
	putCmd(cmd, false, conversation.Start)
	putCmd(cmd, false, relation.Start)
	putCmd(cmd, false, group.Start)
	putCmd(cmd, false, msg.Start)
	putCmd(cmd, false, third.Start)
	putCmd(cmd, false, user.Start)
	putCmd(cmd, false, push.Start)
	putCmd(cmd, true, msggateway.Start)
	putCmd(cmd, true, msgtransfer.Start)
	putCmd(cmd, true, api.Start)
	putCmd(cmd, true, cron.Start)
	ctx := context.Background()
	if err := cmd.run(ctx); err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "server exit %s", err)
		os.Exit(1)
		return
	}
}

func newCmds(confPath string) *cmds {
	return &cmds{confPath: confPath}
}

type cmdName struct {
	Name  string
	Func  func(ctx context.Context) error
	Block bool
}

type cmds struct {
	confPath string
	cmds     []cmdName
	config   config.AllConfig
	conf     map[string]reflect.Value
}

func (x *cmds) getTypePath(typ reflect.Type) string {
	return path.Join(typ.PkgPath(), typ.Name())
}

func (x *cmds) initDiscovery() {
	x.config.Discovery.Enable = "standalone"
	vof := reflect.ValueOf(&x.config.Discovery.RpcService).Elem()
	tof := reflect.TypeOf(&x.config.Discovery.RpcService).Elem()
	num := tof.NumField()
	for i := 0; i < num; i++ {
		field := tof.Field(i)
		if !field.IsExported() {
			continue
		}
		if field.Type.Kind() != reflect.String {
			continue
		}
		vof.Field(i).SetString(field.Name)
	}
}

func (x *cmds) initAllConfig() error {
	x.conf = make(map[string]reflect.Value)
	vof := reflect.ValueOf(&x.config).Elem()
	num := vof.NumField()
	for i := 0; i < num; i++ {
		field := vof.Field(i)
		for ptr := true; ptr; {
			if field.Kind() == reflect.Ptr {
				field = field.Elem()
			} else {
				ptr = false
			}
		}
		x.conf[x.getTypePath(field.Type())] = field
		val := field.Addr().Interface()
		name := val.(interface{ GetConfigFileName() string }).GetConfigFileName()
		confData, err := os.ReadFile(filepath.Join(x.confPath, name))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return err
		}
		v := viper.New()
		v.SetConfigType("yaml")
		if err := v.ReadConfig(bytes.NewReader(confData)); err != nil {
			return err
		}
		opt := func(conf *mapstructure.DecoderConfig) {
			conf.TagName = config.StructTagName
		}
		if err := v.Unmarshal(val, opt); err != nil {
			return err
		}
	}
	x.initDiscovery()
	x.config.Redis.Disable = false
	x.config.LocalCache = config.LocalCache{}
	config.InitNotification(&x.config.Notification)
	return nil
}

func (x *cmds) parseConf(conf any) error {
	vof := reflect.ValueOf(conf)
	for {
		if vof.Kind() == reflect.Ptr {
			vof = vof.Elem()
		} else {
			break
		}
	}
	tof := vof.Type()
	numField := vof.NumField()
	for i := 0; i < numField; i++ {
		typeField := tof.Field(i)
		if !typeField.IsExported() {
			continue
		}
		field := vof.Field(i)
		pkt := x.getTypePath(field.Type())
		val, ok := x.conf[pkt]
		if !ok {
			switch field.Interface().(type) {
			case config.Index:
			case config.Path:
				field.SetString(x.confPath)
			case config.AllConfig:
				field.Set(reflect.ValueOf(x.config))
			case *config.AllConfig:
				field.Set(reflect.ValueOf(&x.config))
			default:
				return fmt.Errorf("config field %s %s not found", vof.Type().Name(), typeField.Name)
			}
			continue
		}
		field.Set(val)
	}
	return nil
}

func (x *cmds) add(name string, block bool, fn func(ctx context.Context) error) {
	x.cmds = append(x.cmds, cmdName{Name: name, Block: block, Func: fn})
}

func (x *cmds) initLog() error {
	conf := x.config.Log
	if err := log.InitLoggerFromConfig(
		"openim-server",
		program.GetProcessName(),
		"", "",
		conf.RemainLogLevel,
		conf.IsStdout,
		conf.IsJson,
		conf.StorageLocation,
		conf.RemainRotationCount,
		conf.RotationTime,
		strings.TrimSpace(version.Version),
		conf.IsSimplify,
	); err != nil {
		return err
	}
	return nil
}

func (x *cmds) run(ctx context.Context) error {
	if len(x.cmds) == 0 {
		return fmt.Errorf("no command to run")
	}
	if err := x.initAllConfig(); err != nil {
		return err
	}
	if err := x.initLog(); err != nil {
		return err
	}

	ctx, cancel := context.WithCancelCause(ctx)

	go func() {
		<-ctx.Done()
		log.ZError(ctx, "context server exit cause", context.Cause(ctx))
	}()

	if prometheus := x.config.API.Prometheus; prometheus.Enable {
		var (
			port int
			err  error
		)
		if !prometheus.AutoSetPorts {
			port, err = datautil.GetElemByIndex(prometheus.Ports, 0)
			if err != nil {
				return err
			}
		}
		ip, err := network.GetLocalIP()
		if err != nil {
			return err
		}
		listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
		if err != nil {
			return fmt.Errorf("prometheus listen %d error %w", port, err)
		}
		defer listener.Close()
		log.ZDebug(ctx, "prometheus start", "addr", listener.Addr())
		target, err := json.Marshal(prommetrics.BuildDefaultTarget(ip, listener.Addr().(*net.TCPAddr).Port))
		if err != nil {
			return err
		}
		if err := standalone.GetKeyValue().SetKey(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), target); err != nil {
			return err
		}
		go func() {
			err := prommetrics.Start(listener)
			if err == nil {
				err = fmt.Errorf("http done")
			}
			cancel(fmt.Errorf("prometheus %w", err))
		}()
	}

	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)
		select {
		case <-ctx.Done():
			return
		case val := <-sigs:
			log.ZDebug(ctx, "recv signal", "signal", val.String())
			cancel(fmt.Errorf("signal %s", val.String()))
		}
	}()

	for i := range x.cmds {
		cmd := x.cmds[i]
		if cmd.Block {
			continue
		}
		if err := cmd.Func(ctx); err != nil {
			cancel(fmt.Errorf("server %s exit %w", cmd.Name, err))
			return err
		}
		go func() {
			if cmd.Block {
				cancel(fmt.Errorf("server %s exit", cmd.Name))
			}
		}()
	}

	var wait cmdManger
	for i := range x.cmds {
		cmd := x.cmds[i]
		if !cmd.Block {
			continue
		}
		wait.Start(cmd.Name)
		go func() {
			defer wait.Shutdown(cmd.Name)
			if err := cmd.Func(ctx); err != nil {
				cancel(fmt.Errorf("server %s exit %w", cmd.Name, err))
				return
			}
			cancel(fmt.Errorf("server %s exit", cmd.Name))
		}()
	}
	<-ctx.Done()
	exitCause := context.Cause(ctx)
	log.ZWarn(ctx, "notification of service closure", exitCause)
	done := wait.Wait()
	timeout := time.NewTimer(time.Second * 10)
	defer timeout.Stop()
	for {
		select {
		case <-timeout.C:
			log.ZWarn(ctx, "server exit timeout", nil, "running", wait.Running())
			return exitCause
		case _, ok := <-done:
			if ok {
				log.ZWarn(ctx, "waiting for the service to exit", nil, "running", wait.Running())
			} else {
				log.ZInfo(ctx, "all server exit done")
				return exitCause
			}
		}
	}
}

func putCmd[C any](cmd *cmds, block bool, fn func(ctx context.Context, config *C, client discovery.Conn, server grpc.ServiceRegistrar) error) {
	name := path.Base(runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
	if index := strings.Index(name, "."); index >= 0 {
		name = name[:index]
	}
	cmd.add(name, block, func(ctx context.Context) error {
		var conf C
		if err := cmd.parseConf(&conf); err != nil {
			return err
		}
		return fn(ctx, &conf, standalone.GetDiscoveryConn(), standalone.GetServiceRegistrar())
	})
}

type cmdManger struct {
	lock  sync.Mutex
	done  chan struct{}
	count int
	names map[string]struct{}
}

func (x *cmdManger) Start(name string) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.names == nil {
		x.names = make(map[string]struct{})
	}
	if x.done == nil {
		x.done = make(chan struct{}, 1)
	}
	if _, ok := x.names[name]; ok {
		panic(fmt.Errorf("cmd %s already exists", name))
	}
	x.count++
	x.names[name] = struct{}{}
}

func (x *cmdManger) Shutdown(name string) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if _, ok := x.names[name]; !ok {
		panic(fmt.Errorf("cmd %s not exists", name))
	}
	delete(x.names, name)
	x.count--
	if x.count == 0 {
		close(x.done)
	} else {
		select {
		case x.done <- struct{}{}:
		default:
		}
	}
}

func (x *cmdManger) Wait() <-chan struct{} {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.count == 0 || x.done == nil {
		tmp := make(chan struct{})
		close(tmp)
		return tmp
	}
	return x.done
}

func (x *cmdManger) Running() []string {
	x.lock.Lock()
	defer x.lock.Unlock()
	names := make([]string, 0, len(x.names))
	for name := range x.names {
		names = append(names, name)
	}
	return names
}
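The generic putCmd above derives each service's registered name from the package that defines its Start function, which keeps registration to one line per service. Below is a minimal, self-contained sketch of that naming trick; the demo function names are illustrative, not part of the commit.

package main

import (
	"fmt"
	"path"
	"reflect"
	"runtime"
	"strings"
)

// serviceName mirrors the naming logic in putCmd: resolve the function's
// fully qualified name, keep the last path element (the package), and
// drop everything after the first dot (the function name itself).
func serviceName(fn any) string {
	name := path.Base(runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
	if i := strings.Index(name, "."); i >= 0 {
		name = name[:i]
	}
	return name
}

func authStart() error { return nil }

func main() {
	// Prints "main" here; for auth.Start in the real tree it would print "auth".
	fmt.Println(serviceName(authStart))
}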
go.mod (6 lines changed)

@@ -13,7 +13,7 @@ require (
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/openimsdk/protocol v0.0.72-alpha.71
-	github.com/openimsdk/tools v0.0.50-alpha.70
+	github.com/openimsdk/tools v0.0.50-alpha.74
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/prometheus/client_golang v1.18.0
 	github.com/stretchr/testify v1.9.0
@@ -27,7 +27,6 @@ require (
 require github.com/google/uuid v1.6.0

 require (
-	github.com/IBM/sarama v1.43.0
 	github.com/fatih/color v1.14.1
 	github.com/gin-contrib/gzip v1.0.1
 	github.com/go-redis/redis v6.15.9+incompatible
@@ -55,6 +54,7 @@ require (
 	cloud.google.com/go/iam v1.1.7 // indirect
 	cloud.google.com/go/longrunning v0.5.5 // indirect
 	cloud.google.com/go/storage v1.40.0 // indirect
+	github.com/IBM/sarama v1.43.0 // indirect
 	github.com/MicahParks/keyfunc v1.9.0 // indirect
 	github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect
 	github.com/aws/aws-sdk-go-v2 v1.32.5 // indirect
@@ -219,3 +219,5 @@ require (
 	golang.org/x/crypto v0.27.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 )
+
+//replace github.com/openimsdk/tools => /Users/chao/Desktop/code/tools
go.sum (4 lines changed)

@@ -349,8 +349,8 @@ github.com/openimsdk/gomake v0.0.15-alpha.2 h1:5Q8yl8ezy2yx+q8/ucU/t4kJnDfCzNOrk
 github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
 github.com/openimsdk/protocol v0.0.72-alpha.71 h1:R3utzOlqepaJWTAmnfJi4ccUM/XIoFasSyjQMOipM70=
 github.com/openimsdk/protocol v0.0.72-alpha.71/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw=
-github.com/openimsdk/tools v0.0.50-alpha.70 h1:pyqWkJzXbELWU9KKAsWkj3g0flJYNsDTcjR5SLFQAZU=
-github.com/openimsdk/tools v0.0.50-alpha.70/go.mod h1:B+oqV0zdewN7OiEHYJm+hW+8/Te7B8tHHgD8rK5ZLZk=
+github.com/openimsdk/tools v0.0.50-alpha.74 h1:yh10SiMiivMEjicEQg+QAsH4pvaO+4noMPdlw+ew0Kc=
+github.com/openimsdk/tools v0.0.50-alpha.74/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo=
 github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
 github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
 github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
@@ -30,16 +30,14 @@ type ConfigManager struct {
 	client *clientv3.Client

 	configPath string
-	runtimeEnv string
 }

-func NewConfigManager(IMAdminUserID []string, cfg *config.AllConfig, client *clientv3.Client, configPath string, runtimeEnv string) *ConfigManager {
+func NewConfigManager(IMAdminUserID []string, cfg *config.AllConfig, client *clientv3.Client, configPath string) *ConfigManager {
 	cm := &ConfigManager{
 		imAdminUserID: IMAdminUserID,
 		config:        cfg,
 		client:        client,
 		configPath:    configPath,
-		runtimeEnv:    runtimeEnv,
 	}
 	return cm
 }
@@ -73,7 +71,7 @@ func (cm *ConfigManager) GetConfig(c *gin.Context) {
 func (cm *ConfigManager) GetConfigList(c *gin.Context) {
 	var resp apistruct.GetConfigListResp
 	resp.ConfigNames = cm.config.GetConfigNames()
-	resp.Environment = runtimeenv.PrintRuntimeEnvironment()
+	resp.Environment = runtimeenv.RuntimeEnvironment()
 	resp.Version = version.Version

 	apiresp.GinSuccess(c, resp)
@@ -209,13 +207,7 @@ func (cm *ConfigManager) resetConfig(c *gin.Context, checkChange bool, ops ...cl

 	changedKeys := make([]string, 0, len(configMap))
 	for k, v := range configMap {
-		err := config.Load(
-			cm.configPath,
-			k,
-			config.EnvPrefixMap[k],
-			cm.runtimeEnv,
-			v.new,
-		)
+		err := config.Load(cm.configPath, k, config.EnvPrefixMap[k], v.new)
 		if err != nil {
 			log.ZError(c, "load config failed", err)
 			continue
@@ -20,157 +20,85 @@ import (
 	"fmt"
 	"net"
 	"net/http"
-	"os"
-	"os/signal"
 	"strconv"
-	"syscall"
 	"time"

 	conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
-	disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
-	"github.com/openimsdk/tools/discovery/etcd"
-	"github.com/openimsdk/tools/errs"
+	"github.com/openimsdk/tools/discovery"
 	"github.com/openimsdk/tools/log"
-	"github.com/openimsdk/tools/mw"
-	"github.com/openimsdk/tools/system/program"
 	"github.com/openimsdk/tools/utils/datautil"
-	"github.com/openimsdk/tools/utils/jsonutil"
 	"github.com/openimsdk/tools/utils/network"
 	"github.com/openimsdk/tools/utils/runtimeenv"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials/insecure"
 )

 type Config struct {
-	*conf.AllConfig
+	conf.AllConfig

-	RuntimeEnv string
-	ConfigPath string
+	ConfigPath conf.Path
+	Index      conf.Index
 }

-func Start(ctx context.Context, index int, config *Config) error {
-	apiPort, err := datautil.GetElemByIndex(config.API.Api.Ports, index)
+func Start(ctx context.Context, config *Config, client discovery.Conn, service grpc.ServiceRegistrar) error {
+	apiPort, err := datautil.GetElemByIndex(config.API.Api.Ports, int(config.Index))
 	if err != nil {
 		return err
 	}
-
-	config.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
-
-	client, err := kdisc.NewDiscoveryRegister(&config.Discovery, config.RuntimeEnv, []string{
-		config.Discovery.RpcService.MessageGateway,
-	})
-	if err != nil {
-		return errs.WrapMsg(err, "failed to register discovery service")
-	}
-	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
-
-	var (
-		netDone        = make(chan struct{}, 1)
-		netErr         error
-		prometheusPort int
-	)
-
-	registerIP, err := network.GetRpcRegisterIP("")
-	if err != nil {
-		return err
-	}
-
-	getAutoPort := func() (net.Listener, int, error) {
-		registerAddr := net.JoinHostPort(registerIP, "0")
-		listener, err := net.Listen("tcp", registerAddr)
-		if err != nil {
-			return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
-		}
-		_, portStr, _ := net.SplitHostPort(listener.Addr().String())
-		port, _ := strconv.Atoi(portStr)
-		return listener, port, nil
-	}
-
-	if config.API.Prometheus.AutoSetPorts && config.Discovery.Enable != conf.ETCD {
-		return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
-	}
-
 	router, err := newGinRouter(ctx, client, config)
 	if err != nil {
 		return err
 	}
-	if config.API.Prometheus.Enable {
-		var (
-			listener net.Listener
-		)
-
-		if config.API.Prometheus.AutoSetPorts {
-			listener, prometheusPort, err = getAutoPort()
-			if err != nil {
-				return err
-			}
-
-			etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
-
-			_, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
-			if err != nil {
-				return errs.WrapMsg(err, "etcd put err")
-			}
-		} else {
-			prometheusPort, err = datautil.GetElemByIndex(config.API.Prometheus.Ports, index)
-			if err != nil {
-				return err
-			}
-			listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
-			if err != nil {
-				return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
-			}
-		}
-		go func() {
-			if err := prommetrics.ApiInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
-				netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort))
-				netDone <- struct{}{}
-			}
-		}()
-	}
-	address := net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort))
-
-	server := http.Server{Addr: address, Handler: router}
-	log.CInfo(ctx, "API server is initializing", "runtimeEnv", config.RuntimeEnv, "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
+	apiCtx, apiCancel := context.WithCancelCause(context.Background())
+	done := make(chan struct{})
 	go func() {
-		err = server.ListenAndServe()
-		if err != nil && !errors.Is(err, http.ErrServerClosed) {
-			netErr = errs.WrapMsg(err, fmt.Sprintf("api start err: %s", server.Addr))
-			netDone <- struct{}{}
+		httpServer := &http.Server{
+			Handler: router,
+			Addr:    net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort)),
 		}
-	}()
-
-	if config.Discovery.Enable == conf.ETCD {
-		cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames())
-		cm.Watch(ctx)
-	}
-
-	sigs := make(chan os.Signal, 1)
-	signal.Notify(sigs, syscall.SIGTERM)
-
-	shutdown := func() error {
-		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
-		defer cancel()
-		err := server.Shutdown(ctx)
-		if err != nil {
-			return errs.WrapMsg(err, "shutdown err")
-		}
-		return nil
-	}
-	disetcd.RegisterShutDown(shutdown)
-	select {
-	case <-sigs:
-		program.SIGTERMExit()
-		if err := shutdown(); err != nil {
-			return err
-		}
-	case <-netDone:
-		close(netDone)
-		return netErr
-	}
-	return nil
+	go func() {
+			defer close(done)
+			select {
+			case <-ctx.Done():
+				apiCancel(fmt.Errorf("recv ctx %w", context.Cause(ctx)))
+			case <-apiCtx.Done():
+			}
+			log.ZDebug(ctx, "api server is shutting down")
+			if err := httpServer.Shutdown(context.Background()); err != nil {
+				log.ZWarn(ctx, "api server shutdown err", err)
+			}
+		}()
+		log.CInfo(ctx, "api server is init", "runtimeEnv", runtimeenv.RuntimeEnvironment(), "address", httpServer.Addr, "apiPort", apiPort)
+		err := httpServer.ListenAndServe()
+		if err == nil {
+			err = errors.New("api done")
+		}
+		apiCancel(err)
+	}()

+	//if config.Discovery.Enable == conf.ETCD {
+	//	cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames())
+	//	cm.Watch(ctx)
+	//}
+	//sigs := make(chan os.Signal, 1)
+	//signal.Notify(sigs, syscall.SIGTERM)
+	//select {
+	//case val := <-sigs:
+	//	log.ZDebug(ctx, "recv exit", "signal", val.String())
+	//	cancel(fmt.Errorf("signal %s", val.String()))
+	//case <-ctx.Done():
+	//}
+	<-apiCtx.Done()
+	exitCause := context.Cause(ctx)
+	log.ZWarn(ctx, "api server exit", exitCause)
+	timer := time.NewTimer(time.Second * 15)
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+		log.ZWarn(ctx, "api server graceful stop timeout", nil)
+	case <-done:
+		log.ZDebug(ctx, "api server graceful stop done")
+	}
+	return exitCause
 }
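The rewritten api Start above drops per-process signal handling in favor of a cancel-cause context: parent cancellation, listener failure, and normal completion all funnel through apiCancel, and the caller reads the reason back with context.Cause. Below is a stripped-down sketch of that shutdown shape, standard library only; the address and empty handler are placeholders, not values from the commit.

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"
)

func run(ctx context.Context) error {
	srvCtx, cancel := context.WithCancelCause(context.Background())
	done := make(chan struct{})
	srv := &http.Server{Addr: ":8080", Handler: http.NewServeMux()}

	go func() {
		defer close(done)
		// Wake up when either the parent or the server context ends,
		// then shut the listener down gracefully.
		select {
		case <-ctx.Done():
			cancel(fmt.Errorf("recv ctx %w", context.Cause(ctx)))
		case <-srvCtx.Done():
		}
		_ = srv.Shutdown(context.Background())
	}()
	go func() {
		err := srv.ListenAndServe()
		if err == nil {
			err = errors.New("server done")
		}
		cancel(err) // record why the server stopped
	}()

	<-srvCtx.Done()
	// Give in-flight requests a bounded window to drain.
	timer := time.NewTimer(15 * time.Second)
	defer timer.Stop()
	select {
	case <-timer.C:
	case <-done:
	}
	return context.Cause(srvCtx)
}

func main() { _ = run(context.Background()) }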
@@ -2,6 +2,7 @@ package api

 import (
 	"encoding/json"
+	"errors"
 	"net/http"

 	"github.com/gin-gonic/gin"
@@ -11,16 +12,16 @@ import (
 	"github.com/openimsdk/tools/discovery"
 	"github.com/openimsdk/tools/discovery/etcd"
 	"github.com/openimsdk/tools/errs"
-	"github.com/openimsdk/tools/log"
-	clientv3 "go.etcd.io/etcd/client/v3"
 )

 type PrometheusDiscoveryApi struct {
 	config *Config
-	client *clientv3.Client
+	kv     discovery.KeyValue
 }

-func NewPrometheusDiscoveryApi(config *Config, client discovery.SvcDiscoveryRegistry) *PrometheusDiscoveryApi {
+func NewPrometheusDiscoveryApi(config *Config, client discovery.Conn) *PrometheusDiscoveryApi {
 	api := &PrometheusDiscoveryApi{
 		config: config,
 	}
@@ -30,43 +31,26 @@ func NewPrometheusDiscoveryApi(config *Config, client discovery.SvcDiscoveryRegi
 	return api
 }

-func (p *PrometheusDiscoveryApi) Enable(c *gin.Context) {
-	if p.config.Discovery.Enable != conf.ETCD {
-		c.JSON(http.StatusOK, []struct{}{})
-		c.Abort()
-	}
-}
-
 func (p *PrometheusDiscoveryApi) discovery(c *gin.Context, key string) {
-	eResp, err := p.client.Get(c, prommetrics.BuildDiscoveryKey(key))
+	value, err := p.kv.GetKey(c, prommetrics.BuildDiscoveryKey(key))
 	if err != nil {
-		// Log and respond with an error if preparation fails.
-		apiresp.GinError(c, errs.WrapMsg(err, "etcd get err"))
-		return
-	}
-	if len(eResp.Kvs) == 0 {
-		c.JSON(http.StatusOK, []*prommetrics.Target{})
+		if errors.Is(err, discovery.ErrNotSupportedKeyValue) {
+			c.JSON(http.StatusOK, []struct{}{})
+			return
+		}
+		apiresp.GinError(c, errs.WrapMsg(err, "get key value"))
 		return
 	}
-
-	var (
-		resp = &prommetrics.RespTarget{
-			Targets: make([]string, 0, len(eResp.Kvs)),
-		}
-	)
-
-	for i := range eResp.Kvs {
-		var target prommetrics.Target
-		err = json.Unmarshal(eResp.Kvs[i].Value, &target)
-		if err != nil {
-			log.ZError(c, "prometheus unmarshal err", errs.Wrap(err))
-			continue
-		}
-		resp.Targets = append(resp.Targets, target.Target)
-		if resp.Labels == nil {
-			resp.Labels = target.Labels
-		}
-	}
-
-	c.JSON(200, []*prommetrics.RespTarget{resp})
+	if len(value) == 0 {
+		c.JSON(http.StatusOK, []*prommetrics.RespTarget{})
+		return
+	}
+	var resp prommetrics.RespTarget
+	if err := json.Unmarshal(value, &resp); err != nil {
+		apiresp.GinError(c, errs.WrapMsg(err, "json unmarshal err"))
+		return
+	}
+	c.JSON(http.StatusOK, []*prommetrics.RespTarget{&resp})
 }

 func (p *PrometheusDiscoveryApi) Api(c *gin.Context) {
@@ -52,7 +52,7 @@ func prommetricsGin() gin.HandlerFunc {
 	}
 }

-func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cfg *Config) (*gin.Engine, error) {
+func newGinRouter(ctx context.Context, client discovery.Conn, cfg *Config) (*gin.Engine, error) {
 	authConn, err := client.GetConn(ctx, cfg.Discovery.RpcService.Auth)
 	if err != nil {
 		return nil, err
@@ -283,7 +283,7 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf
 	}
 	{
 		pd := NewPrometheusDiscoveryApi(cfg, client)
-		proDiscoveryGroup := r.Group("/prometheus_discovery", pd.Enable)
+		proDiscoveryGroup := r.Group("/prometheus_discovery")
 		proDiscoveryGroup.GET("/api", pd.Api)
 		proDiscoveryGroup.GET("/user", pd.User)
 		proDiscoveryGroup.GET("/group", pd.Group)
@@ -301,9 +301,8 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf
 	if cfg.Discovery.Enable == config.ETCD {
 		etcdClient = client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
 	}
-	cm := NewConfigManager(cfg.Share.IMAdminUserID, cfg.AllConfig, etcdClient, cfg.ConfigPath, cfg.RuntimeEnv)
+	cm := NewConfigManager(cfg.Share.IMAdminUserID, &cfg.AllConfig, etcdClient, string(cfg.ConfigPath))
 	{
-
 		configGroup := r.Group("/config", cm.CheckAdmin)
 		configGroup.POST("/get_config_list", cm.GetConfigList)
 		configGroup.POST("/get_config", cm.GetConfig)
@@ -29,11 +29,11 @@ import (

 type UserApi struct {
 	Client user.UserClient
-	discov discovery.SvcDiscoveryRegistry
+	discov discovery.Conn
 	config config.RpcService
 }

-func NewUserApi(client user.UserClient, discov discovery.SvcDiscoveryRegistry, config config.RpcService) UserApi {
+func NewUserApi(client user.UserClient, discov discovery.Conn, config config.RpcService) UserApi {
 	return UserApi{Client: client, discov: discov, config: config}
 }
@@ -22,7 +22,6 @@ import (

 	"github.com/openimsdk/open-im-server/v3/pkg/authverify"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
 	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/protocol/msggateway"
 	"github.com/openimsdk/protocol/sdkws"
@@ -35,7 +34,7 @@ import (
 	"google.golang.org/grpc"
 )

-func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
+func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.Conn, server grpc.ServiceRegistrar) error {
 	userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User)
 	if err != nil {
 		return err
@@ -51,26 +50,26 @@ func (s *Server) InitServer(ctx context.Context, config *Config, disCov discover
 	return nil
 }

-func (s *Server) Start(ctx context.Context, index int, conf *Config) error {
-	return startrpc.Start(ctx, &conf.Discovery, &conf.MsgGateway.Prometheus, conf.MsgGateway.ListenIP,
-		conf.MsgGateway.RPC.RegisterIP,
-		conf.MsgGateway.RPC.AutoSetPorts, conf.MsgGateway.RPC.Ports, index,
-		conf.Discovery.RpcService.MessageGateway,
-		nil,
-		conf,
-		[]string{
-			conf.Share.GetConfigFileName(),
-			conf.Discovery.GetConfigFileName(),
-			conf.MsgGateway.GetConfigFileName(),
-			conf.WebhooksConfig.GetConfigFileName(),
-			conf.RedisConfig.GetConfigFileName(),
-		},
-		[]string{
-			conf.Discovery.RpcService.MessageGateway,
-		},
-		s.InitServer,
-	)
-}
+//func (s *Server) Start(ctx context.Context, index int, conf *Config) error {
+//	return startrpc.Start(ctx, &conf.Discovery, &conf.MsgGateway.Prometheus, conf.MsgGateway.ListenIP,
+//		conf.MsgGateway.RPC.RegisterIP,
+//		conf.MsgGateway.RPC.AutoSetPorts, conf.MsgGateway.RPC.Ports, index,
+//		conf.Discovery.RpcService.MessageGateway,
+//		nil,
+//		conf,
+//		[]string{
+//			conf.Share.GetConfigFileName(),
+//			conf.Discovery.GetConfigFileName(),
+//			conf.MsgGateway.GetConfigFileName(),
+//			conf.WebhooksConfig.GetConfigFileName(),
+//			conf.RedisConfig.GetConfigFileName(),
+//		},
+//		[]string{
+//			conf.Discovery.RpcService.MessageGateway,
+//		},
+//		s.InitServer,
+//	)
+//}

 type Server struct {
 	msggateway.UnimplementedMsgGatewayServer
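InitServer now accepts grpc.ServiceRegistrar instead of *grpc.Server. Since *grpc.Server satisfies that interface, single-process mode can hand every service the same shared server, while separate deployments keep their own. A minimal illustration of the idea; registerAll is a hypothetical helper, not code from the commit.

package main

import "google.golang.org/grpc"

// registerAll wires several services onto one registrar. Accepting the
// grpc.ServiceRegistrar interface rather than a concrete *grpc.Server is
// what lets every service share a single server in single-process mode.
func registerAll(s grpc.ServiceRegistrar, registrations ...func(grpc.ServiceRegistrar)) {
	for _, register := range registrations {
		register(s)
	}
}

func main() {
	shared := grpc.NewServer()
	// Each generated package exposes RegisterXxxServer(s grpc.ServiceRegistrar, srv XxxServer);
	// those functions slot in here. Left empty so the sketch stays self-contained.
	registerAll(shared)
}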
@@ -19,10 +19,12 @@ import (
 	"time"

 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
 	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
-	"github.com/openimsdk/tools/db/redisutil"
+	"github.com/openimsdk/tools/discovery"
 	"github.com/openimsdk/tools/utils/datautil"
 	"github.com/openimsdk/tools/utils/runtimeenv"
+	"google.golang.org/grpc"

 	"github.com/openimsdk/tools/log"
 )
@@ -33,26 +35,25 @@ type Config struct {
 	RedisConfig    config.Redis
 	WebhooksConfig config.Webhooks
 	Discovery      config.Discovery
-
-	RuntimeEnv string
+	Index          config.Index
 }

 // Start run ws server.
-func Start(ctx context.Context, index int, conf *Config) error {
-	conf.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
-
-	log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", conf.RuntimeEnv,
+func Start(ctx context.Context, conf *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
+	log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", runtimeenv.RuntimeEnvironment(),
 		"rpcPorts", conf.MsgGateway.RPC.Ports,
 		"wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
-	wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)
+	wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, int(conf.Index))
 	if err != nil {
 		return err
 	}
-	rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
+	dbb := dbbuild.NewBuilder(nil, &conf.RedisConfig)
+	rdb, err := dbb.Redis(ctx)
 	if err != nil {
 		return err
 	}
 	longServer := NewWsServer(
 		conf,
 		WithPort(wsPort),
@@ -67,12 +68,50 @@ func Start(ctx context.Context, index int, conf *Config) error {
 		return err
 	})

+	if err := hubServer.InitServer(ctx, conf, client, server); err != nil {
+		return err
+	}
+
 	go longServer.ChangeOnlineStatus(4)

-	netDone := make(chan error)
-	go func() {
-		err = hubServer.Start(ctx, index, conf)
-		netDone <- err
-	}()
-	return hubServer.LongConnServer.Run(netDone)
+	return hubServer.LongConnServer.Run(ctx)
 }
+
+//
+//// Start run ws server.
+//func Start(ctx context.Context, index int, conf *Config) error {
+//	log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", runtimeenv.RuntimeEnvironment(),
+//		"rpcPorts", conf.MsgGateway.RPC.Ports,
+//		"wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
+//	wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)
+//	if err != nil {
+//		return err
+//	}
+//
+//	rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
+//	if err != nil {
+//		return err
+//	}
+//	longServer := NewWsServer(
+//		conf,
+//		WithPort(wsPort),
+//		WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
+//		WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
+//		WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
+//	)
+//
+//	hubServer := NewServer(longServer, conf, func(srv *Server) error {
+//		var err error
+//		longServer.online, err = rpccache.NewOnlineCache(srv.userClient, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges)
+//		return err
+//	})
+//
+//	go longServer.ChangeOnlineStatus(4)
+//
+//	netDone := make(chan error)
+//	go func() {
+//		err = hubServer.Start(ctx, index, conf)
+//		netDone <- err
+//	}()
+//	return hubServer.LongConnServer.Run(netDone)
+//}
@@ -2,7 +2,6 @@ package msggateway

 import (
 	"context"
-	"errors"
 	"fmt"
 	"net/http"
 	"sync"
@@ -11,7 +10,6 @@ import (

 	"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

-	"github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
 	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
 	pbAuth "github.com/openimsdk/protocol/auth"
@@ -23,19 +21,18 @@ import (
 	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/protocol/msggateway"
 	"github.com/openimsdk/tools/discovery"
-	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
-	"github.com/openimsdk/tools/utils/stringutil"
 	"golang.org/x/sync/errgroup"
 )

 type LongConnServer interface {
-	Run(done chan error) error
+	Run(ctx context.Context) error
 	wsHandler(w http.ResponseWriter, r *http.Request)
 	GetUserAllCons(userID string) ([]*Client, bool)
 	GetUserPlatformCons(userID string, platform int) ([]*Client, bool, bool)
 	Validate(s any) error
-	SetDiscoveryRegistry(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config) error
+	SetDiscoveryRegistry(ctx context.Context, client discovery.Conn, config *Config) error
 	KickUserConn(client *Client) error
 	UnRegister(c *Client)
 	SetKickHandlerInfo(i *kickHandler)
@@ -60,7 +57,7 @@ type WsServer struct {
 	handshakeTimeout time.Duration
 	writeBufferSize  int
 	validate         *validator.Validate
-	disCov           discovery.SvcDiscoveryRegistry
+	disCov           discovery.Conn
 	Compressor
 	//Encoder
 	MessageHandler
@@ -75,7 +72,7 @@ type kickHandler struct {
 	newClient *Client
 }

-func (ws *WsServer) SetDiscoveryRegistry(ctx context.Context, disCov discovery.SvcDiscoveryRegistry, config *Config) error {
+func (ws *WsServer) SetDiscoveryRegistry(ctx context.Context, disCov discovery.Conn, config *Config) error {
 	userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User)
 	if err != nil {
 		return err
@@ -158,19 +155,14 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
 	}
 }

-func (ws *WsServer) Run(done chan error) error {
-	var (
-		client       *Client
-		netErr       error
-		shutdownDone = make(chan struct{}, 1)
-	)
-
-	server := http.Server{Addr: ":" + stringutil.IntToString(ws.port), Handler: nil}
+func (ws *WsServer) Run(ctx context.Context) error {
+	var client *Client

+	ctx, cancel := context.WithCancelCause(ctx)
 	go func() {
 		for {
 			select {
-			case <-shutdownDone:
+			case <-ctx.Done():
 				return
 			case client = <-ws.registerChan:
 				ws.registerClient(client)
@@ -181,57 +173,56 @@ func (ws *WsServer) Run(done chan error) error {
 			}
 		}
 	}()
-	netDone := make(chan struct{}, 1)
-	go func() {
-		http.HandleFunc("/", ws.wsHandler)
-		err := server.ListenAndServe()
-		if err != nil && !errors.Is(err, http.ErrServerClosed) {
-			netErr = errs.WrapMsg(err, "ws start err", server.Addr)
-			netDone <- struct{}{}
-		}
-	}()
-	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
-	shutDown := func() error {
-		sErr := server.Shutdown(ctx)
-		if sErr != nil {
-			return errs.WrapMsg(sErr, "shutdown err")
-		}
-		close(shutdownDone)
-		return nil
-	}
-	etcd.RegisterShutDown(shutDown)
-	defer cancel()
-	var err error

-	select {
-	case err = <-done:
-		if err := shutDown(); err != nil {
-			return err
-		}
-		if err != nil {
-			return err
-		}
-	case <-netDone:
-	}
-	return netErr
+	done := make(chan struct{})
+	go func() {
+		wsServer := http.Server{Addr: fmt.Sprintf(":%d", ws.port), Handler: nil}
+		http.HandleFunc("/", ws.wsHandler)
+		go func() {
+			defer close(done)
+			<-ctx.Done()
+			_ = wsServer.Shutdown(context.Background())
+		}()
+		err := wsServer.ListenAndServe()
+		if err == nil {
+			err = fmt.Errorf("http server closed")
+		}
+		cancel(fmt.Errorf("msg gateway %w", err))
+	}()
+
+	<-ctx.Done()
+
+	timeout := time.NewTimer(time.Second * 15)
+	defer timeout.Stop()
+	select {
+	case <-timeout.C:
+		log.ZWarn(ctx, "msg gateway graceful stop timeout", nil)
+	case <-done:
+		log.ZDebug(ctx, "msg gateway graceful stop done")
+	}
+	return context.Cause(ctx)
 }

-var concurrentRequest = 3
+const concurrentRequest = 3

 func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *Client) error {
 	conns, err := ws.disCov.GetConns(ctx, ws.msgGatewayConfig.Discovery.RpcService.MessageGateway)
 	if err != nil {
 		return err
 	}
 	if len(conns) == 0 || (len(conns) == 1 && ws.disCov.IsSelfNode(conns[0])) {
 		return nil
 	}

 	wg := errgroup.Group{}
 	wg.SetLimit(concurrentRequest)

 	// Online push user online message to other node
 	for _, v := range conns {
 		v := v
-		log.ZDebug(ctx, " sendUserOnlineInfoToOtherNode conn ", "target", v.Target())
-		if v.Target() == ws.disCov.GetSelfConnTarget() {
-			log.ZDebug(ctx, "Filter out this node", "node", v.Target())
+		log.ZDebug(ctx, "sendUserOnlineInfoToOtherNode conn")
+		if ws.disCov.IsSelfNode(v) {
+			log.ZDebug(ctx, "Filter out this node")
 			continue
 		}
@@ -242,7 +233,7 @@ func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *C
 			PlatformID: int32(client.PlatformID), Token: client.token,
 		})
 		if err != nil {
-			log.ZWarn(ctx, "MultiTerminalLoginCheck err", err, "node", v.Target())
+			log.ZWarn(ctx, "MultiTerminalLoginCheck err", err)
 		}
 		return nil
 	})
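sendUserOnlineInfoToOtherNode above caps cross-node fan-out at concurrentRequest in-flight calls with errgroup.SetLimit. Here is the pattern in isolation, with a placeholder notify function standing in for the real MultiTerminalLoginCheck RPC.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// notify is a stand-in for the per-node RPC; it only prints here.
func notify(ctx context.Context, node string) error {
	fmt.Println("notify", node)
	return nil
}

func fanOut(ctx context.Context, nodes []string) error {
	g := errgroup.Group{}
	g.SetLimit(3) // at most 3 concurrent requests, like concurrentRequest
	for _, node := range nodes {
		node := node // capture per-iteration value (pre-Go 1.22 idiom, as in the source)
		g.Go(func() error {
			return notify(ctx, node)
		})
	}
	return g.Wait() // first non-nil error, if any
}

func main() {
	_ = fanOut(context.Background(), []string{"gw-1", "gw-2", "gw-3"})
}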
@@ -16,51 +16,35 @@ package msgtransfer

 import (
 	"context"
-	"errors"
 	"fmt"
-	"net"
-	"net/http"
-	"os"
-	"os/signal"
-	"strconv"
-	"syscall"
-
-	disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
-	"github.com/openimsdk/tools/discovery/etcd"
-	"github.com/openimsdk/tools/utils/jsonutil"
-	"github.com/openimsdk/tools/utils/network"

-	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
-	"github.com/openimsdk/tools/db/mongoutil"
-	"github.com/openimsdk/tools/db/redisutil"
-	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
+	"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
+	"github.com/openimsdk/tools/discovery"
+	"github.com/openimsdk/tools/mq"
 	"github.com/openimsdk/tools/utils/runtimeenv"

 	conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
-	"github.com/openimsdk/tools/mw"
-	"github.com/openimsdk/tools/system/program"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials/insecure"
 )

 type MsgTransfer struct {
+	historyConsumer      mq.Consumer
+	historyMongoConsumer mq.Consumer
 	// This consumer aggregated messages, subscribed to the topic:toRedis,
 	// the message is stored in redis, Incr Redis, and then the message is sent to toPush topic for push,
 	// and the message is sent to toMongo topic for persistence
-	historyCH *OnlineHistoryRedisConsumerHandler
+	historyHandler *OnlineHistoryRedisConsumerHandler
 	//This consumer handle message to mongo
-	historyMongoCH *OnlineHistoryMongoConsumerHandler
-	ctx            context.Context
-	cancel         context.CancelFunc
-
-	runTimeEnv string
+	historyMongoHandler *OnlineHistoryMongoConsumerHandler
+	ctx                 context.Context
+	//cancel context.CancelFunc
 }

 type Config struct {
@@ -71,48 +55,59 @@ type Config struct {
 	Share          conf.Share
 	WebhooksConfig conf.Webhooks
 	Discovery      conf.Discovery
+	Index          conf.Index
 }

-func Start(ctx context.Context, index int, config *Config) error {
-	runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
-
-	log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runTimeEnv, "prometheusPorts",
-		config.MsgTransfer.Prometheus.Ports, "index", index)
-
-	mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
+func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
+	builder := mqbuild.NewBuilder(&config.KafkaConfig)
+
+	log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runtimeenv.RuntimeEnvironment(), "prometheusPorts",
+		config.MsgTransfer.Prometheus.Ports, "index", config.Index)
+	dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
+	mgocli, err := dbb.Mongo(ctx)
 	if err != nil {
 		return err
 	}
-	rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
+	rdb, err := dbb.Redis(ctx)
 	if err != nil {
 		return err
 	}
-	client, err := discRegister.NewDiscoveryRegister(&config.Discovery, runTimeEnv, nil)
-	if err != nil {
-		return err
-	}
-	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
-		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
-
-	if config.Discovery.Enable == conf.ETCD {
-		cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), []string{
-			config.MsgTransfer.GetConfigFileName(),
-			config.RedisConfig.GetConfigFileName(),
-			config.MongodbConfig.GetConfigFileName(),
-			config.KafkaConfig.GetConfigFileName(),
-			config.Share.GetConfigFileName(),
-			config.WebhooksConfig.GetConfigFileName(),
-			config.Discovery.GetConfigFileName(),
-			conf.LogConfigFileName,
-		})
-		cm.Watch(ctx)
+
+	//if config.Discovery.Enable == conf.ETCD {
+	//	cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), []string{
+	//		config.MsgTransfer.GetConfigFileName(),
+	//		config.RedisConfig.GetConfigFileName(),
+	//		config.MongodbConfig.GetConfigFileName(),
+	//		config.KafkaConfig.GetConfigFileName(),
+	//		config.Share.GetConfigFileName(),
+	//		config.WebhooksConfig.GetConfigFileName(),
+	//		config.Discovery.GetConfigFileName(),
+	//		conf.LogConfigFileName,
+	//	})
+	//	cm.Watch(ctx)
+	//}
+	mongoProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToMongoTopic)
+	if err != nil {
+		return err
+	}
+	pushProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToPushTopic)
+	if err != nil {
+		return err
 	}

 	msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
 	if err != nil {
 		return err
 	}
-	msgModel := redis.NewMsgCache(rdb, msgDocModel)
+	var msgModel cache.MsgCache
+	if rdb == nil {
+		cm, err := mgo.NewCacheMgo(mgocli.GetDB())
+		if err != nil {
+			return err
+		}
+		msgModel = mcache.NewMsgCache(cm, msgDocModel)
+	} else {
+		msgModel = redis.NewMsgCache(rdb, msgDocModel)
+	}
 	seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
 	if err != nil {
 		return err
@@ -123,124 +118,68 @@ func Start(ctx context.Context, index int, config *Config) error {
 		return err
 	}
 	seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
-	msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
+	msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, mongoProducer, pushProducer)
 	if err != nil {
 		return err
 	}
-	historyCH, err := NewOnlineHistoryRedisConsumerHandler(ctx, client, config, msgTransferDatabase)
+	historyConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToRedisTopic)
 	if err != nil {
 		return err
 	}
-	historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgTransferDatabase)
+	historyMongoConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToMongoTopic)
 	if err != nil {
 		return err
 	}
+	historyHandler, err := NewOnlineHistoryRedisConsumerHandler(ctx, client, config, msgTransferDatabase)
+	if err != nil {
+		return err
+	}
+	historyMongoHandler := NewOnlineHistoryMongoConsumerHandler(msgTransferDatabase)

 	msgTransfer := &MsgTransfer{
-		historyCH:      historyCH,
-		historyMongoCH: historyMongoCH,
-		runTimeEnv:     runTimeEnv,
+		historyConsumer:      historyConsumer,
+		historyMongoConsumer: historyMongoConsumer,
+		historyHandler:       historyHandler,
+		historyMongoHandler:  historyMongoHandler,
 	}

-	return msgTransfer.Start(index, config, client)
+	return msgTransfer.Start(ctx)
 }

-func (m *MsgTransfer) Start(index int, config *Config, client discovery.SvcDiscoveryRegistry) error {
-	m.ctx, m.cancel = context.WithCancel(context.Background())
-	var (
-		netDone = make(chan struct{}, 1)
-		netErr  error
-	)
-
-	go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyCH)
-	go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyMongoCH)
-	go m.historyCH.HandleUserHasReadSeqMessages(m.ctx)
-	err := m.historyCH.redisMessageBatches.Start()
-	if err != nil {
-		return err
-	}
-
-	registerIP, err := network.GetRpcRegisterIP("")
-	if err != nil {
-		return err
-	}
-
-	getAutoPort := func() (net.Listener, int, error) {
-		registerAddr := net.JoinHostPort(registerIP, "0")
-		listener, err := net.Listen("tcp", registerAddr)
-		if err != nil {
-			return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
-		}
-		_, portStr, _ := net.SplitHostPort(listener.Addr().String())
-		port, _ := strconv.Atoi(portStr)
-		return listener, port, nil
-	}
-
-	if config.MsgTransfer.Prometheus.AutoSetPorts && config.Discovery.Enable != conf.ETCD {
-		return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
-	}
-
-	if config.MsgTransfer.Prometheus.Enable {
-		var (
-			listener       net.Listener
-			prometheusPort int
-		)
-
-		if config.MsgTransfer.Prometheus.AutoSetPorts {
-			listener, prometheusPort, err = getAutoPort()
-			if err != nil {
-				return err
-			}
-
-			etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
-
-			_, err = etcdClient.Put(context.TODO(), prommetrics.BuildDiscoveryKey(prommetrics.MessageTransferKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
-			if err != nil {
-				return errs.WrapMsg(err, "etcd put err")
-			}
-		} else {
-			prometheusPort, err = datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
-			if err != nil {
-				return err
-			}
-			listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
-			if err != nil {
-				return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
-			}
-		}
-		go func() {
-			if err := prommetrics.TransferInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
-				netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
-				netDone <- struct{}{}
-			}
-		}()
-	}
-
-	sigs := make(chan os.Signal, 1)
-	signal.Notify(sigs, syscall.SIGTERM)
-	select {
-	case <-sigs:
-		program.SIGTERMExit()
-		// graceful close kafka client.
-		m.cancel()
-		m.historyCH.redisMessageBatches.Close()
-		m.historyCH.Close()
-		m.historyCH.historyConsumerGroup.Close()
-		m.historyMongoCH.historyConsumerGroup.Close()
-	case <-netDone:
-		m.cancel()
-		m.historyCH.redisMessageBatches.Close()
-		m.historyCH.Close()
-		m.historyCH.historyConsumerGroup.Close()
-		m.historyMongoCH.historyConsumerGroup.Close()
-		close(netDone)
-		return netErr
-	}
+func (m *MsgTransfer) Start(ctx context.Context) error {
+	var cancel context.CancelCauseFunc
+	m.ctx, cancel = context.WithCancelCause(ctx)
+
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				log.ZPanic(m.ctx, "MsgTransfer Start Panic", errs.ErrPanic(r))
+			}
+		}()
+		for {
+			if err := m.historyConsumer.Subscribe(m.ctx, m.historyHandler.HandlerRedisMessage); err != nil {
+				cancel(fmt.Errorf("history consumer %w", err))
+				log.ZError(m.ctx, "historyConsumer err", err)
+				return
+			}
+		}
+	}()
+
+	go func() {
+		fn := func(ctx context.Context, key string, value []byte) error {
+			m.historyMongoHandler.HandleChatWs2Mongo(ctx, key, value)
+			return nil
+		}
+		for {
+			if err := m.historyMongoConsumer.Subscribe(m.ctx, fn); err != nil {
+				cancel(fmt.Errorf("history mongo consumer %w", err))
+				log.ZError(m.ctx, "historyMongoConsumer err", err)
+				return
+			}
+		}
+	}()
+
+	go m.historyHandler.HandleUserHasReadSeqMessages(m.ctx)
+
+	err := m.historyHandler.redisMessageBatches.Start()
+	if err != nil {
+		return err
+	}
+	<-m.ctx.Done()
+	return context.Cause(m.ctx)
 }
@@ -18,14 +18,13 @@ import (
"context"
"encoding/json"
"errors"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/discovery"
"strconv"
"strings"

"sync"
"time"

"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/discovery"

"github.com/go-redis/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
@@ -37,7 +36,6 @@ import (
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/stringutil"
"google.golang.org/protobuf/proto"
)
@@ -64,9 +62,7 @@ type userHasReadSeq struct {
}

type OnlineHistoryRedisConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup

redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage]
redisMessageBatches *batcher.Batcher[ConsumerMessage]

msgTransferDatabase controller.MsgTransferDatabase
conversationUserHasReadChan chan *userHasReadSeq
@@ -76,12 +72,13 @@ type OnlineHistoryRedisConsumerHandler struct {
conversationClient *rpcli.ConversationClient
}

func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config, database controller.MsgTransferDatabase) (*OnlineHistoryRedisConsumerHandler, error) {
kafkaConf := config.KafkaConfig
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false)
if err != nil {
return nil, err
type ConsumerMessage struct {
Ctx context.Context
Key string
Value []byte
}

func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.Conn, config *Config, database controller.MsgTransferDatabase) (*OnlineHistoryRedisConsumerHandler, error) {
groupConn, err := client.GetConn(ctx, config.Discovery.RpcService.Group)
if err != nil {
return nil, err
@@ -97,7 +94,7 @@ func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.
och.conversationClient = rpcli.NewConversationClient(conversationConn)
och.wg.Add(1)

b := batcher.New[sarama.ConsumerMessage](
b := batcher.New[ConsumerMessage](
batcher.WithSize(size),
batcher.WithWorker(worker),
batcher.WithInterval(interval),
@@ -109,16 +106,15 @@ func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.
hashCode := stringutil.GetHashCode(key)
return int(hashCode) % och.redisMessageBatches.Worker()
}
b.Key = func(consumerMessage *sarama.ConsumerMessage) string {
return string(consumerMessage.Key)
b.Key = func(consumerMessage *ConsumerMessage) string {
return consumerMessage.Key
}
b.Do = och.do
och.redisMessageBatches = b
och.historyConsumerGroup = historyConsumerGroup

return &och, nil
}
func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) {
func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[ConsumerMessage]) {
ctx = mcontext.WithTriggerIDContext(ctx, val.TriggerID())
ctxMessages := och.parseConsumerMessages(ctx, val.Val())
ctx = withAggregationCtx(ctx, ctxMessages)
@@ -189,7 +185,7 @@ func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context,

}

func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg {
func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*ConsumerMessage) []*ContextMsg {
var ctxMessages []*ContextMsg
for i := 0; i < len(consumerMessages); i++ {
ctxMsg := &ContextMsg{}
@@ -199,16 +195,9 @@ func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.
log.ZWarn(ctx, "msg_transfer Unmarshal msg err", err, string(consumerMessages[i].Value))
continue
}
var arr []string
for i, header := range consumerMessages[i].Headers {
arr = append(arr, strconv.Itoa(i), string(header.Key), string(header.Value))
}
log.ZDebug(ctx, "consumer.kafka.GetContextWithMQHeader", "len", len(consumerMessages[i].Headers),
"header", strings.Join(arr, ", "))
ctxMsg.ctx = kafka.GetContextWithMQHeader(consumerMessages[i].Headers)
ctxMsg.ctx = consumerMessages[i].Ctx
ctxMsg.message = msgFromMQ
log.ZDebug(ctx, "message parse finish", "message", msgFromMQ, "key",
string(consumerMessages[i].Key))
log.ZDebug(ctx, "message parse finish", "message", msgFromMQ, "key", consumerMessages[i].Key)
ctxMessages = append(ctxMessages, ctxMsg)
}
return ctxMessages
@@ -383,7 +372,9 @@ func (och *OnlineHistoryRedisConsumerHandler) Close() {
func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
for _, v := range msgs {
log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
if err := och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message); err != nil {
log.ZError(ctx, "msg to push topic error", err, "msg", v.message.String())
}
}
}

@@ -401,35 +392,10 @@ func withAggregationCtx(ctx context.Context, values []*ContextMsg) context.Conte
return mcontext.SetOperationID(ctx, allMessageOperationID)
}

func (och *OnlineHistoryRedisConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error {
return nil
}

func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
session.MarkMessage(lastMessage, "")
session.Commit()
}
for {
select {
case msg, ok := <-claim.Messages():
if !ok {
return nil
}

if len(msg.Value) == 0 {
continue
}
err := och.redisMessageBatches.Put(context.Background(), msg)
func (och *OnlineHistoryRedisConsumerHandler) HandlerRedisMessage(ctx context.Context, key string, value []byte) error { // a instance in the consumer group
err := och.redisMessageBatches.Put(ctx, &ConsumerMessage{Ctx: ctx, Key: key, Value: value})
if err != nil {
log.ZWarn(context.Background(), "put msg to error", err, "msg", msg)
log.ZWarn(ctx, "put msg to error", err, "key", key, "value", value)
}
case <-session.Context().Done():
return nil
}
}
}

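The ConsumerMessage type introduced above decouples the batcher from sarama: batching, keying, and sharding now operate on a plain (ctx, key, value) envelope, so all messages for one conversation key still land on the same worker. A small sketch of that per-key sharding idea; shard() below is a stand-in for the batcher's Sharding function, not the real batcher API.

package main

import (
	"context"
	"fmt"
	"hash/fnv"
)

// ConsumerMessage mirrors the transport-neutral envelope added in this hunk.
type ConsumerMessage struct {
	Ctx   context.Context
	Key   string
	Value []byte
}

// shard routes a key to one of n workers by hash, so ordering is preserved
// per conversation while work is spread across workers.
func shard(key string, workers int) int {
	h := fnv.New32a()
	_, _ = h.Write([]byte(key))
	return int(h.Sum32()) % workers
}

func main() {
	const workers = 4
	queues := make([][]*ConsumerMessage, workers)
	for _, m := range []*ConsumerMessage{
		{Ctx: context.Background(), Key: "conv:1", Value: []byte("a")},
		{Ctx: context.Background(), Key: "conv:2", Value: []byte("b")},
		{Ctx: context.Background(), Key: "conv:1", Value: []byte("c")},
	} {
		w := shard(m.Key, workers)
		queues[w] = append(queues[w], m)
	}
	for w, q := range queues {
		for _, m := range q {
			fmt.Printf("worker %d handles %s=%s\n", w, m.Key, m.Value)
		}
	}
}
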
@@ -17,36 +17,24 @@ package msgtransfer
import (
"context"

"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
pbmsg "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"google.golang.org/protobuf/proto"
)

type OnlineHistoryMongoConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup
msgTransferDatabase controller.MsgTransferDatabase
}

func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase) (*OnlineHistoryMongoConsumerHandler, error) {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true)
if err != nil {
return nil, err
}

mc := &OnlineHistoryMongoConsumerHandler{
historyConsumerGroup: historyConsumerGroup,
func NewOnlineHistoryMongoConsumerHandler(database controller.MsgTransferDatabase) *OnlineHistoryMongoConsumerHandler {
return &OnlineHistoryMongoConsumerHandler{
msgTransferDatabase: database,
}
return mc, nil
}

func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Context, cMsg *sarama.ConsumerMessage, key string, session sarama.ConsumerGroupSession) {
msg := cMsg.Value
func (mc *OnlineHistoryMongoConsumerHandler) HandleChatWs2Mongo(ctx context.Context, key string, msg []byte) {
msgFromMQ := pbmsg.MsgDataToMongoByMQ{}
err := proto.Unmarshal(msg, &msgFromMQ)
if err != nil {
@@ -54,7 +42,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
return
}
if len(msgFromMQ.MsgData) == 0 {
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "key", key, "msg", msg)
return
}
log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
@@ -82,22 +70,3 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
// msgFromMQ.MsgData, "conversationID", msgFromMQ.ConversationID)
//}
}

func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }

func (*OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }

func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
for msg := range claim.Messages() {
ctx := mc.historyConsumerGroup.GetContextFromMsg(msg)
if len(msg.Value) != 0 {
mc.handleChatWs2Mongo(ctx, msg, string(msg.Key), sess)
} else {
log.ZError(ctx, "mongo msg get from kafka but is nil", nil, "conversationID", msg.Key)
}
sess.MarkMessage(msg, "")
}
return nil
}

@@ -17,6 +17,7 @@ package push
import (
"context"
"encoding/json"

"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"

"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"

@@ -3,7 +3,6 @@ package push
import (
"context"

"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@@ -12,40 +11,21 @@ import (
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/jsonutil"
"google.golang.org/protobuf/proto"
)

type OfflinePushConsumerHandler struct {
OfflinePushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
}

func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) {
var offlinePushConsumerHandler OfflinePushConsumerHandler
var err error
offlinePushConsumerHandler.offlinePusher = offlinePusher
offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID,
[]string{config.KafkaConfig.ToOfflinePushTopic}, true)
if err != nil {
return nil, err
func NewOfflinePushConsumerHandler(offlinePusher offlinepush.OfflinePusher) *OfflinePushConsumerHandler {
return &OfflinePushConsumerHandler{
offlinePusher: offlinePusher,
}
return &offlinePushConsumerHandler, nil
}

func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for msg := range claim.Messages() {
ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg)
o.handleMsg2OfflinePush(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
}

func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) {
func (o *OfflinePushConsumerHandler) HandleMsg2OfflinePush(ctx context.Context, msg []byte) {
offlinePushMsg := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil {
log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg))

@@ -2,7 +2,7 @@ package push

import (
"context"
"errors"
"fmt"
"sync"

"github.com/openimsdk/protocol/msggateway"
@@ -11,6 +11,7 @@ import (
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/runtimeenv"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"

@@ -30,37 +31,36 @@ func newEmptyOnlinePusher() *emptyOnlinePusher {
return &emptyOnlinePusher{}
}

func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData,
pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
return nil, nil
}
func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData,
wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
return nil
}

func NewOnlinePusher(disCov discovery.SvcDiscoveryRegistry, config *Config) OnlinePusher {

if config.runTimeEnv == conf.KUBERNETES {
return NewDefaultAllNode(disCov, config)
func NewOnlinePusher(disCov discovery.Conn, config *Config) (OnlinePusher, error) {
if conf.Standalone() {
return NewDefaultAllNode(disCov, config), nil
}
if runtimeenv.RuntimeEnvironment() == conf.KUBERNETES {
return NewDefaultAllNode(disCov, config), nil
}
switch config.Discovery.Enable {
case conf.ETCD:
return NewDefaultAllNode(disCov, config)
return NewDefaultAllNode(disCov, config), nil
default:
log.ZError(context.Background(), "NewOnlinePusher is error", errs.Wrap(errors.New("unsupported discovery type")), "type", config.Discovery.Enable)
return nil
return nil, errs.New(fmt.Sprintf("unsupported discovery type %s", config.Discovery.Enable))
}
}

type DefaultAllNode struct {
disCov discovery.SvcDiscoveryRegistry
disCov discovery.Conn
config *Config
}

func NewDefaultAllNode(disCov discovery.SvcDiscoveryRegistry, config *Config) *DefaultAllNode {
func NewDefaultAllNode(disCov discovery.Conn, config *Config) *DefaultAllNode {
return &DefaultAllNode{disCov: disCov, config: config}
}

@@ -166,7 +166,7 @@ func (k *K8sStaticConsistentHash) GetConnsAndOnlinePush(ctx context.Context, msg
}
}
log.ZDebug(ctx, "genUsers send hosts struct:", "usersHost", usersHost)
var usersConns = make(map[*grpc.ClientConn][]string)
var usersConns = make(map[grpc.ClientConnInterface][]string)
for host, userIds := range usersHost {
tconn, _ := k.disCov.GetConn(ctx, host)
usersConns[tconn] = userIds

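NewOnlinePusher now returns an explicit error instead of logging and returning nil, and it gains a standalone branch so single-process mode always fans out to every node. A compact sketch of that selection logic under assumed names; pusher, allNode, and the string constants are illustrative, not the real conf package.

package main

import "fmt"

type pusher interface{ Name() string }

type allNode struct{}

func (allNode) Name() string { return "all-node" }

// newOnlinePusher mirrors the decision order above: standalone first, then
// Kubernetes, then etcd; anything else is a hard error for the caller.
func newOnlinePusher(standalone bool, env, discoveryType string) (pusher, error) {
	switch {
	case standalone, env == "kubernetes", discoveryType == "etcd":
		return allNode{}, nil
	default:
		return nil, fmt.Errorf("unsupported discovery type %s", discoveryType)
	}
}

func main() {
	if p, err := newOnlinePusher(true, "", ""); err == nil {
		fmt.Println("standalone ->", p.Name())
	}
	if _, err := newOnlinePusher(false, "host", "zookeeper"); err != nil {
		fmt.Println("zookeeper ->", err)
	}
}
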
@@ -2,39 +2,43 @@ package push

import (
"context"
"math/rand"
"strconv"

"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/utils/runtimeenv"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"google.golang.org/grpc"
)

type pushServer struct {
pbpush.UnimplementedPushMsgServiceServer
database controller.PushDatabase
disCov discovery.SvcDiscoveryRegistry
disCov discovery.Conn
offlinePusher offlinepush.OfflinePusher
pushCh *ConsumerHandler
offlinePushCh *OfflinePushConsumerHandler
}

type Config struct {
RpcConfig config.Push
RedisConfig config.Redis
MongoConfig config.Mongo
KafkaConfig config.Kafka
NotificationConfig config.Notification
Share config.Share
WebhooksConfig config.Webhooks
LocalCacheConfig config.LocalCache
Discovery config.Discovery
FcmConfigPath string

runTimeEnv string
FcmConfigPath config.Path
}

func (p pushServer) DelUserPushToken(ctx context.Context,
@@ -45,42 +49,90 @@ func (p pushServer) DelUserPushToken(ctx context.Context,
return &pbpush.DelUserPushTokenResp{}, nil
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
config.runTimeEnv = runtimeenv.PrintRuntimeEnvironment()

rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig)
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
cacheModel := redis.NewThirdCache(rdb)
offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, config.FcmConfigPath)
var cacheModel cache.ThirdCache
if rdb == nil {
mdb, err := dbb.Mongo(ctx)
if err != nil {
return err
}
mc, err := mgo.NewCacheMgo(mdb.GetDB())
if err != nil {
return err
}
cacheModel = mcache.NewThirdCache(mc)
} else {
cacheModel = redis.NewThirdCache(rdb)
}
offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, string(config.FcmConfigPath))
if err != nil {
return err
}
builder := mqbuild.NewBuilder(&config.KafkaConfig)

offlinePushProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToOfflinePushTopic)
if err != nil {
return err
}
database := controller.NewPushDatabase(cacheModel, offlinePushProducer)

pushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToPushTopic)
if err != nil {
return err
}
offlinePushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToOfflinePushTopic)
if err != nil {
return err
}

database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig)

consumer, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client)
pushHandler, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client)
if err != nil {
return err
}

offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher)
if err != nil {
return err
}
offlineHandler := NewOfflinePushConsumerHandler(offlinePusher)

pbpush.RegisterPushMsgServiceServer(server, &pushServer{
database: database,
disCov: client,
offlinePusher: offlinePusher,
pushCh: consumer,
offlinePushCh: offlinePushConsumer,
})

go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer)
go func() {
pushHandler.WaitCache()
fn := func(ctx context.Context, key string, value []byte) error {
pushHandler.HandleMs2PsChat(ctx, value)
return nil
}
consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
log.ZInfo(consumerCtx, "begin consume messages")
for {
if err := pushConsumer.Subscribe(consumerCtx, fn); err != nil {
log.ZError(consumerCtx, "subscribe err", err)
return
}
}
}()

go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer)
go func() {
fn := func(ctx context.Context, key string, value []byte) error {
offlineHandler.HandleMsg2OfflinePush(ctx, value)
return nil
}
consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
log.ZInfo(consumerCtx, "begin consume messages")
for {
if err := offlinePushConsumer.Subscribe(consumerCtx, fn); err != nil {
log.ZError(consumerCtx, "subscribe err", err)
return
}
}
}()

return nil
}

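The Start above introduces the pattern this commit applies across services: the dbbuild builder returns a nil Redis client when Redis is not configured (the single-process case), and the service then falls back to a Mongo-backed cache. A minimal sketch of that fallback, assuming stand-in types; builder, ThirdCache, and the constructors below only mimic the shapes of dbbuild, mcache, and redis in this repo.

package main

import (
	"context"
	"fmt"
)

type ThirdCache interface{ Backend() string }

type redisCache struct{}

func (redisCache) Backend() string { return "redis" }

type mongoCache struct{}

func (mongoCache) Backend() string { return "mongo" }

type builder struct{ redisEnabled bool }

func (b builder) Redis(context.Context) (*redisCache, error) {
	if !b.redisEnabled {
		return nil, nil // nil client signals "not configured", not an error
	}
	return &redisCache{}, nil
}

func newThirdCache(ctx context.Context, b builder) (ThirdCache, error) {
	rdb, err := b.Redis(ctx)
	if err != nil {
		return nil, err
	}
	if rdb == nil {
		return mongoCache{}, nil // the real code uses mcache.NewThirdCache over Mongo
	}
	return *rdb, nil
}

func main() {
	for _, enabled := range []bool{true, false} {
		c, err := newThirdCache(context.Background(), builder{redisEnabled: enabled})
		if err != nil {
			panic(err)
		}
		fmt.Println("redis enabled:", enabled, "-> cache backend:", c.Backend())
	}
}
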
@@ -3,13 +3,8 @@ package push
import (
"context"
"encoding/json"
"math/rand"
"strconv"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@@ -17,6 +12,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway"
@@ -25,7 +21,6 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/timeutil"
@@ -34,7 +29,7 @@ import (
)

type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup
//pushConsumerGroup mq.Consumer
offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher
pushDatabase controller.PushDatabase
@@ -49,15 +44,7 @@ type ConsumerHandler struct {
conversationClient *rpcli.ConversationClient
}

func NewConsumerHandler(ctx context.Context, config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) {
var consumerHandler ConsumerHandler
var err error
consumerHandler.pushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToPushGroupID,
[]string{config.KafkaConfig.ToPushTopic}, true)
if err != nil {
return nil, err
}
func NewConsumerHandler(ctx context.Context, config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, client discovery.Conn) (*ConsumerHandler, error) {
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return nil, err
@@ -74,13 +61,18 @@ func NewConsumerHandler(ctx context.Context, config *Config, database controller
if err != nil {
return nil, err
}
onlinePusher, err := NewOnlinePusher(client, config)
if err != nil {
return nil, err
}
var consumerHandler ConsumerHandler
consumerHandler.userClient = rpcli.NewUserClient(userConn)
consumerHandler.groupClient = rpcli.NewGroupClient(groupConn)
consumerHandler.msgClient = rpcli.NewMsgClient(msgConn)
consumerHandler.conversationClient = rpcli.NewConversationClient(conversationConn)

consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.onlinePusher = onlinePusher
consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupClient, &config.LocalCacheConfig, rdb)
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
@@ -93,7 +85,7 @@ func NewConsumerHandler(ctx context.Context, config *Config, database controller
return &consumerHandler, nil
}

func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
func (c *ConsumerHandler) HandleMs2PsChat(ctx context.Context, msg []byte) {
msgFromMQ := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &msgFromMQ); err != nil {
log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg))
@@ -127,25 +119,12 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
}
}

func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }

func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
func (c *ConsumerHandler) WaitCache() {
c.onlineCache.Lock.Lock()
for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver {
c.onlineCache.Cond.Wait()
}
c.onlineCache.Lock.Unlock()
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
log.ZInfo(ctx, "begin consume messages")

for msg := range claim.Messages() {
ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
c.handleMs2PsChat(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
}

// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.

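The new WaitCache above blocks push consumption until the online cache finishes its initial subscribe phase, using a sync.Cond guarded predicate loop. A self-contained sketch of the same pattern; the phase constant and field names are illustrative, the real ones live in rpccache.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

const doSubscribeOver = 2 // assumed phase value marking "subscription complete"

type onlineCache struct {
	lock         sync.Mutex
	cond         *sync.Cond
	currentPhase atomic.Uint32
}

func newOnlineCache() *onlineCache {
	c := &onlineCache{}
	c.cond = sync.NewCond(&c.lock)
	return c
}

func (c *onlineCache) advance(phase uint32) {
	c.lock.Lock()
	c.currentPhase.Store(phase)
	c.lock.Unlock()
	c.cond.Broadcast()
}

// waitCache mirrors ConsumerHandler.WaitCache: take the lock, re-check the
// predicate in a loop, and let Wait release the lock while blocked.
func (c *onlineCache) waitCache() {
	c.lock.Lock()
	for c.currentPhase.Load() < doSubscribeOver {
		c.cond.Wait()
	}
	c.lock.Unlock()
}

func main() {
	c := newOnlineCache()
	go func() {
		time.Sleep(50 * time.Millisecond)
		c.advance(doSubscribeOver)
	}()
	c.waitCache()
	fmt.Println("cache ready, consuming can start")
}
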
@@ -18,11 +18,14 @@ import (
"context"
"errors"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"

@@ -43,7 +46,7 @@
type authServer struct {
pbauth.UnimplementedAuthServer
authDatabase controller.AuthDatabase
RegisterCenter discovery.SvcDiscoveryRegistry
RegisterCenter discovery.Conn
config *Config
userClient *rpcli.UserClient
}
@@ -51,15 +54,31 @@ type authServer struct {
type Config struct {
RpcConfig config.Auth
RedisConfig config.Redis
MongoConfig config.Mongo
Share config.Share
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig)
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
var token cache.TokenModel
if rdb == nil {
mdb, err := dbb.Mongo(ctx)
if err != nil {
return err
}
mc, err := mgo.NewCacheMgo(mdb.GetDB())
if err != nil {
return err
}
token = mcache.NewTokenCacheModel(mc, config.RpcConfig.TokenPolicy.Expire)
} else {
token = redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire)
}
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err
@@ -67,7 +86,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
pbauth.RegisterAuthServer(server, &authServer{
RegisterCenter: client,
authDatabase: controller.NewAuthDatabase(
redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire),
token,
config.Share.Secret,
config.RpcConfig.TokenPolicy.Expire,
config.Share.MultiLogin,
@@ -192,7 +211,7 @@ func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID
return err
}
for _, v := range conns {
log.ZDebug(ctx, "forceKickOff", "conn", v.Target())
log.ZDebug(ctx, "forceKickOff", "userID", userID, "platformID", platformID)
client := msggateway.NewMsgGatewayClient(v)
kickReq := &msggateway.KickUserOfflineReq{KickUserIDList: []string{userID}, PlatformID: platformID}
_, err := client.KickUserOffline(ctx, kickReq)

@@ -19,24 +19,22 @@ import (
"sort"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
dbModel "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant"
pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -66,12 +64,13 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -96,7 +95,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
pbconversation.RegisterConversationServer(server, &conversationServer{
conversationNotificationSender: NewConversationNotificationSender(&config.NotificationConfig, msgClient),
conversationDatabase: controller.NewConversationDatabase(conversationDB,
redis.NewConversationRedis(rdb, &config.LocalCacheConfig, redis.GetRocksCacheOptions(), conversationDB), mgocli.GetTx()),
redis.NewConversationRedis(rdb, &config.LocalCacheConfig, conversationDB), mgocli.GetTx()),
userClient: rpcli.NewUserClient(userConn),
groupClient: rpcli.NewGroupClient(groupConn),
msgClient: msgClient,

@@ -17,13 +17,15 @@ package group
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"math/big"
"math/rand"
"strconv"
"strings"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
@@ -42,8 +44,6 @@ import (
pbgroup "github.com/openimsdk/protocol/group"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/wrapperspb"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -76,12 +76,13 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -97,11 +98,6 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}

//userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
//msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
//conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)

userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err

@@ -16,22 +16,24 @@ package msg

import (
"context"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/notification"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
"google.golang.org/grpc"
)
@@ -56,7 +58,7 @@ type Config struct {
// MsgServer encapsulates dependencies required for message handling.
type msgServer struct {
msg.UnimplementedMsgServer
RegisterCenter discovery.SvcDiscoveryRegistry // Service discovery registry for service registration.
RegisterCenter discovery.Conn // Service discovery registry for service registration.
MsgDatabase controller.CommonMsgDatabase // Interface for message database operations.
StreamMsgDatabase controller.StreamMsgDatabase
UserLocalCache *rpccache.UserLocalCache // Local cache for user data.
@@ -76,12 +78,18 @@ func (m *msgServer) addInterceptorHandler(interceptorFunc ...MessageInterceptorF

}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
builder := mqbuild.NewBuilder(&config.KafkaConfig)
redisProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToRedisTopic)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -89,7 +97,16 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}
msgModel := redis.NewMsgCache(rdb, msgDocModel)
var msgModel cache.MsgCache
if rdb == nil {
cm, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
msgModel = mcache.NewMsgCache(cm, msgDocModel)
} else {
msgModel = redis.NewMsgCache(rdb, msgDocModel)
}
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err
@@ -104,10 +121,6 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err
@@ -125,6 +138,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
conversationClient := rpcli.NewConversationClient(conversationConn)
msgDatabase := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, redisProducer)
s := &msgServer{
MsgDatabase: msgDatabase,
StreamMsgDatabase: controller.NewStreamMsgDatabase(streamMsg),

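The msg Start above now builds the topic producer itself and injects it into NewCommonMsgDatabase, instead of handing raw Kafka config to the database layer; the constructor consequently loses its error return. A sketch of that dependency-injection shape under assumed names; Producer and newCommonMsgDatabase below only mimic the mqbuild/controller interfaces.

package main

import (
	"context"
	"fmt"
)

type Producer interface {
	SendMessage(ctx context.Context, key string, value []byte) error
}

type logProducer struct{ topic string }

func (p logProducer) SendMessage(_ context.Context, key string, value []byte) error {
	fmt.Printf("produce topic=%s key=%s value=%s\n", p.topic, key, value)
	return nil
}

type msgDatabase struct{ toRedis Producer }

// The database receives a ready-made producer, so construction cannot fail on
// MQ setup and no error return is needed.
func newCommonMsgDatabase(p Producer) *msgDatabase { return &msgDatabase{toRedis: p} }

func (d *msgDatabase) publish(ctx context.Context, conversationID string, msg []byte) error {
	return d.toRedis.SendMessage(ctx, conversationID, msg)
}

func main() {
	db := newCommonMsgDatabase(logProducer{topic: "toRedisTopic"})
	_ = db.publish(context.Background(), "conv:42", []byte("hello"))
}
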
@@ -16,26 +16,25 @@ package relation

import (
"context"

"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/tools/mq/memamq"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/relation"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil"
@@ -47,7 +46,7 @@ type friendServer struct {
db controller.FriendDatabase
blackDatabase controller.BlackDatabase
notificationSender *FriendNotificationSender
RegisterCenter discovery.SvcDiscoveryRegistry
RegisterCenter discovery.Conn
config *Config
webhookClient *webhook.Client
queue *memamq.MemoryQueue
@@ -66,12 +65,13 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -114,12 +114,12 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
db: controller.NewFriendDatabase(
friendMongoDB,
friendRequestMongoDB,
redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB, redis.GetRocksCacheOptions()),
redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB),
mgocli.GetTx(),
),
blackDatabase: controller.NewBlackDatabase(
blackMongoDB,
redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB, redis.GetRocksCacheOptions()),
redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB),
),
notificationSender: notificationSender,
RegisterCenter: client,

@@ -19,11 +19,12 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"path"
"strconv"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"

"github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@@ -37,7 +38,10 @@ import (
)

func (t *thirdServer) PartLimit(ctx context.Context, req *third.PartLimitReq) (*third.PartLimitResp, error) {
limit := t.s3dataBase.PartLimit()
limit, err := t.s3dataBase.PartLimit()
if err != nil {
return nil, err
}
return &third.PartLimitResp{
MinPartSize: limit.MinPartSize,
MaxPartSize: limit.MaxPartSize,

@@ -17,9 +17,14 @@ package third
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/s3/disable"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
@@ -29,8 +34,6 @@ import (

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cos"
@@ -60,15 +63,17 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}

logdb, err := mgo.NewLogMongo(mgocli.GetDB())
if err != nil {
return err
@@ -77,15 +82,31 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}

var thirdCache cache.ThirdCache
if rdb == nil {
tc, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
thirdCache = mcache.NewThirdCache(tc)
} else {
thirdCache = redis.NewThirdCache(rdb)
}
// Select the oss method according to the profile policy
enable := config.RpcConfig.Object.Enable
var (
o s3.Interface
)
switch enable {
var o s3.Interface
switch enable := config.RpcConfig.Object.Enable; enable {
case "minio":
o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
var minioCache minio.Cache
if rdb == nil {
mc, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
minioCache = mcache.NewMinioCache(mc)
} else {
minioCache = redis.NewMinioCache(rdb)
}
o, err = minio.NewMinio(ctx, minioCache, *config.MinioConfig.Build())
case "cos":
o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
case "oss":
@@ -94,6 +115,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
o, err = kodo.NewKodo(*config.RpcConfig.Object.Kodo.Build())
case "aws":
o, err = aws.NewAws(*config.RpcConfig.Object.Aws.Build())
case "":
o = disable.NewDisable()
default:
err = fmt.Errorf("invalid object enable: %s", enable)
}
@@ -106,7 +129,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
}
localcache.InitLocalCache(&config.LocalCacheConfig)
third.RegisterThirdServer(server, &thirdServer{
thirdDatabase: controller.NewThirdDatabase(redis.NewThirdCache(rdb), logdb),
thirdDatabase: controller.NewThirdDatabase(thirdCache, logdb),
s3dataBase: controller.NewS3Database(rdb, o, s3db),
defaultExpire: time.Hour * 24 * 7,
config: config,

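The s3 switch above is the "s3 switch" feature from this commit's log: an empty "enable" value now selects a disabled implementation instead of failing, so a deployment with no object storage configured still starts. A minimal sketch of the dispatch; the s3Interface stub and impl names are assumptions for illustration.

package main

import "fmt"

type s3Interface interface{ Engine() string }

type minioImpl struct{}

func (minioImpl) Engine() string { return "minio" }

type disabledImpl struct{}

func (disabledImpl) Engine() string { return "disabled" }

func selectS3(enable string) (s3Interface, error) {
	switch enable {
	case "minio":
		return minioImpl{}, nil
	case "": // no object storage configured: fall back to the disable stub
		return disabledImpl{}, nil
	default:
		return nil, fmt.Errorf("invalid object enable: %s", enable)
	}
}

func main() {
	for _, enable := range []string{"minio", "", "ftp"} {
		o, err := selectS3(enable)
		if err != nil {
			fmt.Println(enable, "->", err)
			continue
		}
		fmt.Printf("%q -> %s\n", enable, o.Engine())
	}
}
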
@@ -23,29 +23,27 @@ import (
"time"

"github.com/openimsdk/open-im-server/v3/internal/rpc/relation"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/group"
friendpb "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
pbuser "github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/pagination"
registry "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil"
"google.golang.org/grpc"
@@ -57,7 +55,7 @@ type userServer struct {
db controller.UserDatabase
friendNotificationSender *relation.FriendNotificationSender
userNotificationSender *UserNotificationSender
RegisterCenter registry.SvcDiscoveryRegistry
RegisterCenter discovery.Conn
config *Config
webhookClient *webhook.Client
groupClient *rpcli.GroupClient
@@ -76,15 +74,17 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}

users := make([]*tablerelation.User, 0)

for _, v := range config.Share.IMAdminUserID {

@ -1,47 +1,37 @@
|
||||
package tools
|
||||
package cron
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
|
||||
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
|
||||
pbconversation "github.com/openimsdk/protocol/conversation"
|
||||
"github.com/openimsdk/protocol/msg"
|
||||
"github.com/openimsdk/protocol/third"
|
||||
"github.com/openimsdk/tools/discovery"
|
||||
"github.com/openimsdk/tools/discovery/etcd"
|
||||
|
||||
"github.com/openimsdk/tools/mcontext"
|
||||
"github.com/openimsdk/tools/mw"
|
||||
"github.com/openimsdk/tools/utils/runtimeenv"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/mcontext"
|
||||
"github.com/openimsdk/tools/utils/runtimeenv"
|
||||
"github.com/robfig/cron/v3"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type CronTaskConfig struct {
|
||||
type Config struct {
|
||||
CronTask config.CronTask
|
||||
Share config.Share
|
||||
Discovery config.Discovery
|
||||
|
||||
runTimeEnv string
|
||||
}
|
||||
|
||||
func Start(ctx context.Context, conf *CronTaskConfig) error {
|
||||
conf.runTimeEnv = runtimeenv.PrintRuntimeEnvironment()
|
||||
|
||||
log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", conf.runTimeEnv, "chatRecordsClearTime", conf.CronTask.CronExecuteTime, "msgDestructTime", conf.CronTask.RetainChatRecords)
|
||||
func Start(ctx context.Context, conf *Config, client discovery.Conn, service grpc.ServiceRegistrar) error {
|
||||
log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", runtimeenv.RuntimeEnvironment(), "chatRecordsClearTime", conf.CronTask.CronExecuteTime, "msgDestructTime", conf.CronTask.RetainChatRecords)
|
||||
if conf.CronTask.RetainChatRecords < 1 {
|
||||
return errs.New("msg destruct time must be greater than 1").Wrap()
|
||||
log.ZInfo(ctx, "disable cron")
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
}
|
||||
client, err := kdisc.NewDiscoveryRegister(&conf.Discovery, conf.runTimeEnv, nil)
|
||||
if err != nil {
|
||||
return errs.WrapMsg(err, "failed to register discovery service")
|
||||
}
|
||||
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
ctx = mcontext.SetOpUserID(ctx, conf.Share.IMAdminUserID[0])
|
||||
|
||||
msgConn, err := client.GetConn(ctx, conf.Discovery.RpcService.Msg)
|
||||
@ -88,13 +78,15 @@ func Start(ctx context.Context, conf *CronTaskConfig) error {
|
||||
}
|
||||
log.ZDebug(ctx, "start cron task", "CronExecuteTime", conf.CronTask.CronExecuteTime)
|
||||
srv.cron.Start()
|
||||
log.ZDebug(ctx, "cron task server is running")
|
||||
<-ctx.Done()
|
||||
log.ZDebug(ctx, "cron task server is shutting down")
|
||||
return nil
|
||||
}
|
||||
|
||||
type cronServer struct {
|
||||
ctx context.Context
|
||||
config *CronTaskConfig
|
||||
config *Config
|
||||
cron *cron.Cron
|
||||
msgClient msg.MsgClient
|
||||
conversationClient pbconversation.ConversationClient
|
@@ -1,4 +1,4 @@
package tools
package cron

import (
    "context"
@@ -24,7 +24,7 @@ func TestName(t *testing.T) {
            Address: []string{"localhost:12379"},
        },
    }
    client, err := kdisc.NewDiscoveryRegister(conf, "source")
    client, err := kdisc.NewDiscoveryRegister(conf, nil)
    if err != nil {
        panic(err)
    }
@@ -1,12 +1,13 @@
package tools
package cron

import (
    "fmt"
    "os"
    "time"

    "github.com/openimsdk/protocol/msg"
    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/mcontext"
    "os"
    "time"
)

func (c *cronServer) deleteMsg() {
@@ -1,12 +1,13 @@
package tools
package cron

import (
    "fmt"
    "os"
    "time"

    "github.com/openimsdk/protocol/third"
    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/mcontext"
    "os"
    "time"
)

func (c *cronServer) clearS3() {
@@ -1,12 +1,13 @@
package tools
package cron

import (
    "fmt"
    "os"
    "time"

    pbconversation "github.com/openimsdk/protocol/conversation"
    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/mcontext"
    "os"
    "time"
)

func (c *cronServer) clearUserMsg() {
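Note: deleteMsg, clearS3, and clearUserMsg above are the jobs the cron server schedules with robfig/cron against the configured cronExecuteTime spec. A standalone sketch of that scheduling pattern (job bodies and the spec string are placeholders, not the project's code):

package main

import (
    "fmt"
    "log"

    "github.com/robfig/cron/v3"
)

func main() {
    c := cron.New()
    // "0 2 * * *" (02:00 daily) stands in for cronTask.cronExecuteTime.
    jobs := map[string]func(){
        "deleteMsg":    func() { fmt.Println("delete expired chat records") },
        "clearS3":      func() { fmt.Println("delete expired S3 objects") },
        "clearUserMsg": func() { fmt.Println("destruct user messages") },
    }
    for name, job := range jobs {
        if _, err := c.AddFunc("0 2 * * *", job); err != nil {
            log.Fatalf("add cron job %s: %v", name, err)
        }
    }
    c.Start()
    select {} // block, as the real server does with <-ctx.Done()
}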
@@ -19,6 +19,7 @@ import (

    "github.com/openimsdk/open-im-server/v3/internal/api"
    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
    "github.com/openimsdk/open-im-server/v3/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
@@ -32,7 +33,7 @@ type ApiCmd struct {
}

func NewApiCmd() *ApiCmd {
    apiConfig := api.Config{AllConfig: &config.AllConfig{}}
    var apiConfig api.Config
    ret := &ApiCmd{apiConfig: &apiConfig}
    ret.configMap = map[string]any{
        config.DiscoveryConfigFilename: &apiConfig.Discovery,
@@ -61,7 +62,7 @@ func NewApiCmd() *ApiCmd {
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        apiConfig.ConfigPath = ret.configPath
        apiConfig.ConfigPath = config.Path(ret.configPath)
        return ret.runE()
    }
    return ret
@@ -72,5 +73,22 @@ func (a *ApiCmd) Exec() error {
}

func (a *ApiCmd) runE() error {
    return api.Start(a.ctx, a.Index(), a.apiConfig)
    a.apiConfig.Index = config.Index(a.Index())
    prometheus := config.Prometheus{
        Enable: a.apiConfig.API.Prometheus.Enable,
        Ports:  a.apiConfig.API.Prometheus.Ports,
    }
    return startrpc.Start(
        a.ctx, &a.apiConfig.Discovery,
        &prometheus,
        a.apiConfig.API.Api.ListenIP, "",
        a.apiConfig.API.Prometheus.AutoSetPorts,
        nil, int(a.apiConfig.Index),
        a.apiConfig.Discovery.RpcService.MessageGateway,
        &a.apiConfig.Notification,
        a.apiConfig,
        []string{},
        []string{},
        api.Start,
    )
}
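Note: from here on every command's runE funnels into the same startrpc.Start helper: discovery config, Prometheus config, listen/register IPs, port handling, the service index, and finally the service-specific start function (api.Start here; cron.Start, msggateway.Start, and msgtransfer.Start in the commands below). The callback shape this implies is sketched here; the generic signature is an assumption inferred from the call sites, not code from this commit:

package startrpc

import (
    "context"

    "github.com/openimsdk/tools/discovery"
    "google.golang.org/grpc"
)

// Assumed shape of the final argument to startrpc.Start: every service
// receives its parsed config plus an injected discovery client and gRPC
// registrar, so one process can host any combination of services.
type startFunc[C any] func(ctx context.Context, conf C, client discovery.Conn, server grpc.ServiceRegistrar) error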
@@ -38,6 +38,7 @@ func NewAuthRpcCmd() *AuthRpcCmd {
    ret.configMap = map[string]any{
        config.OpenIMRPCAuthCfgFileName: &authConfig.RpcConfig,
        config.RedisConfigFileName:      &authConfig.RedisConfig,
        config.MongodbConfigFileName:    &authConfig.MongoConfig,
        config.ShareFileName:            &authConfig.Share,
        config.DiscoveryConfigFilename:  &authConfig.Discovery,
    }
@@ -17,8 +17,9 @@ package cmd
import (
    "context"

    "github.com/openimsdk/open-im-server/v3/internal/tools"
    "github.com/openimsdk/open-im-server/v3/internal/tools/cron"
    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
    "github.com/openimsdk/open-im-server/v3/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
@@ -28,11 +29,11 @@ type CronTaskCmd struct {
    *RootCmd
    ctx context.Context
    configMap map[string]any
    cronTaskConfig *tools.CronTaskConfig
    cronTaskConfig *cron.Config
}

func NewCronTaskCmd() *CronTaskCmd {
    var cronTaskConfig tools.CronTaskConfig
    var cronTaskConfig cron.Config
    ret := &CronTaskCmd{cronTaskConfig: &cronTaskConfig}
    ret.configMap = map[string]any{
        config.OpenIMCronTaskCfgFileName: &cronTaskConfig.CronTask,
@@ -52,5 +53,18 @@ func (a *CronTaskCmd) Exec() error {
}

func (a *CronTaskCmd) runE() error {
    return tools.Start(a.ctx, a.cronTaskConfig)
    var prometheus config.Prometheus
    return startrpc.Start(
        a.ctx, &a.cronTaskConfig.Discovery,
        &prometheus,
        "", "",
        true,
        nil, 0,
        "",
        nil,
        a.cronTaskConfig,
        []string{},
        []string{},
        cron.Start,
    )
}
@@ -19,6 +19,7 @@ import (

    "github.com/openimsdk/open-im-server/v3/internal/msggateway"
    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
    "github.com/openimsdk/open-im-server/v3/version"

    "github.com/openimsdk/tools/system/program"
@@ -55,5 +56,20 @@ func (m *MsgGatewayCmd) Exec() error {
}

func (m *MsgGatewayCmd) runE() error {
    return msggateway.Start(m.ctx, m.Index(), m.msgGatewayConfig)
    m.msgGatewayConfig.Index = config.Index(m.Index())
    rpc := m.msgGatewayConfig.MsgGateway.RPC
    var prometheus config.Prometheus
    return startrpc.Start(
        m.ctx, &m.msgGatewayConfig.Discovery,
        &prometheus,
        rpc.ListenIP, rpc.RegisterIP,
        rpc.AutoSetPorts,
        rpc.Ports, int(m.msgGatewayConfig.Index),
        m.msgGatewayConfig.Discovery.RpcService.MessageGateway,
        nil,
        m.msgGatewayConfig,
        []string{},
        []string{},
        msggateway.Start,
    )
}
@@ -19,6 +19,7 @@ import (

    "github.com/openimsdk/open-im-server/v3/internal/msgtransfer"
    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
    "github.com/openimsdk/open-im-server/v3/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
@@ -56,5 +57,19 @@ func (m *MsgTransferCmd) Exec() error {
}

func (m *MsgTransferCmd) runE() error {
    return msgtransfer.Start(m.ctx, m.Index(), m.msgTransferConfig)
    m.msgTransferConfig.Index = config.Index(m.Index())
    var prometheus config.Prometheus
    return startrpc.Start(
        m.ctx, &m.msgTransferConfig.Discovery,
        &prometheus,
        "", "",
        true,
        nil, int(m.msgTransferConfig.Index),
        "",
        nil,
        m.msgTransferConfig,
        []string{},
        []string{},
        msgtransfer.Start,
    )
}
@@ -38,6 +38,7 @@ func NewPushRpcCmd() *PushRpcCmd {
    ret.configMap = map[string]any{
        config.OpenIMPushCfgFileName: &pushConfig.RpcConfig,
        config.RedisConfigFileName:   &pushConfig.RedisConfig,
        config.MongodbConfigFileName: &pushConfig.MongoConfig,
        config.KafkaConfigFileName:   &pushConfig.KafkaConfig,
        config.ShareFileName:         &pushConfig.Share,
        config.NotificationFileName:  &pushConfig.NotificationConfig,
@@ -48,7 +49,7 @@ func NewPushRpcCmd() *PushRpcCmd {
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        ret.pushConfig.FcmConfigPath = ret.ConfigPath()
        ret.pushConfig.FcmConfigPath = config.Path(ret.ConfigPath())
        return ret.runE()
    }
    return ret
@@ -12,7 +12,6 @@ import (
    "github.com/openimsdk/tools/discovery/etcd"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/utils/runtimeenv"
    "github.com/spf13/cobra"
    clientv3 "go.etcd.io/etcd/client/v3"
)
@@ -86,14 +85,12 @@ func (r *RootCmd) initEtcd() error {
        return err
    }
    disConfig := config.Discovery{}
    env := runtimeenv.PrintRuntimeEnvironment()
    err = config.Load(configDirectory, config.DiscoveryConfigFilename, config.EnvPrefixMap[config.DiscoveryConfigFilename],
        env, &disConfig)
    err = config.Load(configDirectory, config.DiscoveryConfigFilename, config.EnvPrefixMap[config.DiscoveryConfigFilename], &disConfig)
    if err != nil {
        return err
    }
    if disConfig.Enable == config.ETCD {
        discov, _ := kdisc.NewDiscoveryRegister(&disConfig, env, nil)
        discov, _ := kdisc.NewDiscoveryRegister(&disConfig, nil)
        r.etcdClient = discov.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
    }
    return nil
@@ -125,18 +122,16 @@ func (r *RootCmd) initializeConfiguration(cmd *cobra.Command, opts *CmdOpts) err
        return err
    }

    runtimeEnv := runtimeenv.PrintRuntimeEnvironment()

    // Load common configuration file
    //opts.configMap[ShareFileName] = StructEnvPrefix{EnvPrefix: shareEnvPrefix, ConfigStruct: &r.share}
    for configFileName, configStruct := range opts.configMap {
        err := config.Load(configDirectory, configFileName, config.EnvPrefixMap[configFileName], runtimeEnv, configStruct)
        err := config.Load(configDirectory, configFileName, config.EnvPrefixMap[configFileName], configStruct)
        if err != nil {
            return err
        }
    }
    // Load common log configuration file
    return config.Load(configDirectory, config.LogConfigFileName, config.EnvPrefixMap[config.LogConfigFileName], runtimeEnv, &r.log)
    return config.Load(configDirectory, config.LogConfigFileName, config.EnvPrefixMap[config.LogConfigFileName], &r.log)
}

func (r *RootCmd) updateConfigFromEtcd(opts *CmdOpts) error {
@@ -208,7 +203,6 @@ func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {

func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
    err := log.InitLoggerFromConfig(

        cmdOpts.loggerPrefixName,
        r.processName,
        "", "",
@@ -28,378 +28,348 @@ import (
    "github.com/openimsdk/tools/s3/oss"
)

const StructTagName = "yaml"

type Path string

type Index int

type CacheConfig struct {
    Topic string `mapstructure:"topic"`
    SlotNum int `mapstructure:"slotNum"`
    SlotSize int `mapstructure:"slotSize"`
    SuccessExpire int `mapstructure:"successExpire"`
    FailedExpire int `mapstructure:"failedExpire"`
    Topic string `yaml:"topic"`
    SlotNum int `yaml:"slotNum"`
    SlotSize int `yaml:"slotSize"`
    SuccessExpire int `yaml:"successExpire"`
    FailedExpire int `yaml:"failedExpire"`
}

type LocalCache struct {
    User CacheConfig `mapstructure:"user"`
    Group CacheConfig `mapstructure:"group"`
    Friend CacheConfig `mapstructure:"friend"`
    Conversation CacheConfig `mapstructure:"conversation"`
    User CacheConfig `yaml:"user"`
    Group CacheConfig `yaml:"group"`
    Friend CacheConfig `yaml:"friend"`
    Conversation CacheConfig `yaml:"conversation"`
}

type Log struct {
    StorageLocation string `mapstructure:"storageLocation"`
    RotationTime uint `mapstructure:"rotationTime"`
    RemainRotationCount uint `mapstructure:"remainRotationCount"`
    RemainLogLevel int `mapstructure:"remainLogLevel"`
    IsStdout bool `mapstructure:"isStdout"`
    IsJson bool `mapstructure:"isJson"`
    IsSimplify bool `mapstructure:"isSimplify"`
    WithStack bool `mapstructure:"withStack"`
    StorageLocation string `yaml:"storageLocation"`
    RotationTime uint `yaml:"rotationTime"`
    RemainRotationCount uint `yaml:"remainRotationCount"`
    RemainLogLevel int `yaml:"remainLogLevel"`
    IsStdout bool `yaml:"isStdout"`
    IsJson bool `yaml:"isJson"`
    IsSimplify bool `yaml:"isSimplify"`
    WithStack bool `yaml:"withStack"`
}

type Minio struct {
    Bucket string `mapstructure:"bucket"`
    AccessKeyID string `mapstructure:"accessKeyID"`
    SecretAccessKey string `mapstructure:"secretAccessKey"`
    SessionToken string `mapstructure:"sessionToken"`
    InternalAddress string `mapstructure:"internalAddress"`
    ExternalAddress string `mapstructure:"externalAddress"`
    PublicRead bool `mapstructure:"publicRead"`
    Bucket string `yaml:"bucket"`
    AccessKeyID string `yaml:"accessKeyID"`
    SecretAccessKey string `yaml:"secretAccessKey"`
    SessionToken string `yaml:"sessionToken"`
    InternalAddress string `yaml:"internalAddress"`
    ExternalAddress string `yaml:"externalAddress"`
    PublicRead bool `yaml:"publicRead"`
}

type Mongo struct {
    URI string `mapstructure:"uri"`
    Address []string `mapstructure:"address"`
    Database string `mapstructure:"database"`
    Username string `mapstructure:"username"`
    Password string `mapstructure:"password"`
    AuthSource string `mapstructure:"authSource"`
    MaxPoolSize int `mapstructure:"maxPoolSize"`
    MaxRetry int `mapstructure:"maxRetry"`
    URI string `yaml:"uri"`
    Address []string `yaml:"address"`
    Database string `yaml:"database"`
    Username string `yaml:"username"`
    Password string `yaml:"password"`
    AuthSource string `yaml:"authSource"`
    MaxPoolSize int `yaml:"maxPoolSize"`
    MaxRetry int `yaml:"maxRetry"`
}
type Kafka struct {
    Username string `mapstructure:"username"`
    Password string `mapstructure:"password"`
    ProducerAck string `mapstructure:"producerAck"`
    CompressType string `mapstructure:"compressType"`
    Address []string `mapstructure:"address"`
    ToRedisTopic string `mapstructure:"toRedisTopic"`
    ToMongoTopic string `mapstructure:"toMongoTopic"`
    ToPushTopic string `mapstructure:"toPushTopic"`
    ToOfflinePushTopic string `mapstructure:"toOfflinePushTopic"`
    ToRedisGroupID string `mapstructure:"toRedisGroupID"`
    ToMongoGroupID string `mapstructure:"toMongoGroupID"`
    ToPushGroupID string `mapstructure:"toPushGroupID"`
    ToOfflineGroupID string `mapstructure:"toOfflinePushGroupID"`
    Username string `yaml:"username"`
    Password string `yaml:"password"`
    ProducerAck string `yaml:"producerAck"`
    CompressType string `yaml:"compressType"`
    Address []string `yaml:"address"`
    ToRedisTopic string `yaml:"toRedisTopic"`
    ToMongoTopic string `yaml:"toMongoTopic"`
    ToPushTopic string `yaml:"toPushTopic"`
    ToOfflinePushTopic string `yaml:"toOfflinePushTopic"`
    ToRedisGroupID string `yaml:"toRedisGroupID"`
    ToMongoGroupID string `yaml:"toMongoGroupID"`
    ToPushGroupID string `yaml:"toPushGroupID"`
    ToOfflineGroupID string `yaml:"toOfflinePushGroupID"`

    Tls TLSConfig `mapstructure:"tls"`
    Tls TLSConfig `yaml:"tls"`
}
type TLSConfig struct {
    EnableTLS bool `mapstructure:"enableTLS"`
    CACrt string `mapstructure:"caCrt"`
    ClientCrt string `mapstructure:"clientCrt"`
    ClientKey string `mapstructure:"clientKey"`
    ClientKeyPwd string `mapstructure:"clientKeyPwd"`
    InsecureSkipVerify bool `mapstructure:"insecureSkipVerify"`
    EnableTLS bool `yaml:"enableTLS"`
    CACrt string `yaml:"caCrt"`
    ClientCrt string `yaml:"clientCrt"`
    ClientKey string `yaml:"clientKey"`
    ClientKeyPwd string `yaml:"clientKeyPwd"`
    InsecureSkipVerify bool `yaml:"insecureSkipVerify"`
}

type API struct {
    Api struct {
        ListenIP string `mapstructure:"listenIP"`
        Ports []int `mapstructure:"ports"`
        CompressionLevel int `mapstructure:"compressionLevel"`
    } `mapstructure:"api"`
        ListenIP string `yaml:"listenIP"`
        Ports []int `yaml:"ports"`
        CompressionLevel int `yaml:"compressionLevel"`
    } `yaml:"api"`
    Prometheus struct {
        Enable bool `mapstructure:"enable"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
        GrafanaURL string `mapstructure:"grafanaURL"`
    } `mapstructure:"prometheus"`
        Enable bool `yaml:"enable"`
        AutoSetPorts bool `yaml:"autoSetPorts"`
        Ports []int `yaml:"ports"`
        GrafanaURL string `yaml:"grafanaURL"`
    } `yaml:"prometheus"`
}

type CronTask struct {
    CronExecuteTime string `mapstructure:"cronExecuteTime"`
    RetainChatRecords int `mapstructure:"retainChatRecords"`
    FileExpireTime int `mapstructure:"fileExpireTime"`
    DeleteObjectType []string `mapstructure:"deleteObjectType"`
    CronExecuteTime string `yaml:"cronExecuteTime"`
    RetainChatRecords int `yaml:"retainChatRecords"`
    FileExpireTime int `yaml:"fileExpireTime"`
    DeleteObjectType []string `yaml:"deleteObjectType"`
}

type OfflinePushConfig struct {
    Enable bool `mapstructure:"enable"`
    Title string `mapstructure:"title"`
    Desc string `mapstructure:"desc"`
    Ext string `mapstructure:"ext"`
    Enable bool `yaml:"enable"`
    Title string `yaml:"title"`
    Desc string `yaml:"desc"`
    Ext string `yaml:"ext"`
}

type NotificationConfig struct {
    IsSendMsg bool `mapstructure:"isSendMsg"`
    ReliabilityLevel int `mapstructure:"reliabilityLevel"`
    UnreadCount bool `mapstructure:"unreadCount"`
    OfflinePush OfflinePushConfig `mapstructure:"offlinePush"`
    IsSendMsg bool `yaml:"isSendMsg"`
    ReliabilityLevel int `yaml:"reliabilityLevel"`
    UnreadCount bool `yaml:"unreadCount"`
    OfflinePush OfflinePushConfig `yaml:"offlinePush"`
}

type Notification struct {
    GroupCreated NotificationConfig `mapstructure:"groupCreated"`
    GroupInfoSet NotificationConfig `mapstructure:"groupInfoSet"`
    JoinGroupApplication NotificationConfig `mapstructure:"joinGroupApplication"`
    MemberQuit NotificationConfig `mapstructure:"memberQuit"`
    GroupApplicationAccepted NotificationConfig `mapstructure:"groupApplicationAccepted"`
    GroupApplicationRejected NotificationConfig `mapstructure:"groupApplicationRejected"`
    GroupOwnerTransferred NotificationConfig `mapstructure:"groupOwnerTransferred"`
    MemberKicked NotificationConfig `mapstructure:"memberKicked"`
    MemberInvited NotificationConfig `mapstructure:"memberInvited"`
    MemberEnter NotificationConfig `mapstructure:"memberEnter"`
    GroupDismissed NotificationConfig `mapstructure:"groupDismissed"`
    GroupMuted NotificationConfig `mapstructure:"groupMuted"`
    GroupCancelMuted NotificationConfig `mapstructure:"groupCancelMuted"`
    GroupMemberMuted NotificationConfig `mapstructure:"groupMemberMuted"`
    GroupMemberCancelMuted NotificationConfig `mapstructure:"groupMemberCancelMuted"`
    GroupMemberInfoSet NotificationConfig `mapstructure:"groupMemberInfoSet"`
    GroupCreated NotificationConfig `yaml:"groupCreated"`
    GroupInfoSet NotificationConfig `yaml:"groupInfoSet"`
    JoinGroupApplication NotificationConfig `yaml:"joinGroupApplication"`
    MemberQuit NotificationConfig `yaml:"memberQuit"`
    GroupApplicationAccepted NotificationConfig `yaml:"groupApplicationAccepted"`
    GroupApplicationRejected NotificationConfig `yaml:"groupApplicationRejected"`
    GroupOwnerTransferred NotificationConfig `yaml:"groupOwnerTransferred"`
    MemberKicked NotificationConfig `yaml:"memberKicked"`
    MemberInvited NotificationConfig `yaml:"memberInvited"`
    MemberEnter NotificationConfig `yaml:"memberEnter"`
    GroupDismissed NotificationConfig `yaml:"groupDismissed"`
    GroupMuted NotificationConfig `yaml:"groupMuted"`
    GroupCancelMuted NotificationConfig `yaml:"groupCancelMuted"`
    GroupMemberMuted NotificationConfig `yaml:"groupMemberMuted"`
    GroupMemberCancelMuted NotificationConfig `yaml:"groupMemberCancelMuted"`
    GroupMemberInfoSet NotificationConfig `yaml:"groupMemberInfoSet"`
    GroupMemberSetToAdmin NotificationConfig `yaml:"groupMemberSetToAdmin"`
    GroupMemberSetToOrdinary NotificationConfig `yaml:"groupMemberSetToOrdinaryUser"`
    GroupInfoSetAnnouncement NotificationConfig `mapstructure:"groupInfoSetAnnouncement"`
    GroupInfoSetName NotificationConfig `mapstructure:"groupInfoSetName"`
    FriendApplicationAdded NotificationConfig `mapstructure:"friendApplicationAdded"`
    FriendApplicationApproved NotificationConfig `mapstructure:"friendApplicationApproved"`
    FriendApplicationRejected NotificationConfig `mapstructure:"friendApplicationRejected"`
    FriendAdded NotificationConfig `mapstructure:"friendAdded"`
    FriendDeleted NotificationConfig `mapstructure:"friendDeleted"`
    FriendRemarkSet NotificationConfig `mapstructure:"friendRemarkSet"`
    BlackAdded NotificationConfig `mapstructure:"blackAdded"`
    BlackDeleted NotificationConfig `mapstructure:"blackDeleted"`
    FriendInfoUpdated NotificationConfig `mapstructure:"friendInfoUpdated"`
    UserInfoUpdated NotificationConfig `mapstructure:"userInfoUpdated"`
    UserStatusChanged NotificationConfig `mapstructure:"userStatusChanged"`
    ConversationChanged NotificationConfig `mapstructure:"conversationChanged"`
    ConversationSetPrivate NotificationConfig `mapstructure:"conversationSetPrivate"`
    GroupInfoSetAnnouncement NotificationConfig `yaml:"groupInfoSetAnnouncement"`
    GroupInfoSetName NotificationConfig `yaml:"groupInfoSetName"`
    FriendApplicationAdded NotificationConfig `yaml:"friendApplicationAdded"`
    FriendApplicationApproved NotificationConfig `yaml:"friendApplicationApproved"`
    FriendApplicationRejected NotificationConfig `yaml:"friendApplicationRejected"`
    FriendAdded NotificationConfig `yaml:"friendAdded"`
    FriendDeleted NotificationConfig `yaml:"friendDeleted"`
    FriendRemarkSet NotificationConfig `yaml:"friendRemarkSet"`
    BlackAdded NotificationConfig `yaml:"blackAdded"`
    BlackDeleted NotificationConfig `yaml:"blackDeleted"`
    FriendInfoUpdated NotificationConfig `yaml:"friendInfoUpdated"`
    UserInfoUpdated NotificationConfig `yaml:"userInfoUpdated"`
    UserStatusChanged NotificationConfig `yaml:"userStatusChanged"`
    ConversationChanged NotificationConfig `yaml:"conversationChanged"`
    ConversationSetPrivate NotificationConfig `yaml:"conversationSetPrivate"`
}

type Prometheus struct {
    Enable bool `mapstructure:"enable"`
    Ports []int `mapstructure:"ports"`
    Enable bool `yaml:"enable"`
    Ports []int `yaml:"ports"`
}

type MsgGateway struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    ListenIP string `mapstructure:"listenIP"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
    ListenIP string `yaml:"listenIP"`
    LongConnSvr struct {
        Ports []int `mapstructure:"ports"`
        WebsocketMaxConnNum int `mapstructure:"websocketMaxConnNum"`
        WebsocketMaxMsgLen int `mapstructure:"websocketMaxMsgLen"`
        WebsocketTimeout int `mapstructure:"websocketTimeout"`
    } `mapstructure:"longConnSvr"`
        Ports []int `yaml:"ports"`
        WebsocketMaxConnNum int `yaml:"websocketMaxConnNum"`
        WebsocketMaxMsgLen int `yaml:"websocketMaxMsgLen"`
        WebsocketTimeout int `yaml:"websocketTimeout"`
    } `yaml:"longConnSvr"`
}

type MsgTransfer struct {
    Prometheus struct {
        Enable bool `mapstructure:"enable"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"prometheus"`
        Enable bool `yaml:"enable"`
        AutoSetPorts bool `yaml:"autoSetPorts"`
        Ports []int `yaml:"ports"`
    } `yaml:"prometheus"`
}

type Push struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    MaxConcurrentWorkers int `mapstructure:"maxConcurrentWorkers"`
    Enable string `mapstructure:"enable"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
    MaxConcurrentWorkers int `yaml:"maxConcurrentWorkers"`
    Enable string `yaml:"enable"`
    GeTui struct {
        PushUrl string `mapstructure:"pushUrl"`
        MasterSecret string `mapstructure:"masterSecret"`
        AppKey string `mapstructure:"appKey"`
        Intent string `mapstructure:"intent"`
        ChannelID string `mapstructure:"channelID"`
        ChannelName string `mapstructure:"channelName"`
    } `mapstructure:"geTui"`
        PushUrl string `yaml:"pushUrl"`
        MasterSecret string `yaml:"masterSecret"`
        AppKey string `yaml:"appKey"`
        Intent string `yaml:"intent"`
        ChannelID string `yaml:"channelID"`
        ChannelName string `yaml:"channelName"`
    } `yaml:"geTui"`
    FCM struct {
        FilePath string `mapstructure:"filePath"`
        AuthURL string `mapstructure:"authURL"`
    } `mapstructure:"fcm"`
        FilePath string `yaml:"filePath"`
        AuthURL string `yaml:"authURL"`
    } `yaml:"fcm"`
    JPush struct {
        AppKey string `mapstructure:"appKey"`
        MasterSecret string `mapstructure:"masterSecret"`
        PushURL string `mapstructure:"pushURL"`
        PushIntent string `mapstructure:"pushIntent"`
    } `mapstructure:"jpush"`
        AppKey string `yaml:"appKey"`
        MasterSecret string `yaml:"masterSecret"`
        PushURL string `yaml:"pushURL"`
        PushIntent string `yaml:"pushIntent"`
    } `yaml:"jpush"`
    IOSPush struct {
        PushSound string `mapstructure:"pushSound"`
        BadgeCount bool `mapstructure:"badgeCount"`
        Production bool `mapstructure:"production"`
    } `mapstructure:"iosPush"`
    FullUserCache bool `mapstructure:"fullUserCache"`
        PushSound string `yaml:"pushSound"`
        BadgeCount bool `yaml:"badgeCount"`
        Production bool `yaml:"production"`
    } `yaml:"iosPush"`
    FullUserCache bool `yaml:"fullUserCache"`
}

type Auth struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
    TokenPolicy struct {
        Expire int64 `mapstructure:"expire"`
    } `mapstructure:"tokenPolicy"`
        Expire int64 `yaml:"expire"`
    } `yaml:"tokenPolicy"`
}

type Conversation struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
}

type Friend struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
}

type Group struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    EnableHistoryForNewMembers bool `mapstructure:"enableHistoryForNewMembers"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
    EnableHistoryForNewMembers bool `yaml:"enableHistoryForNewMembers"`
}

type Msg struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    FriendVerify bool `mapstructure:"friendVerify"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
    FriendVerify bool `yaml:"friendVerify"`
}

type Third struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
    Object struct {
        Enable string `mapstructure:"enable"`
        Cos Cos `mapstructure:"cos"`
        Oss Oss `mapstructure:"oss"`
        Kodo Kodo `mapstructure:"kodo"`
        Aws Aws `mapstructure:"aws"`
    } `mapstructure:"object"`
        Enable string `yaml:"enable"`
        Cos Cos `yaml:"cos"`
        Oss Oss `yaml:"oss"`
        Kodo Kodo `yaml:"kodo"`
        Aws Aws `yaml:"aws"`
    } `yaml:"object"`
}
type Cos struct {
    BucketURL string `mapstructure:"bucketURL"`
    SecretID string `mapstructure:"secretID"`
    SecretKey string `mapstructure:"secretKey"`
    SessionToken string `mapstructure:"sessionToken"`
    PublicRead bool `mapstructure:"publicRead"`
    BucketURL string `yaml:"bucketURL"`
    SecretID string `yaml:"secretID"`
    SecretKey string `yaml:"secretKey"`
    SessionToken string `yaml:"sessionToken"`
    PublicRead bool `yaml:"publicRead"`
}
type Oss struct {
    Endpoint string `mapstructure:"endpoint"`
    Bucket string `mapstructure:"bucket"`
    BucketURL string `mapstructure:"bucketURL"`
    AccessKeyID string `mapstructure:"accessKeyID"`
    AccessKeySecret string `mapstructure:"accessKeySecret"`
    SessionToken string `mapstructure:"sessionToken"`
    PublicRead bool `mapstructure:"publicRead"`
    Endpoint string `yaml:"endpoint"`
    Bucket string `yaml:"bucket"`
    BucketURL string `yaml:"bucketURL"`
    AccessKeyID string `yaml:"accessKeyID"`
    AccessKeySecret string `yaml:"accessKeySecret"`
    SessionToken string `yaml:"sessionToken"`
    PublicRead bool `yaml:"publicRead"`
}

type Kodo struct {
    Endpoint string `mapstructure:"endpoint"`
    Bucket string `mapstructure:"bucket"`
    BucketURL string `mapstructure:"bucketURL"`
    AccessKeyID string `mapstructure:"accessKeyID"`
    AccessKeySecret string `mapstructure:"accessKeySecret"`
    SessionToken string `mapstructure:"sessionToken"`
    PublicRead bool `mapstructure:"publicRead"`
    Endpoint string `yaml:"endpoint"`
    Bucket string `yaml:"bucket"`
    BucketURL string `yaml:"bucketURL"`
    AccessKeyID string `yaml:"accessKeyID"`
    AccessKeySecret string `yaml:"accessKeySecret"`
    SessionToken string `yaml:"sessionToken"`
    PublicRead bool `yaml:"publicRead"`
}

type Aws struct {
    Region string `mapstructure:"region"`
    Bucket string `mapstructure:"bucket"`
    AccessKeyID string `mapstructure:"accessKeyID"`
    SecretAccessKey string `mapstructure:"secretAccessKey"`
    SessionToken string `mapstructure:"sessionToken"`
    PublicRead bool `mapstructure:"publicRead"`
    Region string `yaml:"region"`
    Bucket string `yaml:"bucket"`
    AccessKeyID string `yaml:"accessKeyID"`
    SecretAccessKey string `yaml:"secretAccessKey"`
    SessionToken string `yaml:"sessionToken"`
    PublicRead bool `yaml:"publicRead"`
}

type User struct {
    RPC struct {
        RegisterIP string `mapstructure:"registerIP"`
        ListenIP string `mapstructure:"listenIP"`
        AutoSetPorts bool `mapstructure:"autoSetPorts"`
        Ports []int `mapstructure:"ports"`
    } `mapstructure:"rpc"`
    Prometheus Prometheus `mapstructure:"prometheus"`
    RPC RPC `yaml:"rpc"`
    Prometheus Prometheus `yaml:"prometheus"`
}

type RPC struct {
    RegisterIP string `yaml:"registerIP"`
    ListenIP string `yaml:"listenIP"`
    AutoSetPorts bool `yaml:"autoSetPorts"`
    Ports []int `yaml:"ports"`
}

type Redis struct {
    Address []string `mapstructure:"address"`
    Username string `mapstructure:"username"`
    Password string `mapstructure:"password"`
    ClusterMode bool `mapstructure:"clusterMode"`
    DB int `mapstructure:"storage"`
    MaxRetry int `mapstructure:"maxRetry"`
    PoolSize int `mapstructure:"poolSize"`
    Disable bool `yaml:"-"`
    Address []string `yaml:"address"`
    Username string `yaml:"username"`
    Password string `yaml:"password"`
    ClusterMode bool `yaml:"clusterMode"`
    DB int `yaml:"storage"`
    MaxRetry int `yaml:"maxRetry"`
    PoolSize int `yaml:"poolSize"`
}

type BeforeConfig struct {
    Enable bool `mapstructure:"enable"`
    Timeout int `mapstructure:"timeout"`
    FailedContinue bool `mapstructure:"failedContinue"`
    AllowedTypes []string `mapstructure:"allowedTypes"`
    DeniedTypes []string `mapstructure:"deniedTypes"`
    Enable bool `yaml:"enable"`
    Timeout int `yaml:"timeout"`
    FailedContinue bool `yaml:"failedContinue"`
    AllowedTypes []string `yaml:"allowedTypes"`
    DeniedTypes []string `yaml:"deniedTypes"`
}

type AfterConfig struct {
    Enable bool `mapstructure:"enable"`
    Timeout int `mapstructure:"timeout"`
    AttentionIds []string `mapstructure:"attentionIds"`
    AllowedTypes []string `mapstructure:"allowedTypes"`
    DeniedTypes []string `mapstructure:"deniedTypes"`
    Enable bool `yaml:"enable"`
    Timeout int `yaml:"timeout"`
    AttentionIds []string `yaml:"attentionIds"`
    AllowedTypes []string `yaml:"allowedTypes"`
    DeniedTypes []string `yaml:"deniedTypes"`
}

type Share struct {
    Secret string `mapstructure:"secret"`
    IMAdminUserID []string `mapstructure:"imAdminUserID"`
    MultiLogin MultiLogin `mapstructure:"multiLogin"`
    Secret string `yaml:"secret"`
    IMAdminUserID []string `yaml:"imAdminUserID"`
    MultiLogin MultiLogin `yaml:"multiLogin"`
}

type MultiLogin struct {
    Policy int `mapstructure:"policy"`
    MaxNumOneEnd int `mapstructure:"maxNumOneEnd"`
    Policy int `yaml:"policy"`
    MaxNumOneEnd int `yaml:"maxNumOneEnd"`
}

type RpcService struct {
    User string `mapstructure:"user"`
    Friend string `mapstructure:"friend"`
    Msg string `mapstructure:"msg"`
    Push string `mapstructure:"push"`
    MessageGateway string `mapstructure:"messageGateway"`
    Group string `mapstructure:"group"`
    Auth string `mapstructure:"auth"`
    Conversation string `mapstructure:"conversation"`
    Third string `mapstructure:"third"`
    User string `yaml:"user"`
    Friend string `yaml:"friend"`
    Msg string `yaml:"msg"`
    Push string `yaml:"push"`
    MessageGateway string `yaml:"messageGateway"`
    Group string `yaml:"group"`
    Auth string `yaml:"auth"`
    Conversation string `yaml:"conversation"`
    Third string `yaml:"third"`
}

func (r *RpcService) GetServiceNames() []string {
@@ -418,80 +388,80 @@ func (r *RpcService) GetServiceNames() []string {

// FullConfig stores all configurations for before and after events
type Webhooks struct {
    URL string `mapstructure:"url"`
    BeforeSendSingleMsg BeforeConfig `mapstructure:"beforeSendSingleMsg"`
    BeforeUpdateUserInfoEx BeforeConfig `mapstructure:"beforeUpdateUserInfoEx"`
    AfterUpdateUserInfoEx AfterConfig `mapstructure:"afterUpdateUserInfoEx"`
    AfterSendSingleMsg AfterConfig `mapstructure:"afterSendSingleMsg"`
    BeforeSendGroupMsg BeforeConfig `mapstructure:"beforeSendGroupMsg"`
    BeforeMsgModify BeforeConfig `mapstructure:"beforeMsgModify"`
    AfterSendGroupMsg AfterConfig `mapstructure:"afterSendGroupMsg"`
    AfterUserOnline AfterConfig `mapstructure:"afterUserOnline"`
    AfterUserOffline AfterConfig `mapstructure:"afterUserOffline"`
    AfterUserKickOff AfterConfig `mapstructure:"afterUserKickOff"`
    BeforeOfflinePush BeforeConfig `mapstructure:"beforeOfflinePush"`
    BeforeOnlinePush BeforeConfig `mapstructure:"beforeOnlinePush"`
    BeforeGroupOnlinePush BeforeConfig `mapstructure:"beforeGroupOnlinePush"`
    BeforeAddFriend BeforeConfig `mapstructure:"beforeAddFriend"`
    BeforeUpdateUserInfo BeforeConfig `mapstructure:"beforeUpdateUserInfo"`
    AfterUpdateUserInfo AfterConfig `mapstructure:"afterUpdateUserInfo"`
    BeforeCreateGroup BeforeConfig `mapstructure:"beforeCreateGroup"`
    AfterCreateGroup AfterConfig `mapstructure:"afterCreateGroup"`
    BeforeMemberJoinGroup BeforeConfig `mapstructure:"beforeMemberJoinGroup"`
    BeforeSetGroupMemberInfo BeforeConfig `mapstructure:"beforeSetGroupMemberInfo"`
    AfterSetGroupMemberInfo AfterConfig `mapstructure:"afterSetGroupMemberInfo"`
    AfterQuitGroup AfterConfig `mapstructure:"afterQuitGroup"`
    AfterKickGroupMember AfterConfig `mapstructure:"afterKickGroupMember"`
    AfterDismissGroup AfterConfig `mapstructure:"afterDismissGroup"`
    BeforeApplyJoinGroup BeforeConfig `mapstructure:"beforeApplyJoinGroup"`
    AfterGroupMsgRead AfterConfig `mapstructure:"afterGroupMsgRead"`
    AfterSingleMsgRead AfterConfig `mapstructure:"afterSingleMsgRead"`
    BeforeUserRegister BeforeConfig `mapstructure:"beforeUserRegister"`
    AfterUserRegister AfterConfig `mapstructure:"afterUserRegister"`
    AfterTransferGroupOwner AfterConfig `mapstructure:"afterTransferGroupOwner"`
    BeforeSetFriendRemark BeforeConfig `mapstructure:"beforeSetFriendRemark"`
    AfterSetFriendRemark AfterConfig `mapstructure:"afterSetFriendRemark"`
    AfterGroupMsgRevoke AfterConfig `mapstructure:"afterGroupMsgRevoke"`
    AfterJoinGroup AfterConfig `mapstructure:"afterJoinGroup"`
    BeforeInviteUserToGroup BeforeConfig `mapstructure:"beforeInviteUserToGroup"`
    AfterSetGroupInfo AfterConfig `mapstructure:"afterSetGroupInfo"`
    BeforeSetGroupInfo BeforeConfig `mapstructure:"beforeSetGroupInfo"`
    AfterSetGroupInfoEx AfterConfig `mapstructure:"afterSetGroupInfoEx"`
    BeforeSetGroupInfoEx BeforeConfig `mapstructure:"beforeSetGroupInfoEx"`
    AfterRevokeMsg AfterConfig `mapstructure:"afterRevokeMsg"`
    BeforeAddBlack BeforeConfig `mapstructure:"beforeAddBlack"`
    AfterAddFriend AfterConfig `mapstructure:"afterAddFriend"`
    BeforeAddFriendAgree BeforeConfig `mapstructure:"beforeAddFriendAgree"`
    AfterAddFriendAgree AfterConfig `mapstructure:"afterAddFriendAgree"`
    AfterDeleteFriend AfterConfig `mapstructure:"afterDeleteFriend"`
    BeforeImportFriends BeforeConfig `mapstructure:"beforeImportFriends"`
    AfterImportFriends AfterConfig `mapstructure:"afterImportFriends"`
    AfterRemoveBlack AfterConfig `mapstructure:"afterRemoveBlack"`
    URL string `yaml:"url"`
    BeforeSendSingleMsg BeforeConfig `yaml:"beforeSendSingleMsg"`
    BeforeUpdateUserInfoEx BeforeConfig `yaml:"beforeUpdateUserInfoEx"`
    AfterUpdateUserInfoEx AfterConfig `yaml:"afterUpdateUserInfoEx"`
    AfterSendSingleMsg AfterConfig `yaml:"afterSendSingleMsg"`
    BeforeSendGroupMsg BeforeConfig `yaml:"beforeSendGroupMsg"`
    BeforeMsgModify BeforeConfig `yaml:"beforeMsgModify"`
    AfterSendGroupMsg AfterConfig `yaml:"afterSendGroupMsg"`
    AfterUserOnline AfterConfig `yaml:"afterUserOnline"`
    AfterUserOffline AfterConfig `yaml:"afterUserOffline"`
    AfterUserKickOff AfterConfig `yaml:"afterUserKickOff"`
    BeforeOfflinePush BeforeConfig `yaml:"beforeOfflinePush"`
    BeforeOnlinePush BeforeConfig `yaml:"beforeOnlinePush"`
    BeforeGroupOnlinePush BeforeConfig `yaml:"beforeGroupOnlinePush"`
    BeforeAddFriend BeforeConfig `yaml:"beforeAddFriend"`
    BeforeUpdateUserInfo BeforeConfig `yaml:"beforeUpdateUserInfo"`
    AfterUpdateUserInfo AfterConfig `yaml:"afterUpdateUserInfo"`
    BeforeCreateGroup BeforeConfig `yaml:"beforeCreateGroup"`
    AfterCreateGroup AfterConfig `yaml:"afterCreateGroup"`
    BeforeMemberJoinGroup BeforeConfig `yaml:"beforeMemberJoinGroup"`
    BeforeSetGroupMemberInfo BeforeConfig `yaml:"beforeSetGroupMemberInfo"`
    AfterSetGroupMemberInfo AfterConfig `yaml:"afterSetGroupMemberInfo"`
    AfterQuitGroup AfterConfig `yaml:"afterQuitGroup"`
    AfterKickGroupMember AfterConfig `yaml:"afterKickGroupMember"`
    AfterDismissGroup AfterConfig `yaml:"afterDismissGroup"`
    BeforeApplyJoinGroup BeforeConfig `yaml:"beforeApplyJoinGroup"`
    AfterGroupMsgRead AfterConfig `yaml:"afterGroupMsgRead"`
    AfterSingleMsgRead AfterConfig `yaml:"afterSingleMsgRead"`
    BeforeUserRegister BeforeConfig `yaml:"beforeUserRegister"`
    AfterUserRegister AfterConfig `yaml:"afterUserRegister"`
    AfterTransferGroupOwner AfterConfig `yaml:"afterTransferGroupOwner"`
    BeforeSetFriendRemark BeforeConfig `yaml:"beforeSetFriendRemark"`
    AfterSetFriendRemark AfterConfig `yaml:"afterSetFriendRemark"`
    AfterGroupMsgRevoke AfterConfig `yaml:"afterGroupMsgRevoke"`
    AfterJoinGroup AfterConfig `yaml:"afterJoinGroup"`
    BeforeInviteUserToGroup BeforeConfig `yaml:"beforeInviteUserToGroup"`
    AfterSetGroupInfo AfterConfig `yaml:"afterSetGroupInfo"`
    BeforeSetGroupInfo BeforeConfig `yaml:"beforeSetGroupInfo"`
    AfterSetGroupInfoEx AfterConfig `yaml:"afterSetGroupInfoEx"`
    BeforeSetGroupInfoEx BeforeConfig `yaml:"beforeSetGroupInfoEx"`
    AfterRevokeMsg AfterConfig `yaml:"afterRevokeMsg"`
    BeforeAddBlack BeforeConfig `yaml:"beforeAddBlack"`
    AfterAddFriend AfterConfig `yaml:"afterAddFriend"`
    BeforeAddFriendAgree BeforeConfig `yaml:"beforeAddFriendAgree"`
    AfterAddFriendAgree AfterConfig `yaml:"afterAddFriendAgree"`
    AfterDeleteFriend AfterConfig `yaml:"afterDeleteFriend"`
    BeforeImportFriends BeforeConfig `yaml:"beforeImportFriends"`
    AfterImportFriends AfterConfig `yaml:"afterImportFriends"`
    AfterRemoveBlack AfterConfig `yaml:"afterRemoveBlack"`
}

type ZooKeeper struct {
    Schema string `mapstructure:"schema"`
    Address []string `mapstructure:"address"`
    Username string `mapstructure:"username"`
    Password string `mapstructure:"password"`
    Schema string `yaml:"schema"`
    Address []string `yaml:"address"`
    Username string `yaml:"username"`
    Password string `yaml:"password"`
}

type Discovery struct {
    Enable string `mapstructure:"enable"`
    Etcd Etcd `mapstructure:"etcd"`
    Kubernetes Kubernetes `mapstructure:"kubernetes"`
    RpcService RpcService `mapstructure:"rpcService"`
    Enable string `yaml:"enable"`
    Etcd Etcd `yaml:"etcd"`
    Kubernetes Kubernetes `yaml:"kubernetes"`
    RpcService RpcService `yaml:"rpcService"`
}

type Kubernetes struct {
    Namespace string `mapstructure:"namespace"`
    Namespace string `yaml:"namespace"`
}

type Etcd struct {
    RootDirectory string `mapstructure:"rootDirectory"`
    Address []string `mapstructure:"address"`
    Username string `mapstructure:"username"`
    Password string `mapstructure:"password"`
    RootDirectory string `yaml:"rootDirectory"`
    Address []string `yaml:"address"`
    Username string `yaml:"username"`
    Password string `yaml:"password"`
}

func (m *Mongo) Build() *mongoutil.Config {
@@ -783,7 +753,7 @@ func (a *AllConfig) GetConfigNames() []string {
    }
}

var (
const (
    FileName = "config.yaml"
    DiscoveryConfigFilename = "discovery.yml"
    KafkaConfigFileName = "kafka.yml"
@@ -14,13 +14,16 @@

package config

import "github.com/openimsdk/tools/utils/runtimeenv"

const ConfKey = "conf"

const (
    MountConfigFilePath = "CONFIG_PATH"
    DeploymentType = "DEPLOYMENT_TYPE"
    KUBERNETES = "kubernetes"
    KUBERNETES = runtimeenv.Kubernetes
    ETCD = "etcd"
    //Standalone = "standalone"
)

const (
11
pkg/common/config/global.go
Normal file
@@ -0,0 +1,11 @@
package config

var standalone bool

func SetStandalone() {
    standalone = true
}

func Standalone() bool {
    return standalone
}
@@ -7,11 +7,12 @@ import (

    "github.com/mitchellh/mapstructure"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/utils/runtimeenv"
    "github.com/spf13/viper"
)

func Load(configDirectory string, configFileName string, envPrefix string, runtimeEnv string, config any) error {
    if runtimeEnv == KUBERNETES {
func Load(configDirectory string, configFileName string, envPrefix string, config any) error {
    if runtimeenv.RuntimeEnvironment() == KUBERNETES {
        mountPath := os.Getenv(MountConfigFilePath)
        if mountPath == "" {
            return errs.ErrArgs.WrapMsg(MountConfigFilePath + " env is empty")
@@ -35,7 +36,7 @@ func loadConfig(path string, envPrefix string, config any) error {
    }

    if err := v.Unmarshal(config, func(config *mapstructure.DecoderConfig) {
        config.TagName = "mapstructure"
        config.TagName = StructTagName
    }); err != nil {
        return errs.WrapMsg(err, "failed to unmarshal config", "path", path, "envPrefix", envPrefix)
    }
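Note: this TagName switch is what makes the wholesale mapstructure-to-yaml tag migration in config.go work. Viper delegates decoding to mapstructure, and the decoder option retargets it at the `yaml` tag, so one set of tags now serves both file parsing and unmarshalling. A self-contained illustration (the struct and file name are examples, not the project's loader):

package main

import (
    "fmt"

    "github.com/mitchellh/mapstructure"
    "github.com/spf13/viper"
)

type redisConf struct {
    Address  []string `yaml:"address"`
    Username string   `yaml:"username"`
}

func main() {
    v := viper.New()
    v.SetConfigType("yaml")
    v.SetConfigFile("redis.yml") // example path
    if err := v.ReadInConfig(); err != nil {
        panic(err)
    }
    var conf redisConf
    // Without TagName = "yaml", mapstructure would look for
    // `mapstructure:"..."` tags and leave these fields zero-valued.
    if err := v.Unmarshal(&conf, func(dc *mapstructure.DecoderConfig) {
        dc.TagName = "yaml"
    }); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", conf)
}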
@@ -19,6 +19,8 @@ import (

    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/openimsdk/tools/discovery"
    "github.com/openimsdk/tools/discovery/standalone"
    "github.com/openimsdk/tools/utils/runtimeenv"
    "google.golang.org/grpc"

    "github.com/openimsdk/tools/discovery/kubernetes"
@@ -28,8 +30,11 @@ import (
)

// NewDiscoveryRegister creates a new service discovery and registry client based on the provided environment type.
func NewDiscoveryRegister(discovery *config.Discovery, runtimeEnv string, watchNames []string) (discovery.SvcDiscoveryRegistry, error) {
    if runtimeEnv == config.KUBERNETES {
func NewDiscoveryRegister(discovery *config.Discovery, watchNames []string) (discovery.SvcDiscoveryRegistry, error) {
    if config.Standalone() {
        return standalone.GetSvcDiscoveryRegistry(), nil
    }
    if runtimeenv.RuntimeEnvironment() == config.KUBERNETES {
        return kubernetes.NewKubernetesConnManager(discovery.Kubernetes.Namespace,
            grpc.WithDefaultCallOptions(
                grpc.MaxCallSendMsgSize(1024*1024*20),
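Note: the selection order after this change is fixed: the process-wide standalone flag wins, then the Kubernetes runtime environment, then the configured backend (etcd). A single-binary entrypoint only has to flip the flag before anything asks for discovery; a sketch (the kdisc import path is assumed):

package main

import (
    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister" // path assumed
)

func main() {
    // Must run before any NewDiscoveryRegister call so every service in
    // this process shares the same in-memory registry.
    config.SetStandalone()

    disConfig := &config.Discovery{Enable: config.ETCD} // ignored once standalone
    client, err := kdisc.NewDiscoveryRegister(disConfig, nil)
    if err != nil {
        panic(err)
    }
    _ = client // hand to each service's Start
}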
@@ -1,10 +1,11 @@
package prommetrics

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "net"
    "strconv"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
@@ -24,6 +25,10 @@ var (
    )
)

func RegistryApi() {
    registry.MustRegister(apiCounter, httpCounter)
}

func ApiInit(listener net.Listener) error {
    apiRegistry := prometheus.NewRegistry()
    cs := append(
@@ -41,9 +46,3 @@ func APICall(path string, method string, apiCode int) {
func HttpCall(path string, method string, status int) {
    httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
}

//func ApiHandler() http.Handler {
//	return promhttp.InstrumentMetricHandler(
//		apiRegistry, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}),
//	)
//}
@@ -1,31 +0,0 @@
package prommetrics

import "fmt"

const (
    APIKeyName = "api"
    MessageTransferKeyName = "message-transfer"
)

type Target struct {
    Target string `json:"target"`
    Labels map[string]string `json:"labels"`
}

type RespTarget struct {
    Targets []string `json:"targets"`
    Labels map[string]string `json:"labels"`
}

func BuildDiscoveryKey(name string) string {
    return fmt.Sprintf("%s/%s/%s", "openim", "prometheus_discovery", name)
}

func BuildDefaultTarget(host string, ip int) Target {
    return Target{
        Target: fmt.Sprintf("%s:%d", host, ip),
        Labels: map[string]string{
            "namespace": "default",
        },
    }
}
@@ -1,15 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prommetrics // import "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@@ -24,3 +24,7 @@ var (
        Help: "The number of user login",
    })
)

func RegistryAuth() {
    registry.MustRegister(UserLoginCounter)
}
@@ -36,3 +36,12 @@ var (
        Help: "The number of group chat msg failed processed",
    })
)

func RegistryMsg() {
    registry.MustRegister(
        SingleChatMsgProcessSuccessCounter,
        SingleChatMsgProcessFailedCounter,
        GroupChatMsgProcessSuccessCounter,
        GroupChatMsgProcessFailedCounter,
    )
}
@@ -24,3 +24,7 @@ var (
        Help: "The number of online user num",
    })
)

func RegistryMsgGateway() {
    registry.MustRegister(OnlineUserGauge)
}
@@ -28,3 +28,10 @@ var (
        Help: "The number of messages with a push time exceeding 10 seconds",
    })
)

func RegistryPush() {
    registry.MustRegister(
        MsgOfflinePushFailedCounter,
        MsgLoneTimePushCounter,
    )
}
@@ -8,3 +8,7 @@ var (
        Help: "The number of user login",
    })
)

func RegistryUser() {
    registry.MustRegister(UserRegisterCounter)
}
@@ -15,14 +15,42 @@
package prommetrics

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "errors"
    "fmt"
    "net"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

const commonPath = "/metrics"

var registry = &prometheusRegistry{prometheus.NewRegistry()}

type prometheusRegistry struct {
    *prometheus.Registry
}

func (x *prometheusRegistry) MustRegister(cs ...prometheus.Collector) {
    for _, c := range cs {
        if err := x.Registry.Register(c); err != nil {
            if errors.As(err, &prometheus.AlreadyRegisteredError{}) {
                continue
            }
            panic(err)
        }
    }
}

func init() {
    registry.MustRegister(
        collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
        collectors.NewGoCollector(),
    )
}

var (
    baseCollector = []prometheus.Collector{
        collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
@@ -36,3 +64,48 @@ func Init(registry *prometheus.Registry, listener net.Listener, path string, han
    srv.Handle(path, handler)
    return http.Serve(listener, srv)
}

func RegistryAll() {
    RegistryApi()
    RegistryAuth()
    RegistryMsg()
    RegistryMsgGateway()
    RegistryPush()
    RegistryUser()
    RegistryRpc()
    RegistryTransfer()
}

func Start(listener net.Listener) error {
    srv := http.NewServeMux()
    srv.Handle(commonPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
    return http.Serve(listener, srv)
}

const (
    APIKeyName = "api"
    MessageTransferKeyName = "message-transfer"
)

type Target struct {
    Target string `json:"target"`
    Labels map[string]string `json:"labels"`
}

type RespTarget struct {
    Targets []string `json:"targets"`
    Labels map[string]string `json:"labels"`
}

func BuildDiscoveryKey(name string) string {
    return fmt.Sprintf("%s/%s/%s", "openim", "prometheus_discovery", name)
}

func BuildDefaultTarget(host string, ip int) Target {
    return Target{
        Target: fmt.Sprintf("%s:%d", host, ip),
        Labels: map[string]string{
            "namespace": "default",
        },
    }
}
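Note: the MustRegister wrapper above is what makes metric registration idempotent. In single-process mode every service registers its collectors into the one package-level registry, so a repeat registration surfaces as prometheus.AlreadyRegisteredError and is swallowed instead of panicking (the test further down calls RegistryApi twice for exactly this reason). The pattern in isolation (the counter is a stand-in, not one of the project's metrics):

package main

import (
    "errors"
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func mustRegister(reg *prometheus.Registry, cs ...prometheus.Collector) {
    for _, c := range cs {
        if err := reg.Register(c); err != nil {
            // A second registration of the same collector is expected when
            // several services share one process; anything else is fatal.
            if errors.As(err, &prometheus.AlreadyRegisteredError{}) {
                continue
            }
            panic(err)
        }
    }
}

func main() {
    reg := prometheus.NewRegistry()
    c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo"})
    mustRegister(reg, c)
    mustRegister(reg, c) // no panic: AlreadyRegisteredError is tolerated
    fmt.Println("registered once, called twice")
}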
@@ -14,6 +14,8 @@

package prommetrics

import "testing"

//func TestNewGrpcPromObj(t *testing.T) {
//	// Create a custom metric to pass into the NewGrpcPromObj function.
//	customMetric := prometheus.NewCounter(prometheus.CounterOpts{
@@ -67,3 +69,9 @@ package prommetrics
//	})
//	}
//}

func TestName(t *testing.T) {
    RegistryApi()
    RegistryApi()

}
@@ -1,12 +1,13 @@
package prommetrics

import (
    "net"
    "strconv"

    gp "github.com/grpc-ecosystem/go-grpc-prometheus"
    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "net"
    "strconv"
)

const rpcPath = commonPath
@@ -22,6 +23,10 @@ var (
    )
)

func RegistryRpc() {
    registry.MustRegister(rpcCounter)
}

func RpcInit(cs []prometheus.Collector, listener net.Listener) error {
    reg := prometheus.NewRegistry()
    cs = append(append(
@@ -15,9 +15,10 @@
package prommetrics

import (
    "net"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "net"
)

var (
@@ -43,6 +44,16 @@ var (
    })
)

func RegistryTransfer() {
    registry.MustRegister(
        MsgInsertRedisSuccessCounter,
        MsgInsertRedisFailedCounter,
        MsgInsertMongoSuccessCounter,
        MsgInsertMongoFailedCounter,
        SeqSetFailedCounter,
    )
}

func TransferInit(listener net.Listener) error {
    reg := prometheus.NewRegistry()
    cs := append(
@@ -1,15 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redispubsub // import "github.com/openimsdk/open-im-server/v3/pkg/common/redispubsub"
@@ -1,30 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redispubsub

import "github.com/redis/go-redis/v9"

type Publisher struct {
	client  redis.UniversalClient
	channel string
}

func NewPublisher(client redis.UniversalClient, channel string) *Publisher {
	return &Publisher{client: client, channel: channel}
}

func (p *Publisher) Publish(message string) error {
	return p.client.Publish(ctx, p.channel, message).Err()
}
@@ -1,49 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redispubsub

import (
	"context"

	"github.com/redis/go-redis/v9"
)

var ctx = context.Background()

type Subscriber struct {
	client  redis.UniversalClient
	channel string
}

func NewSubscriber(client redis.UniversalClient, channel string) *Subscriber {
	return &Subscriber{client: client, channel: channel}
}

func (s *Subscriber) OnMessage(ctx context.Context, callback func(string)) error {
	messageChannel := s.client.Subscribe(ctx, s.channel).Channel()

	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case msg := <-messageChannel:
				callback(msg.Payload)
			}
		}
	}()

	return nil
}
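For reference, the deleted redispubsub pair was used roughly like this against the pre-removal tree; the channel name and address are illustrative:

	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	sub := redispubsub.NewSubscriber(rdb, "openim.config")
	_ = sub.OnMessage(context.Background(), func(payload string) {
		fmt.Println("got:", payload) // runs on the subscriber goroutine
	})
	pub := redispubsub.NewPublisher(rdb, "openim.config")
	_ = pub.Publish("reload")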
@@ -1,15 +0,0 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package startrpc // import "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
@@ -19,7 +19,6 @@ import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"os"
	"os/signal"
	"strconv"
@@ -27,205 +26,186 @@ import (
	"time"

	conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
	disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
	"github.com/openimsdk/tools/discovery/etcd"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/openimsdk/tools/utils/jsonutil"
	"github.com/openimsdk/tools/utils/network"
	"google.golang.org/grpc/status"

	"github.com/openimsdk/tools/utils/runtimeenv"

	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
	"github.com/openimsdk/tools/discovery"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mw"
	"github.com/openimsdk/tools/utils/network"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// Start rpc server.
func Start[T any](ctx context.Context, discovery *conf.Discovery, prometheusConfig *conf.Prometheus, listenIP,
func init() {
	prommetrics.RegistryAll()
}

func Start[T any](ctx context.Context, disc *conf.Discovery, prometheusConfig *conf.Prometheus, listenIP,
	registerIP string, autoSetPorts bool, rpcPorts []int, index int, rpcRegisterName string, notification *conf.Notification, config T,
	watchConfigNames []string, watchServiceNames []string,
	rpcFn func(ctx context.Context, config T, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error,
	rpcFn func(ctx context.Context, config T, client discovery.Conn, server grpc.ServiceRegistrar) error,
	options ...grpc.ServerOption) error {

	watchConfigNames = append(watchConfigNames, conf.LogConfigFileName)
	var (
		rpcTcpAddr     string
		netDone        = make(chan struct{}, 2)
		netErr         error
		prometheusPort int
	)

	if notification != nil {
		conf.InitNotification(notification)
	}

	options = append(options, mw.GrpcServer())

	registerIP, err := network.GetRpcRegisterIP(registerIP)
	if err != nil {
		return err
	}

	runTimeEnv := runtimeenv.PrintRuntimeEnvironment()

	if !autoSetPorts {
		rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
	var prometheusListenAddr string
	if autoSetPorts {
		prometheusListenAddr = net.JoinHostPort(listenIP, "0")
	} else {
		prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
		if err != nil {
			return err
		}
		rpcTcpAddr = net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort))
	} else {
		rpcTcpAddr = net.JoinHostPort(network.GetListenIP(listenIP), "0")
		prometheusListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(prometheusPort))
	}

	getAutoPort := func() (net.Listener, int, error) {
		listener, err := net.Listen("tcp", rpcTcpAddr)
		if err != nil {
			return nil, 0, errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
		}
		_, portStr, _ := net.SplitHostPort(listener.Addr().String())
		port, _ := strconv.Atoi(portStr)
		return listener, port, nil
	}
	watchConfigNames = append(watchConfigNames, conf.LogConfigFileName)

	if autoSetPorts && discovery.Enable != conf.ETCD {
		return errs.New("only etcd support autoSetPorts", "rpcRegisterName", rpcRegisterName).Wrap()
	}
	client, err := kdisc.NewDiscoveryRegister(discovery, runTimeEnv, watchServiceNames)
	client, err := kdisc.NewDiscoveryRegister(disc, watchServiceNames)
	if err != nil {
		return err
	}

	defer client.Close()
	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
	client.AddOption(
		mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")),
	)

	// var reg *prometheus.Registry
	// var metric *grpcprometheus.ServerMetrics
	if prometheusConfig.Enable {
		// cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
		// reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics)
		// options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
		// grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
	ctx, cancel := context.WithCancelCause(ctx)

	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)
		select {
		case <-ctx.Done():
			return
		case val := <-sigs:
			log.ZDebug(ctx, "recv signal", "signal", val.String())
			cancel(fmt.Errorf("signal %s", val.String()))
		}
	}()

	if prometheusListenAddr != "" {
		options = append(
			options, mw.GrpcServer(),
			options,
			prommetricsUnaryInterceptor(rpcRegisterName),
			prommetricsStreamInterceptor(rpcRegisterName),
		)
		prometheusListener, prometheusPort, err := listenTCP(prometheusListenAddr)
		if err != nil {
			return err
		}
		log.ZDebug(ctx, "prometheus start", "addr", prometheusListener.Addr(), "rpcRegisterName", rpcRegisterName)
		target, err := jsonutil.JsonMarshal(prommetrics.BuildDefaultTarget(registerIP, prometheusPort))
		if err != nil {
			return err
		}
		if err := client.SetKey(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), target); err != nil {
			if !errors.Is(err, discovery.ErrNotSupportedKeyValue) {
				return err
			}
		}
		go func() {
			err := prommetrics.Start(prometheusListener)
			if err == nil {
				err = fmt.Errorf("listener done")
			}
			cancel(fmt.Errorf("prommetrics %s %w", rpcRegisterName, err))
		}()
	}

	var (
		listener        net.Listener
		rpcServer       *grpc.Server
		rpcGracefulStop chan struct{}
	)

	onGrpcServiceRegistrar := func(desc *grpc.ServiceDesc, impl any) {
		if rpcServer != nil {
			rpcServer.RegisterService(desc, impl)
			return
		}
		var rpcListenAddr string
		if autoSetPorts {
			listener, prometheusPort, err = getAutoPort()
			if err != nil {
				return err
			}

			etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()

			_, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(rpcRegisterName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
			if err != nil {
				return errs.WrapMsg(err, "etcd put err")
			}
			rpcListenAddr = net.JoinHostPort(listenIP, "0")
		} else {
			prometheusPort, err = datautil.GetElemByIndex(prometheusConfig.Ports, index)
			rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
			if err != nil {
				return err
				cancel(fmt.Errorf("rpcPorts index out of range %s %w", rpcRegisterName, err))
				return
			}
			listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
			rpcListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(rpcPort))
		}
		rpcListener, err := net.Listen("tcp", rpcListenAddr)
		if err != nil {
			return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
			cancel(fmt.Errorf("listen rpc %s %s %w", rpcRegisterName, rpcListenAddr, err))
			return
		}
	}
	cs := prommetrics.GetGrpcCusMetrics(rpcRegisterName, discovery)

		rpcServer = grpc.NewServer(options...)
		rpcServer.RegisterService(desc, impl)
		rpcGracefulStop = make(chan struct{})
		rpcPort := rpcListener.Addr().(*net.TCPAddr).Port
		log.ZDebug(ctx, "rpc start register", "rpcRegisterName", rpcRegisterName, "registerIP", registerIP, "rpcPort", rpcPort)
		grpcOpt := grpc.WithTransportCredentials(insecure.NewCredentials())
		rpcGracefulStop = make(chan struct{})
		go func() {
			if err := prommetrics.RpcInit(cs, listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
				netErr = errs.WrapMsg(err, fmt.Sprintf("rpc %s prometheus start err: %d", rpcRegisterName, prometheusPort))
				netDone <- struct{}{}
			}
			//metric.InitializeMetrics(srv)
			// Create a HTTP server for prometheus.
			// httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
			// if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			// netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
			// netDone <- struct{}{}
			// }
			<-ctx.Done()
			rpcServer.GracefulStop()
			close(rpcGracefulStop)
		}()
	} else {
		options = append(options, mw.GrpcServer())
	}

	listener, port, err := getAutoPort()
	if err != nil {
		return err
	}

	log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", port,
		"prometheusPort", prometheusPort)

	defer listener.Close()
	srv := grpc.NewServer(options...)

	err = rpcFn(ctx, config, client, srv)
	if err != nil {
		return err
	}

	err = client.Register(
		rpcRegisterName,
		registerIP,
		port,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		return err
		if err := client.Register(ctx, rpcRegisterName, registerIP, rpcListener.Addr().(*net.TCPAddr).Port, grpcOpt); err != nil {
			cancel(fmt.Errorf("rpc register %s %w", rpcRegisterName, err))
			return
		}

	go func() {
		err := srv.Serve(listener)
		if err != nil && !errors.Is(err, http.ErrServerClosed) {
			netErr = errs.WrapMsg(err, "rpc start err: ", rpcTcpAddr)
			netDone <- struct{}{}
			err := rpcServer.Serve(rpcListener)
			if err == nil {
				err = fmt.Errorf("serve end")
			}
			cancel(fmt.Errorf("rpc %s %w", rpcRegisterName, err))
		}()

	if discovery.Enable == conf.ETCD {
		cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), watchConfigNames)
		cm.Watch(ctx)
	}

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM)
	err = rpcFn(ctx, config, client, &grpcServiceRegistrar{onRegisterService: onGrpcServiceRegistrar})
	if err != nil {
		return err
	}
	<-ctx.Done()
	log.ZDebug(ctx, "cmd wait done", "err", context.Cause(ctx))
	if rpcGracefulStop != nil {
		timeout := time.NewTimer(time.Second * 15)
		defer timeout.Stop()
		select {
		case <-sigs:
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil {
				return err
		case <-timeout.C:
			log.ZWarn(ctx, "rpc graceful stop timeout", nil)
		case <-rpcGracefulStop:
			log.ZDebug(ctx, "rpc graceful stop done")
		}
		return nil
	case <-netDone:
		return netErr
	}
	return context.Cause(ctx)
}

func gracefulStopWithCtx(ctx context.Context, f func()) error {
	done := make(chan struct{}, 1)
	go func() {
		f()
		close(done)
	}()
	select {
	case <-ctx.Done():
		return errs.New("timeout, ctx graceful stop")
	case <-done:
		return nil
func listenTCP(addr string) (net.Listener, int, error) {
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, 0, errs.WrapMsg(err, "listen err", "addr", addr)
	}
	return listener, listener.Addr().(*net.TCPAddr).Port, nil
}

func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
@@ -249,3 +229,11 @@ func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption {
	return grpc.ChainStreamInterceptor()
}

type grpcServiceRegistrar struct {
	onRegisterService func(desc *grpc.ServiceDesc, impl any)
}

func (x *grpcServiceRegistrar) RegisterService(desc *grpc.ServiceDesc, impl any) {
	x.onRegisterService(desc, impl)
}
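Note: the reworked Start no longer builds the *grpc.Server eagerly. rpcFn now receives a grpc.ServiceRegistrar, and the first RegisterService call is what allocates the listener, the server, and the etcd registration; a service that registers nothing never opens an RPC port, which is what lets the monolithic binary share one process. A caller sketch against the new signature; the service name, config type, and pbuser registration are hypothetical:

	err := startrpc.Start(ctx, &discConf, &promConf,
		"0.0.0.0", "", true /* autoSetPorts */, nil, 0,
		"user-rpc-service", nil, cfg, nil, nil,
		func(ctx context.Context, cfg Config, client discovery.Conn, srv grpc.ServiceRegistrar) error {
			pbuser.RegisterUserServer(srv, userServer) // triggers the lazy listener + registration
			return nil
		},
	)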
50
pkg/common/storage/cache/mcache/minio.go
vendored
Normal file
@@ -0,0 +1,50 @@
package mcache

import (
	"context"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/s3/minio"
)

func NewMinioCache(cache database.Cache) minio.Cache {
	return &minioCache{
		cache:      cache,
		expireTime: time.Hour * 24 * 7,
	}
}

type minioCache struct {
	cache      database.Cache
	expireTime time.Duration
}

func (g *minioCache) getObjectImageInfoKey(key string) string {
	return cachekey.GetObjectImageInfoKey(key)
}

func (g *minioCache) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
	return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
}

func (g *minioCache) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
	ks := make([]string, 0, len(keys))
	for _, key := range keys {
		ks = append(ks, g.getObjectImageInfoKey(key))
	}
	return g.cache.Del(ctx, ks)
}

func (g *minioCache) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
	return g.cache.Del(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
}

func (g *minioCache) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
	return getCache[*minio.ImageInfo](ctx, g.cache, g.getObjectImageInfoKey(key), g.expireTime, fn)
}

func (g *minioCache) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
	return getCache[string](ctx, g.cache, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}
132
pkg/common/storage/cache/mcache/msg_cache.go
vendored
Normal file
@@ -0,0 +1,132 @@
package mcache

import (
	"context"
	"strconv"
	"sync"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/redis/go-redis/v9"
)

var (
	memMsgCache     lru.LRU[string, *model.MsgInfoModel]
	initMemMsgCache sync.Once
)

func NewMsgCache(cache database.Cache, msgDocDatabase database.Msg) cache.MsgCache {
	initMemMsgCache.Do(func() {
		memMsgCache = lru.NewLayLRU[string, *model.MsgInfoModel](1024*8, time.Hour, time.Second*10, localcache.EmptyTarget{}, nil)
	})
	return &msgCache{
		cache:          cache,
		msgDocDatabase: msgDocDatabase,
		memMsgCache:    memMsgCache,
	}
}

type msgCache struct {
	cache          database.Cache
	msgDocDatabase database.Msg
	memMsgCache    lru.LRU[string, *model.MsgInfoModel]
}

func (x *msgCache) getSendMsgKey(id string) string {
	return cachekey.GetSendMsgKey(id)
}

func (x *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
	return x.cache.Set(ctx, x.getSendMsgKey(id), strconv.Itoa(int(status)), time.Hour*24)
}

func (x *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
	key := x.getSendMsgKey(id)
	res, err := x.cache.Get(ctx, []string{key})
	if err != nil {
		return 0, err
	}
	val, ok := res[key]
	if !ok {
		return 0, errs.Wrap(redis.Nil)
	}
	status, err := strconv.Atoi(val)
	if err != nil {
		return 0, errs.WrapMsg(err, "GetSendMsgStatus strconv.Atoi error", "val", val)
	}
	return int32(status), nil
}

func (x *msgCache) getMsgCacheKey(conversationID string, seq int64) string {
	return cachekey.GetMsgCacheKey(conversationID, seq)
}

func (x *msgCache) GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) {
	if len(seqs) == 0 {
		return nil, nil
	}
	keys := make([]string, 0, len(seqs))
	keySeq := make(map[string]int64, len(seqs))
	for _, seq := range seqs {
		key := x.getMsgCacheKey(conversationID, seq)
		keys = append(keys, key)
		keySeq[key] = seq
	}
	res, err := x.memMsgCache.GetBatch(keys, func(keys []string) (map[string]*model.MsgInfoModel, error) {
		findSeqs := make([]int64, 0, len(keys))
		for _, key := range keys {
			seq, ok := keySeq[key]
			if !ok {
				continue
			}
			findSeqs = append(findSeqs, seq)
		}
		// Only fetch the seqs that actually missed the in-memory cache.
		res, err := x.msgDocDatabase.FindSeqs(ctx, conversationID, findSeqs)
		if err != nil {
			return nil, err
		}
		kv := make(map[string]*model.MsgInfoModel)
		for i := range res {
			msg := res[i]
			if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
				continue
			}
			key := x.getMsgCacheKey(conversationID, msg.Msg.Seq)
			kv[key] = msg
		}
		return kv, nil
	})
	if err != nil {
		return nil, err
	}
	return datautil.Values(res), nil
}

func (x *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
	if len(seqs) == 0 {
		return nil
	}
	for _, seq := range seqs {
		x.memMsgCache.Del(x.getMsgCacheKey(conversationID, seq))
	}
	return nil
}

func (x *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error {
	for i := range msgs {
		msg := msgs[i]
		if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
			continue
		}
		x.memMsgCache.Set(x.getMsgCacheKey(conversationID, msg.Msg.Seq), msg)
	}
	return nil
}
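Note: GetMessageBySeqs is a read-through over the process-wide LRU: seqs map to cache keys, GetBatch hands the misses to one FindSeqs call against Mongo, and the LRU keeps the write-back. The same shape in isolation, with a plain map standing in for the LRU (illustrative only):

	func batchReadThrough[K comparable, V any](keys []K, cached map[K]V, fetch func(miss []K) (map[K]V, error)) (map[K]V, error) {
		out := make(map[K]V, len(keys))
		var miss []K
		for _, k := range keys {
			if v, ok := cached[k]; ok {
				out[k] = v
			} else {
				miss = append(miss, k)
			}
		}
		if len(miss) > 0 {
			got, err := fetch(miss) // one batched source read for all misses
			if err != nil {
				return nil, err
			}
			for k, v := range got {
				cached[k] = v // write back
				out[k] = v
			}
		}
		return out, nil
	}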
82
pkg/common/storage/cache/mcache/online.go
vendored
Normal file
@@ -0,0 +1,82 @@
package mcache

import (
	"context"
	"sync"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
)

var (
	globalOnlineCache cache.OnlineCache
	globalOnlineOnce  sync.Once
)

func NewOnlineCache() cache.OnlineCache {
	globalOnlineOnce.Do(func() {
		globalOnlineCache = &onlineCache{
			user: make(map[string]map[int32]struct{}),
		}
	})
	return globalOnlineCache
}

type onlineCache struct {
	lock sync.RWMutex
	user map[string]map[int32]struct{}
}

func (x *onlineCache) GetOnline(ctx context.Context, userID string) ([]int32, error) {
	x.lock.RLock()
	defer x.lock.RUnlock()
	pSet, ok := x.user[userID]
	if !ok {
		return nil, nil
	}
	res := make([]int32, 0, len(pSet))
	for k := range pSet {
		res = append(res, k)
	}
	return res, nil
}

func (x *onlineCache) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	x.lock.Lock()
	defer x.lock.Unlock()
	pSet, ok := x.user[userID]
	if ok {
		for _, p := range offline {
			delete(pSet, p)
		}
	}
	if len(online) > 0 {
		if !ok {
			pSet = make(map[int32]struct{})
			x.user[userID] = pSet
		}
		for _, p := range online {
			pSet[p] = struct{}{}
		}
	}
	if len(pSet) == 0 {
		delete(x.user, userID)
	}
	return nil
}

func (x *onlineCache) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
	if cursor != 0 {
		return nil, 0, nil
	}
	x.lock.RLock()
	defer x.lock.RUnlock()
	res := make(map[string][]int32)
	for k, v := range x.user {
		pSet := make([]int32, 0, len(v))
		for p := range v {
			pSet = append(pSet, p)
		}
		res[k] = pSet
	}
	return res, 0, nil
}
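Note: this in-memory OnlineCache keeps userID -> set of platformIDs under one RWMutex and drops a user once the set empties; GetAllOnlineUsers returns everything in a single page, so the cursor only exists to satisfy the interface. Usage sketch (user and platform IDs illustrative):

	oc := mcache.NewOnlineCache()
	_ = oc.SetUserOnline(ctx, "u1", []int32{1, 5}, nil) // platforms 1 and 5 come online
	ids, _ := oc.GetOnline(ctx, "u1")                   // -> [1 5], order unspecified
	_ = oc.SetUserOnline(ctx, "u1", nil, []int32{1})    // platform 1 goes offline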
79
pkg/common/storage/cache/mcache/seq_conversation.go
vendored
Normal file
@@ -0,0 +1,79 @@
package mcache

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
)

func NewSeqConversationCache(sc database.SeqConversation) cache.SeqConversationCache {
	return &seqConversationCache{
		sc: sc,
	}
}

type seqConversationCache struct {
	sc database.SeqConversation
}

func (x *seqConversationCache) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	return x.sc.Malloc(ctx, conversationID, size)
}

func (x *seqConversationCache) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
	return x.sc.SetMinSeq(ctx, conversationID, seq)
}

func (x *seqConversationCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return x.sc.GetMinSeq(ctx, conversationID)
}

func (x *seqConversationCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	res := make(map[string]int64)
	for _, conversationID := range conversationIDs {
		seq, err := x.GetMaxSeq(ctx, conversationID)
		if err != nil {
			return nil, err
		}
		res[conversationID] = seq
	}
	return res, nil
}

func (x *seqConversationCache) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
	res := make(map[string]database.SeqTime)
	for _, conversationID := range conversationIDs {
		seq, err := x.GetMaxSeq(ctx, conversationID)
		if err != nil {
			return nil, err
		}
		res[conversationID] = database.SeqTime{Seq: seq}
	}
	return res, nil
}

func (x *seqConversationCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return x.sc.GetMaxSeq(ctx, conversationID)
}

func (x *seqConversationCache) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) {
	seq, err := x.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return database.SeqTime{}, err
	}
	return database.SeqTime{Seq: seq}, nil
}

func (x *seqConversationCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	for conversationID, seq := range seqs {
		if err := x.sc.SetMinSeq(ctx, conversationID, seq); err != nil {
			return err
		}
	}
	return nil
}

func (x *seqConversationCache) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
	return x.GetMaxSeqsWithTime(ctx, conversationIDs)
}
98
pkg/common/storage/cache/mcache/third.go
vendored
Normal file
@@ -0,0 +1,98 @@
package mcache

import (
	"context"
	"strconv"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
)

func NewThirdCache(cache database.Cache) cache.ThirdCache {
	return &thirdCache{
		cache: cache,
	}
}

type thirdCache struct {
	cache database.Cache
}

func (c *thirdCache) getGetuiTokenKey() string {
	return cachekey.GetGetuiTokenKey()
}

func (c *thirdCache) getGetuiTaskIDKey() string {
	return cachekey.GetGetuiTaskIDKey()
}

func (c *thirdCache) getUserBadgeUnreadCountSumKey(userID string) string {
	return cachekey.GetUserBadgeUnreadCountSumKey(userID)
}

func (c *thirdCache) getFcmAccountTokenKey(account string, platformID int) string {
	return cachekey.GetFcmAccountTokenKey(account, platformID)
}

func (c *thirdCache) get(ctx context.Context, key string) (string, error) {
	res, err := c.cache.Get(ctx, []string{key})
	if err != nil {
		return "", err
	}
	if val, ok := res[key]; ok {
		return val, nil
	}
	return "", errs.Wrap(redis.Nil)
}

func (c *thirdCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
	return errs.Wrap(c.cache.Set(ctx, c.getFcmAccountTokenKey(account, platformID), fcmToken, time.Duration(expireTime)*time.Second))
}

func (c *thirdCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
	return c.get(ctx, c.getFcmAccountTokenKey(account, platformID))
}

func (c *thirdCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
	return c.cache.Del(ctx, []string{c.getFcmAccountTokenKey(account, platformID)})
}

func (c *thirdCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
	return c.cache.Incr(ctx, c.getUserBadgeUnreadCountSumKey(userID), 1)
}

func (c *thirdCache) SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error {
	return c.cache.Set(ctx, c.getUserBadgeUnreadCountSumKey(userID), strconv.Itoa(value), 0)
}

func (c *thirdCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
	str, err := c.get(ctx, c.getUserBadgeUnreadCountSumKey(userID))
	if err != nil {
		return 0, err
	}
	val, err := strconv.Atoi(str)
	if err != nil {
		return 0, errs.WrapMsg(err, "strconv.Atoi", "str", str)
	}
	return val, nil
}

func (c *thirdCache) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
	return c.cache.Set(ctx, c.getGetuiTokenKey(), token, time.Duration(expireTime)*time.Second)
}

func (c *thirdCache) GetGetuiToken(ctx context.Context) (string, error) {
	return c.get(ctx, c.getGetuiTokenKey())
}

func (c *thirdCache) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
	return c.cache.Set(ctx, c.getGetuiTaskIDKey(), taskID, time.Duration(expireTime)*time.Second)
}

func (c *thirdCache) GetGetuiTaskID(ctx context.Context) (string, error) {
	return c.get(ctx, c.getGetuiTaskIDKey())
}
130
pkg/common/storage/cache/mcache/token.go
vendored
Normal file
@@ -0,0 +1,130 @@
package mcache

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
)

func NewTokenCacheModel(cache database.Cache, accessExpire int64) cache.TokenModel {
	c := &tokenCache{cache: cache}
	c.accessExpire = c.getExpireTime(accessExpire)
	return c
}

type tokenCache struct {
	cache        database.Cache
	accessExpire time.Duration
}

func (x *tokenCache) getTokenKey(userID string, platformID int, token string) string {
	return cachekey.GetTokenKey(userID, platformID) + ":" + token
}

func (x *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
	return x.cache.Set(ctx, x.getTokenKey(userID, platformID, token), strconv.Itoa(flag), x.accessExpire)
}

// SetTokenFlagEx sets the token and flag with the default access expire time.
func (x *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error {
	return x.SetTokenFlag(ctx, userID, platformID, token, flag)
}

func (x *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) {
	prefix := x.getTokenKey(userID, platformID, "")
	m, err := x.cache.Prefix(ctx, prefix)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	mm := make(map[string]int)
	for k, v := range m {
		state, err := strconv.Atoi(v)
		if err != nil {
			log.ZError(ctx, "token value is not int", err, "value", v, "userID", userID, "platformID", platformID)
			continue
		}
		mm[strings.TrimPrefix(k, prefix)] = state
	}
	return mm, nil
}

func (x *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) {
	prefix := cachekey.UidPidToken + userID + ":"
	tokens, err := x.cache.Prefix(ctx, prefix)
	if err != nil {
		return nil, err
	}
	res := make(map[int]map[string]int)
	for key, flagStr := range tokens {
		flag, err := strconv.Atoi(flagStr)
		if err != nil {
			log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID)
			continue
		}
		arr := strings.SplitN(strings.TrimPrefix(key, prefix), ":", 2)
		if len(arr) != 2 {
			log.ZError(ctx, "token key format is invalid", nil, "key", key, "value", flagStr, "userID", userID)
			continue
		}
		platformID, err := strconv.Atoi(arr[0])
		if err != nil {
			log.ZError(ctx, "token platformID is not int", err, "key", key, "value", flagStr, "userID", userID)
			continue
		}
		token := arr[1]
		if token == "" {
			log.ZError(ctx, "token is empty", nil, "key", key, "value", flagStr, "userID", userID)
			continue
		}
		tk, ok := res[platformID]
		if !ok {
			tk = make(map[string]int)
			res[platformID] = tk
		}
		tk[token] = flag
	}
	return res, nil
}

func (x *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error {
	for token, flag := range m {
		err := x.SetTokenFlag(ctx, userID, platformID, token, flag)
		if err != nil {
			return err
		}
	}
	return nil
}

func (x *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error {
	for prefix, tokenFlag := range tokens {
		for token, flag := range tokenFlag {
			flagStr := fmt.Sprintf("%v", flag)
			if err := x.cache.Set(ctx, prefix+":"+token, flagStr, x.accessExpire); err != nil {
				return err
			}
		}
	}
	return nil
}

func (x *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error {
	keys := make([]string, 0, len(fields))
	for _, token := range fields {
		keys = append(keys, x.getTokenKey(userID, platformID, token))
	}
	return x.cache.Del(ctx, keys)
}

func (x *tokenCache) getExpireTime(t int64) time.Duration {
	return time.Hour * 24 * time.Duration(t)
}
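Note: tokens live as flat keys of the form UidPidToken + userID + ":" + platformID + ":" + token (with a numeric flag as the value), so lookups are prefix scans rather than redis hashes. A round-trip sketch; the IDs and the flag value are illustrative:

	tc := mcache.NewTokenCacheModel(dbCache, 90) // dbCache is any database.Cache; expiry in days
	_ = tc.SetTokenFlag(ctx, "u1", 9, "tok-abc", 1)
	all, _ := tc.GetAllTokensWithoutError(ctx, "u1")
	// all -> map[9:map[tok-abc:1]]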
63
pkg/common/storage/cache/mcache/tools.go
vendored
Normal file
@@ -0,0 +1,63 @@
package mcache

import (
	"context"
	"encoding/json"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/log"
)

func getCache[V any](ctx context.Context, cache database.Cache, key string, expireTime time.Duration, fn func(ctx context.Context) (V, error)) (V, error) {
	getDB := func() (V, bool, error) {
		var val V
		res, err := cache.Get(ctx, []string{key})
		if err != nil {
			return val, false, err
		}
		if str, ok := res[key]; ok {
			if err := json.Unmarshal([]byte(str), &val); err != nil {
				// Treat an undecodable cache entry as a miss.
				return val, false, nil
			}
			return val, true, nil
		}
		return val, false, nil
	}
	dbVal, ok, err := getDB()
	if err != nil {
		return dbVal, err
	}
	if ok {
		return dbVal, nil
	}
	lockValue, err := cache.Lock(ctx, key, time.Minute)
	if err != nil {
		return dbVal, err
	}
	defer func() {
		if err := cache.Unlock(ctx, key, lockValue); err != nil {
			log.ZError(ctx, "unlock cache key", err, "key", key, "value", lockValue)
		}
	}()
	dbVal, ok, err = getDB()
	if err != nil {
		return dbVal, err
	}
	if ok {
		return dbVal, nil
	}
	val, err := fn(ctx)
	if err != nil {
		return val, err
	}
	data, err := json.Marshal(val)
	if err != nil {
		return val, err
	}
	if err := cache.Set(ctx, key, string(data), expireTime); err != nil {
		return val, err
	}
	return val, nil
}
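Note: getCache is cache-aside with a lock and a double check: read, take the per-key lock, re-read, and only then run fn and write the result back, so concurrent callers do not stampede the underlying loader. Caller sketch from inside the package; the key and loader are hypothetical:

	info, err := getCache(ctx, dbCache, "user:info:u1", 10*time.Minute,
		func(ctx context.Context) (*UserInfo, error) {
			return loadUserInfoFromMongo(ctx, "u1") // assumed loader
		})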
63
pkg/common/storage/cache/redis/batch.go
vendored
@@ -3,28 +3,65 @@ package redis
import (
	"context"
	"encoding/json"
	"time"

	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
	"golang.org/x/sync/singleflight"
	"time"
	"unsafe"
)

func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient {
	type Client struct {
		rdb redis.UniversalClient
		_   rockscache.Options
		_   singleflight.Group
	}
	return (*Client)(unsafe.Pointer(cli)).rdb
// GetRocksCacheOptions returns the default configuration options for RocksCache.
func GetRocksCacheOptions() *rockscache.Options {
	opts := rockscache.NewDefaultOptions()
	opts.LockExpire = rocksCacheTimeout
	opts.WaitReplicasTimeout = rocksCacheTimeout
	opts.StrongConsistency = true
	opts.RandomExpireAdjustment = 0.2

	return &opts
}

func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
func newRocksCacheClient(rdb redis.UniversalClient) *rocksCacheClient {
	if rdb == nil {
		return &rocksCacheClient{}
	}
	rc := &rocksCacheClient{
		rdb:    rdb,
		client: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
	}
	return rc
}

type rocksCacheClient struct {
	rdb    redis.UniversalClient
	client *rockscache.Client
}

func (x *rocksCacheClient) GetClient() *rockscache.Client {
	return x.client
}

func (x *rocksCacheClient) Disable() bool {
	return x.client == nil
}

func (x *rocksCacheClient) GetRedis() redis.UniversalClient {
	return x.rdb
}

func (x *rocksCacheClient) GetBatchDeleter(topics ...string) cache.BatchDeleter {
	return NewBatchDeleterRedis(x, topics)
}

func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rocksCacheClient, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	if rcClient.Disable() {
		return fn(ctx, ids)
	}
	findKeys := make([]string, 0, len(ids))
	keyId := make(map[string]K)
	for _, id := range ids {
@@ -35,13 +72,13 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
		keyId[key] = id
		findKeys = append(findKeys, key)
	}
	slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys)
	slotKeys, err := groupKeysBySlot(ctx, rcClient.GetRedis(), findKeys)
	if err != nil {
		return nil, err
	}
	result := make([]*V, 0, len(findKeys))
	for _, keys := range slotKeys {
		indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
		indexCache, err := rcClient.GetClient().FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
			queryIds := make([]K, 0, len(idx))
			idIndex := make(map[K]int)
			for _, index := range idx {
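Note: the deleted getRocksCacheRedisClient recovered the redis handle by casting the rockscache.Client onto a mirrored struct layout via unsafe.Pointer; the new rocksCacheClient wrapper simply keeps the handle alongside the client. It also doubles as the no-redis switch for single-process mode: a nil UniversalClient leaves client nil, Disable() reports true, and batchGetCache2 falls through to the fetch function. Sketch from inside the package:

	rc := newRocksCacheClient(rdb) // normal path: reads go through rockscache
	norc := newRocksCacheClient(nil)
	fmt.Println(norc.Disable()) // true: every cached read hits the loader directly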
79
pkg/common/storage/cache/redis/batch_handler.go
vendored
@@ -1,23 +1,11 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache"
@@ -25,7 +13,6 @@ import (
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/redis/go-redis/v9"
	"time"
)

const (
@@ -41,10 +28,10 @@ type BatchDeleterRedis struct {
}

// NewBatchDeleterRedis creates a new BatchDeleterRedis instance.
func NewBatchDeleterRedis(redisClient redis.UniversalClient, options *rockscache.Options, redisPubTopics []string) *BatchDeleterRedis {
func NewBatchDeleterRedis(rcClient *rocksCacheClient, redisPubTopics []string) *BatchDeleterRedis {
	return &BatchDeleterRedis{
		redisClient:    redisClient,
		rocksClient:    rockscache.NewClient(redisClient, *options),
		redisClient:    rcClient.GetRedis(),
		rocksClient:    rcClient.GetClient(),
		redisPubTopics: redisPubTopics,
	}
}
@@ -107,21 +94,29 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) {
	c.keys = append(c.keys, keys...)
}

// GetRocksCacheOptions returns the default configuration options for RocksCache.
func GetRocksCacheOptions() *rockscache.Options {
	opts := rockscache.NewDefaultOptions()
	opts.LockExpire = rocksCacheTimeout
	opts.WaitReplicasTimeout = rocksCacheTimeout
	opts.StrongConsistency = true
	opts.RandomExpireAdjustment = 0.2
type disableBatchDeleter struct{}

	return &opts
func (x disableBatchDeleter) ChainExecDel(ctx context.Context) error {
	return nil
}

func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
func (x disableBatchDeleter) ExecDelWithKeys(ctx context.Context, keys []string) error {
	return nil
}

func (x disableBatchDeleter) Clone() cache.BatchDeleter {
	return x
}

func (x disableBatchDeleter) AddKeys(keys ...string) {}

func getCache[T any](ctx context.Context, rcClient *rocksCacheClient, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
	if rcClient.Disable() {
		return fn(ctx)
	}
	var t T
	var write bool
	v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
	v, err := rcClient.GetClient().Fetch2(ctx, key, expire, func() (s string, err error) {
		t, err = fn(ctx)
		if err != nil {
			//log.ZError(ctx, "getCache query database failed", err, "key", key)
@@ -152,31 +147,3 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin

	return t, nil
}

//func batchGetCache[T any, K comparable](
//	ctx context.Context,
//	rcClient *rockscache.Client,
//	expire time.Duration,
//	keys []K,
//	keyFn func(key K) string,
//	fns func(ctx context.Context, key K) (T, error),
//) ([]T, error) {
//	if len(keys) == 0 {
//		return nil, nil
//	}
//	res := make([]T, 0, len(keys))
//	for _, key := range keys {
//		val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
//			return fns(ctx, key)
//		})
//		if err != nil {
//			if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
//				continue
//			}
//			return nil, errs.Wrap(err)
//		}
//		res = append(res, val)
//	}
//
//	return res, nil
//}
31
pkg/common/storage/cache/redis/black.go
vendored
@@ -1,29 +1,14 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
	"context"
	"github.com/dtm-labs/rockscache"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
	"time"
)

const (
@@ -33,18 +18,16 @@ const (
type BlackCacheRedis struct {
	cache.BatchDeleter
	expireTime time.Duration
	rcClient   *rockscache.Client
	rcClient   *rocksCacheClient
	blackDB    database.Black
}

func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB database.Black, options *rockscache.Options) cache.BlackCache {
	batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.Friend.Topic})
	b := localCache.Friend
	log.ZDebug(context.Background(), "black local cache init", "Topic", b.Topic, "SlotNum", b.SlotNum, "SlotSize", b.SlotSize, "enable", b.Enable())
func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB database.Black) cache.BlackCache {
	rc := newRocksCacheClient(rdb)
	return &BlackCacheRedis{
		BatchDeleter: batchHandler,
		BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
		expireTime:   blackExpireTime,
		rcClient:     rockscache.NewClient(rdb, *options),
		rcClient:     rc,
		blackDB:      blackDB,
	}
}
35
pkg/common/storage/cache/redis/conversation.go
vendored
@@ -1,47 +1,30 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
	"context"
	"github.com/dtm-labs/rockscache"
	"math/big"
	"strings"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/openimsdk/tools/utils/encrypt"
	"github.com/redis/go-redis/v9"
	"math/big"
	"strings"
	"time"
)

const (
	conversationExpireTime = time.Second * 60 * 60 * 12
)

func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, opts *rockscache.Options, db database.Conversation) cache.ConversationCache {
	batchHandler := NewBatchDeleterRedis(rdb, opts, []string{localCache.Conversation.Topic})
	c := localCache.Conversation
	log.ZDebug(context.Background(), "conversation local cache init", "Topic", c.Topic, "SlotNum", c.SlotNum, "SlotSize", c.SlotSize, "enable", c.Enable())
func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, db database.Conversation) cache.ConversationCache {
	rc := newRocksCacheClient(rdb)
	return &ConversationRedisCache{
		BatchDeleter: batchHandler,
		rcClient:     rockscache.NewClient(rdb, *opts),
		BatchDeleter: rc.GetBatchDeleter(localCache.Conversation.Topic),
		rcClient:     rc,
		conversationDB: db,
		expireTime:     conversationExpireTime,
	}
@@ -49,7 +32,7 @@ func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCac

type ConversationRedisCache struct {
	cache.BatchDeleter
	rcClient       *rockscache.Client
	rcClient       *rocksCacheClient
	conversationDB database.Conversation
	expireTime     time.Duration
}
15
pkg/common/storage/cache/redis/doc.go
vendored
@@ -1,15 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis
29
pkg/common/storage/cache/redis/friend.go
vendored
@ -1,30 +1,14 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package redis

import (
	"context"
	"time"

-	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/redis/go-redis/v9"
)
@ -38,21 +22,18 @@ type FriendCacheRedis struct {
	cache.BatchDeleter
	friendDB   database.Friend
	expireTime time.Duration
-	rcClient   *rockscache.Client
+	rcClient   *rocksCacheClient
	syncCount  int
}

// NewFriendCacheRedis creates a new instance of FriendCacheRedis.
-func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB database.Friend,
-	options *rockscache.Options) cache.FriendCache {
-	batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.Friend.Topic})
-	f := localCache.Friend
-	log.ZDebug(context.Background(), "friend local cache init", "Topic", f.Topic, "SlotNum", f.SlotNum, "SlotSize", f.SlotSize, "enable", f.Enable())
+func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB database.Friend) cache.FriendCache {
+	rc := newRocksCacheClient(rdb)
	return &FriendCacheRedis{
-		BatchDeleter: batchHandler,
+		BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
		friendDB:     friendDB,
		expireTime:   friendExpireTime,
-		rcClient:     rockscache.NewClient(rdb, *options),
+		rcClient:     rc,
	}
}
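Note on the pattern above, repeated in every cache file below: the constructors stop wiring rockscache.Client and NewBatchDeleterRedis by hand and delegate to a single rocksCacheClient wrapper. Its definition is not part of this excerpt, so the following is only a minimal sketch inferred from the call sites (newRocksCacheClient, GetClient, GetRedis, GetBatchDeleter); the real implementation elsewhere in the commit may differ.

// Sketch only — inferred from call sites in this diff, not copied from the commit.
type rocksCacheClient struct {
	rdb    redis.UniversalClient // may be nil in standalone deployments
	client *rockscache.Client
}

func newRocksCacheClient(rdb redis.UniversalClient) *rocksCacheClient {
	return &rocksCacheClient{
		rdb:    rdb,
		client: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
	}
}

// GetClient exposes the underlying rockscache client (RawSet, TagAsDeletedBatch2, ...).
func (x *rocksCacheClient) GetClient() *rockscache.Client { return x.client }

// GetRedis exposes the raw Redis handle for pipelines, Lua scripts and slot grouping.
func (x *rocksCacheClient) GetRedis() redis.UniversalClient { return x.rdb }

// GetBatchDeleter builds the local-cache-aware batch deleter the constructors embed.
func (x *rocksCacheClient) GetBatchDeleter(topics ...string) cache.BatchDeleter {
	return NewBatchDeleterRedis(x.rdb, GetRocksCacheOptions(), topics)
}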
38
pkg/common/storage/cache/redis/group.go
vendored
@ -1,17 +1,3 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package redis

import (
@ -19,7 +5,6 @@ import (
	"fmt"
	"time"

-	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
@ -36,34 +21,21 @@ const (
	groupExpireTime = time.Second * 60 * 60 * 12
)

var errIndex = errs.New("err index")

type GroupCacheRedis struct {
	cache.BatchDeleter
	groupDB        database.Group
	groupMemberDB  database.GroupMember
	groupRequestDB database.GroupRequest
	expireTime     time.Duration
-	rcClient       *rockscache.Client
+	rcClient       *rocksCacheClient
	groupHash      cache.GroupHash
}

-func NewGroupCacheRedis(
-	rdb redis.UniversalClient,
-	localCache *config.LocalCache,
-	groupDB database.Group,
-	groupMemberDB database.GroupMember,
-	groupRequestDB database.GroupRequest,
-	hashCode cache.GroupHash,
-	opts *rockscache.Options,
-) cache.GroupCache {
-	batchHandler := NewBatchDeleterRedis(rdb, opts, []string{localCache.Group.Topic})
-	g := localCache.Group
-	log.ZDebug(context.Background(), "group local cache init", "Topic", g.Topic, "SlotNum", g.SlotNum, "SlotSize", g.SlotSize, "enable", g.Enable())
-
+func NewGroupCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, groupDB database.Group, groupMemberDB database.GroupMember, groupRequestDB database.GroupRequest, hashCode cache.GroupHash) cache.GroupCache {
+	rc := newRocksCacheClient(rdb)
	return &GroupCacheRedis{
-		BatchDeleter:   batchHandler,
-		rcClient:       rockscache.NewClient(rdb, *opts),
+		BatchDeleter:   rc.GetBatchDeleter(localCache.Group.Topic),
+		rcClient:       rc,
		expireTime:     groupExpireTime,
		groupDB:        groupDB,
		groupMemberDB:  groupMemberDB,
59
pkg/common/storage/cache/redis/minio.go
vendored
Normal file
@ -0,0 +1,59 @@
+package redis
+
+import (
+	"context"
+	"time"
+
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+	"github.com/openimsdk/tools/s3/minio"
+	"github.com/redis/go-redis/v9"
+)
+
+func NewMinioCache(rdb redis.UniversalClient) minio.Cache {
+	rc := newRocksCacheClient(rdb)
+	return &minioCacheRedis{
+		BatchDeleter: rc.GetBatchDeleter(),
+		rcClient:     rc,
+		expireTime:   time.Hour * 24 * 7,
+	}
+}
+
+type minioCacheRedis struct {
+	cache.BatchDeleter
+	rcClient   *rocksCacheClient
+	expireTime time.Duration
+}
+
+func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
+	return cachekey.GetObjectImageInfoKey(key)
+}
+
+func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
+	return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
+}
+
+func (g *minioCacheRedis) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
+	ks := make([]string, 0, len(keys))
+	for _, key := range keys {
+		ks = append(ks, g.getObjectImageInfoKey(key))
+	}
+	return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
+}
+
+func (g *minioCacheRedis) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
+	return g.BatchDeleter.ExecDelWithKeys(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
+}
+
+func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
+	info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
+	if err != nil {
+		return nil, err
+	}
+	return info, nil
+}
+
+func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
+	return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
+}
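Both getters above go through a getCache helper whose definition is not shown in this excerpt. A hedged sketch of its likely shape, assuming it wraps rockscache's Fetch2 (a real rockscache API) with JSON round-tripping — the generic signature and string handling are guesses from the call sites, and the real helper presumably special-cases plain strings:

// Sketch only: a generic read-through helper matching getCache's call sites.
// Fetch2 exists in dtm-labs/rockscache; everything else here is an assumption.
func getCache[V any](ctx context.Context, rcClient *rocksCacheClient, key string, expire time.Duration, fn func(ctx context.Context) (V, error)) (V, error) {
	var zero V
	raw, err := rcClient.GetClient().Fetch2(ctx, key, expire, func() (string, error) {
		value, err := fn(ctx) // cache miss: load from the source of truth
		if err != nil {
			return "", err
		}
		data, err := json.Marshal(value)
		if err != nil {
			return "", err
		}
		return string(data), nil
	})
	if err != nil {
		return zero, err
	}
	var result V
	if err := json.Unmarshal([]byte(raw), &result); err != nil {
		return zero, err
	}
	return result, nil
}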
20
pkg/common/storage/cache/redis/msg.go
vendored
@ -3,7 +3,8 @@ package redis
import (
	"context"
	"encoding/json"
-	"github.com/dtm-labs/rockscache"
	"time"
+
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
@ -11,7 +12,6 @@ import (
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/redis/go-redis/v9"
-	"time"
) //

// msgCacheTimeout is expiration time of message cache, 86400 seconds
@ -19,15 +19,13 @@ const msgCacheTimeout = time.Hour * 24

func NewMsgCache(client redis.UniversalClient, db database.Msg) cache.MsgCache {
	return &msgCache{
-		rdb:            client,
-		rcClient:       rockscache.NewClient(client, *GetRocksCacheOptions()),
+		rcClient:       newRocksCacheClient(client),
		msgDocDatabase: db,
	}
}

type msgCache struct {
-	rdb            redis.UniversalClient
-	rcClient       *rockscache.Client
+	rcClient       *rocksCacheClient
	msgDocDatabase database.Msg
}
@ -36,11 +34,11 @@ func (c *msgCache) getSendMsgKey(id string) string {
}

func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
-	return errs.Wrap(c.rdb.Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err())
+	return errs.Wrap(c.rcClient.GetRedis().Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err())
}

func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
-	result, err := c.rdb.Get(ctx, c.getSendMsgKey(id)).Int()
+	result, err := c.rcClient.GetRedis().Get(ctx, c.getSendMsgKey(id)).Int()
	return int32(result), errs.Wrap(err)
}
@ -67,12 +65,12 @@ func (c *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string,
	keys := datautil.Slice(seqs, func(seq int64) string {
		return cachekey.GetMsgCacheKey(conversationID, seq)
	})
-	slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(c.rcClient), keys)
+	slotKeys, err := groupKeysBySlot(ctx, c.rcClient.GetRedis(), keys)
	if err != nil {
		return err
	}
	for _, keys := range slotKeys {
-		if err := c.rcClient.TagAsDeletedBatch2(ctx, keys); err != nil {
+		if err := c.rcClient.GetClient().TagAsDeletedBatch2(ctx, keys); err != nil {
			return err
		}
	}
@ -88,7 +86,7 @@ func (c *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string,
		if err != nil {
			return err
		}
-		if err := c.rcClient.RawSet(ctx, cachekey.GetMsgCacheKey(conversationID, msg.Msg.Seq), string(data), msgCacheTimeout); err != nil {
+		if err := c.rcClient.GetClient().RawSet(ctx, cachekey.GetMsgCacheKey(conversationID, msg.Msg.Seq), string(data), msgCacheTimeout); err != nil {
			return err
		}
	}
12
pkg/common/storage/cache/redis/online.go
vendored
@ -3,18 +3,24 @@ package redis
import (
	"context"
	"fmt"
+	"strconv"
+	"strings"
+	"time"

+	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
-	"strconv"
-	"strings"
-	"time"
)

func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
+	if rdb == nil || config.Standalone() {
+		return mcache.NewOnlineCache()
+	}
	return &userOnline{
		rdb:    rdb,
		expire: cachekey.OnlineExpire,
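This constructor is one of several in the commit that degrade gracefully when no Redis client is wired in: a nil rdb, or an explicit standalone configuration, swaps in a pure in-memory mcache implementation. Spelled out as a sketch — the helper name below is invented, and NewUserOnline already contains this check internally:

// Illustrative only; selectOnlineCache is not part of the commit.
func selectOnlineCache(rdb redis.UniversalClient) cache.OnlineCache {
	if rdb == nil || config.Standalone() {
		return mcache.NewOnlineCache() // single process: no Redis round trips
	}
	return NewUserOnline(rdb) // clustered: Redis-backed online state
}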
@ -2,7 +2,7 @@ package redis

import (
	"context"
-	"github.com/dtm-labs/rockscache"

	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
@ -28,83 +28,83 @@ type Config struct {
// Option is a function type for configuring Config
type Option func(c *Config)

-// NewRedisShardManager creates a new RedisShardManager instance
-func NewRedisShardManager(redisClient redis.UniversalClient, opts ...Option) *RedisShardManager {
-	config := &Config{
-		batchSize:       defaultBatchSize,       // Default batch size is 50 keys
-		continueOnError: false,
-		concurrentLimit: defaultConcurrentLimit, // Default concurrent limit is 3
-	}
-	for _, opt := range opts {
-		opt(config)
-	}
-	rsm := &RedisShardManager{
-		redisClient: redisClient,
-		config:      config,
-	}
-	return rsm
-}
-
-// WithBatchSize sets the number of keys to process per batch
-func WithBatchSize(size int) Option {
-	return func(c *Config) {
-		c.batchSize = size
-	}
-}
-
-// WithContinueOnError sets whether to continue processing on error
-func WithContinueOnError(continueOnError bool) Option {
-	return func(c *Config) {
-		c.continueOnError = continueOnError
-	}
-}
-
-// WithConcurrentLimit sets the concurrency limit
-func WithConcurrentLimit(limit int) Option {
-	return func(c *Config) {
-		c.concurrentLimit = limit
-	}
-}
-
-// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
-func (rsm *RedisShardManager) ProcessKeysBySlot(
-	ctx context.Context,
-	keys []string,
-	processFunc func(ctx context.Context, slot int64, keys []string) error,
-) error {
-
-	// Group keys by slot
-	slots, err := groupKeysBySlot(ctx, rsm.redisClient, keys)
-	if err != nil {
-		return err
-	}
-
-	g, ctx := errgroup.WithContext(ctx)
-	g.SetLimit(rsm.config.concurrentLimit)
-
-	// Process keys in each slot using the provided function
-	for slot, singleSlotKeys := range slots {
-		batches := splitIntoBatches(singleSlotKeys, rsm.config.batchSize)
-		for _, batch := range batches {
-			slot, batch := slot, batch // Avoid closure capture issue
-			g.Go(func() error {
-				err := processFunc(ctx, slot, batch)
-				if err != nil {
-					log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
-					if !rsm.config.continueOnError {
-						return err
-					}
-				}
-				return nil
-			})
-		}
-	}
-
-	if err := g.Wait(); err != nil {
-		return err
-	}
-	return nil
-}
+//// NewRedisShardManager creates a new RedisShardManager instance
+//func NewRedisShardManager(redisClient redis.UniversalClient, opts ...Option) *RedisShardManager {
+//	config := &Config{
+//		batchSize:       defaultBatchSize, // Default batch size is 50 keys
+//		continueOnError: false,
+//		concurrentLimit: defaultConcurrentLimit, // Default concurrent limit is 3
+//	}
+//	for _, opt := range opts {
+//		opt(config)
+//	}
+//	rsm := &RedisShardManager{
+//		redisClient: redisClient,
+//		config:      config,
+//	}
+//	return rsm
+//}
+//
+//// WithBatchSize sets the number of keys to process per batch
+//func WithBatchSize(size int) Option {
+//	return func(c *Config) {
+//		c.batchSize = size
+//	}
+//}
+//
+//// WithContinueOnError sets whether to continue processing on error
+//func WithContinueOnError(continueOnError bool) Option {
+//	return func(c *Config) {
+//		c.continueOnError = continueOnError
+//	}
+//}
+//
+//// WithConcurrentLimit sets the concurrency limit
+//func WithConcurrentLimit(limit int) Option {
+//	return func(c *Config) {
+//		c.concurrentLimit = limit
+//	}
+//}
+//
+//// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
+//func (rsm *RedisShardManager) ProcessKeysBySlot(
+//	ctx context.Context,
+//	keys []string,
+//	processFunc func(ctx context.Context, slot int64, keys []string) error,
+//) error {
+//
+//	// Group keys by slot
+//	slots, err := groupKeysBySlot(ctx, rsm.redisClient, keys)
+//	if err != nil {
+//		return err
+//	}
+//
+//	g, ctx := errgroup.WithContext(ctx)
+//	g.SetLimit(rsm.config.concurrentLimit)
+//
+//	// Process keys in each slot using the provided function
+//	for slot, singleSlotKeys := range slots {
+//		batches := splitIntoBatches(singleSlotKeys, rsm.config.batchSize)
+//		for _, batch := range batches {
+//			slot, batch := slot, batch // Avoid closure capture issue
+//			g.Go(func() error {
+//				err := processFunc(ctx, slot, batch)
+//				if err != nil {
+//					log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
+//					if !rsm.config.continueOnError {
+//						return err
+//					}
+//				}
+//				return nil
+//			})
+//		}
+//	}
+//
+//	if err := g.Wait(); err != nil {
+//		return err
+//	}
+//	return nil
+//}

// groupKeysBySlot groups keys by their Redis cluster hash slots.
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
@ -197,15 +197,15 @@ func ProcessKeysBySlot(
	return nil
}

-func DeleteCacheBySlot(ctx context.Context, rcClient *rockscache.Client, keys []string) error {
+func DeleteCacheBySlot(ctx context.Context, rcClient *rocksCacheClient, keys []string) error {
	switch len(keys) {
	case 0:
		return nil
	case 1:
-		return rcClient.TagAsDeletedBatch2(ctx, keys)
+		return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
	default:
-		return ProcessKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), keys, func(ctx context.Context, slot int64, keys []string) error {
-			return rcClient.TagAsDeletedBatch2(ctx, keys)
+		return ProcessKeysBySlot(ctx, rcClient.GetRedis(), keys, func(ctx context.Context, slot int64, keys []string) error {
+			return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
		})
	}
}
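DeleteCacheBySlot stays the single entry point for bulk invalidation: zero keys is a no-op, one key is tagged directly, and larger sets are grouped by cluster hash slot so each TagAsDeletedBatch2 batch stays on one node. A usage sketch with invented key material:

// Illustrative caller; the seqs and function name are made up for the example.
func invalidateMsgCache(ctx context.Context, rc *rocksCacheClient, conversationID string, seqs []int64) error {
	keys := make([]string, 0, len(seqs))
	for _, seq := range seqs {
		keys = append(keys, cachekey.GetMsgCacheKey(conversationID, seq))
	}
	// 0 keys: no-op; 1 key: direct tag; N keys: slot-grouped batches.
	return DeleteCacheBySlot(ctx, rc, keys)
}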
86
pkg/common/storage/cache/redis/s3.go
vendored
@ -1,39 +1,23 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package redis

import (
	"context"
-	"github.com/dtm-labs/rockscache"
+	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/s3"
	"github.com/openimsdk/tools/s3/cont"
	"github.com/openimsdk/tools/s3/minio"
	"github.com/redis/go-redis/v9"
-	"time"
)

func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) cache.ObjectCache {
-	opts := rockscache.NewDefaultOptions()
-	batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
+	rc := newRocksCacheClient(rdb)
	return &objectCacheRedis{
-		BatchDeleter: batchHandler,
-		rcClient:     rockscache.NewClient(rdb, opts),
+		BatchDeleter: rc.GetBatchDeleter(),
+		rcClient:     rc,
		expireTime:   time.Hour * 12,
		objDB:        objDB,
	}
@ -42,7 +26,7 @@ func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) c
type objectCacheRedis struct {
	cache.BatchDeleter
	objDB      database.ObjectInfo
-	rcClient   *rockscache.Client
+	rcClient   *rocksCacheClient
	expireTime time.Duration
}
@ -76,11 +60,10 @@ func (g *objectCacheRedis) GetName(ctx context.Context, engine string, name stri
}

func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache {
-	opts := rockscache.NewDefaultOptions()
-	batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
+	rc := newRocksCacheClient(rdb)
	return &s3CacheRedis{
-		BatchDeleter: batchHandler,
-		rcClient:     rockscache.NewClient(rdb, opts),
+		BatchDeleter: rc.GetBatchDeleter(),
+		rcClient:     rc,
		expireTime:   time.Hour * 12,
		s3:           s3,
	}
@ -89,7 +72,7 @@ func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache {
type s3CacheRedis struct {
	cache.BatchDeleter
	s3         s3.Interface
-	rcClient   *rockscache.Client
+	rcClient   *rocksCacheClient
	expireTime time.Duration
}
@ -110,52 +93,3 @@ func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (
		return g.s3.StatObject(ctx, name)
	})
}
-
-func NewMinioCache(rdb redis.UniversalClient) minio.Cache {
-	opts := rockscache.NewDefaultOptions()
-	batchHandler := NewBatchDeleterRedis(rdb, &opts, nil)
-	return &minioCacheRedis{
-		BatchDeleter: batchHandler,
-		rcClient:     rockscache.NewClient(rdb, opts),
-		expireTime:   time.Hour * 24 * 7,
-	}
-}
-
-type minioCacheRedis struct {
-	cache.BatchDeleter
-	rcClient   *rockscache.Client
-	expireTime time.Duration
-}
-
-func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
-	return cachekey.GetObjectImageInfoKey(key)
-}
-
-func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
-	return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
-}
-
-func (g *minioCacheRedis) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
-	ks := make([]string, 0, len(keys))
-	for _, key := range keys {
-		ks = append(ks, g.getObjectImageInfoKey(key))
-	}
-	return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
-}
-
-func (g *minioCacheRedis) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
-	return g.BatchDeleter.ExecDelWithKeys(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
-}
-
-func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
-	info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
-	if err != nil {
-		return nil, err
-	}
-	return info, nil
-}
-
-func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
-	return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
-}
@ -4,33 +4,35 @@ import (
	"context"
	"errors"
	"fmt"
-	"github.com/dtm-labs/rockscache"
+	"strconv"
+	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
-	"strconv"
-	"time"
)

func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
+	if rdb == nil {
+		return mcache.NewSeqConversationCache(mgo)
+	}
	return &seqConversationCacheRedis{
		rdb:              rdb,
		mgo:              mgo,
		lockTime:         time.Second * 3,
		dataTime:         time.Hour * 24 * 365,
		minSeqExpireTime: time.Hour,
-		rocks:            rockscache.NewClient(rdb, *GetRocksCacheOptions()),
+		rcClient:         newRocksCacheClient(rdb),
	}
}

type seqConversationCacheRedis struct {
	rdb              redis.UniversalClient
	mgo              database.SeqConversation
-	rocks            *rockscache.Client
+	rcClient         *rocksCacheClient
	lockTime         time.Duration
	dataTime         time.Duration
	minSeqExpireTime time.Duration
@ -45,7 +47,7 @@ func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationI
}

func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
-	return getCache(ctx, s.rocks, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
+	return getCache(ctx, s.rcClient, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMinSeq(ctx, conversationID)
	})
}
@ -68,7 +70,7 @@ func (s *seqConversationCacheRedis) getSingleMaxSeqWithTime(ctx context.Context,

func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
	result := make([]*redis.StringCmd, len(keys))
-	pipe := s.rdb.Pipeline()
+	pipe := s.rcClient.GetRedis().Pipeline()
	for i, key := range keys {
		result[i] = pipe.HGet(ctx, key, "CURR")
	}
@ -99,7 +101,7 @@ func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []s

func (s *seqConversationCacheRedis) batchGetMaxSeqWithTime(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]database.SeqTime) error {
	result := make([]*redis.SliceCmd, len(keys))
-	pipe := s.rdb.Pipeline()
+	pipe := s.rcClient.GetRedis().Pipeline()
	for i, key := range keys {
		result[i] = pipe.HMGet(ctx, key, "CURR", "TIME")
	}
@ -157,7 +159,7 @@ func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversation
	if len(keys) == 1 {
		return s.getSingleMaxSeq(ctx, conversationIDs[0])
	}
-	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
+	slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
	if err != nil {
		return nil, err
	}
@ -190,7 +192,7 @@ func (s *seqConversationCacheRedis) GetMaxSeqsWithTime(ctx context.Context, conv
	if len(keys) == 1 {
		return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0])
	}
-	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
+	slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
	if err != nil {
		return nil, err
	}
@ -234,7 +236,7 @@ redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime)
redis.call("EXPIRE", key, dataSecond)
return 0
`
-	result, err := s.rdb.Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq, mill).Int64()
+	result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq, mill).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
@ -305,7 +307,7 @@ table.insert(result, last_seq)
table.insert(result, mallocTime)
return result
`
-	result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second), time.Now().UnixMilli()).Int64Slice()
+	result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second), time.Now().UnixMilli()).Int64Slice()
	if err != nil {
		return nil, errs.Wrap(err)
	}
@ -438,7 +440,7 @@ func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[str
			return err
		}
	}
-	return DeleteCacheBySlot(ctx, s.rocks, keys)
+	return DeleteCacheBySlot(ctx, s.rcClient, keys)
}

// GetCacheMaxSeqWithTime only get the existing cache, if there is no cache, no cache will be generated
@ -456,7 +458,7 @@ func (s *seqConversationCacheRedis) GetCacheMaxSeqWithTime(ctx context.Context,
		key2conversationID[key] = conversationID
		keys = append(keys, key)
	}
-	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
+	slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
	if err != nil {
		return nil, err
	}
@ -465,7 +467,7 @@ func (s *seqConversationCacheRedis) GetCacheMaxSeqWithTime(ctx context.Context,
		if len(keys) == 0 {
			continue
		}
-		pipe := s.rdb.Pipeline()
+		pipe := s.rcClient.GetRedis().Pipeline()
		cmds := make([]*redis.SliceCmd, 0, len(keys))
		for _, key := range keys {
			cmds = append(cmds, pipe.HMGet(ctx, key, "CURR", "TIME"))
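The pipelined hash reads above (HGet/HMGet on the CURR and TIME fields) are the hot path for sequence lookups. A minimal sketch of that pattern using real go-redis calls; the function itself is illustrative, not from the commit:

// Illustrative only: pipelined reads of the "CURR" field across many keys.
func readCurrSeqs(ctx context.Context, rdb redis.UniversalClient, keys []string) (map[string]int64, error) {
	pipe := rdb.Pipeline()
	cmds := make([]*redis.StringCmd, len(keys))
	for i, key := range keys {
		cmds[i] = pipe.HGet(ctx, key, "CURR")
	}
	// Exec reports redis.Nil if any key is missing; that is not fatal here.
	if _, err := pipe.Exec(ctx); err != nil && err != redis.Nil {
		return nil, err
	}
	out := make(map[string]int64, len(keys))
	for i, cmd := range cmds {
		seq, err := cmd.Int64()
		if err == redis.Nil {
			continue // no cached seq for this conversation
		} else if err != nil {
			return nil, err
		}
		out[keys[i]] = seq
	}
	return out, nil
}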
23
pkg/common/storage/cache/redis/seq_user.go
vendored
@ -2,31 +2,29 @@ package redis

import (
	"context"
-	"github.com/dtm-labs/rockscache"
	"strconv"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
-	"strconv"
-	"time"
)

func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
	return &seqUserCacheRedis{
		rdb:               rdb,
		mgo:               mgo,
		readSeqWriteRatio: 100,
		expireTime:        time.Hour * 24 * 7,
		readExpireTime:    time.Hour * 24 * 30,
-		rocks:             rockscache.NewClient(rdb, *GetRocksCacheOptions()),
+		rocks:             newRocksCacheClient(rdb),
	}
}

type seqUserCacheRedis struct {
	rdb               redis.UniversalClient
	mgo               database.SeqUser
-	rocks             *rockscache.Client
+	rocks             *rocksCacheClient
	expireTime        time.Duration
	readExpireTime    time.Duration
	readSeqWriteRatio int64
@ -54,7 +52,7 @@ func (s *seqUserCacheRedis) SetUserMaxSeq(ctx context.Context, conversationID st
	if err := s.mgo.SetUserMaxSeq(ctx, conversationID, userID, seq); err != nil {
		return err
	}
-	return s.rocks.TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
+	return s.rocks.GetClient().TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
}

func (s *seqUserCacheRedis) GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
@ -74,12 +72,15 @@ func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID s
}

func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
+	if s.rocks.GetRedis() == nil {
+		return s.SetUserReadSeqToDB(ctx, conversationID, userID, seq)
+	}
	dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID)
	if err != nil {
		return err
	}
	if dbSeq < seq {
-		if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
+		if err := s.rocks.GetClient().RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
			return errs.Wrap(err)
		}
	}
@ -109,12 +110,12 @@ func (s *seqUserCacheRedis) setUserRedisReadSeqs(ctx context.Context, userID str
		keys = append(keys, key)
		keySeq[key] = seq
	}
-	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
+	slotKeys, err := groupKeysBySlot(ctx, s.rocks.GetRedis(), keys)
	if err != nil {
		return err
	}
	for _, keys := range slotKeys {
-		pipe := s.rdb.Pipeline()
+		pipe := s.rocks.GetRedis().Pipeline()
		for _, key := range keys {
			pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
			pipe.Expire(ctx, key, s.readExpireTime)
17
pkg/common/storage/cache/redis/third.go
vendored
@ -1,26 +1,13 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package redis

import (
	"context"
+	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
-	"time"
)

func NewThirdCache(rdb redis.UniversalClient) cache.ThirdCache {
28
pkg/common/storage/cache/redis/user.go
vendored
@ -1,30 +1,16 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package redis

import (
	"context"
+	"time"

-	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
-	"time"
)

const (
@ -38,19 +24,17 @@ type UserCacheRedis struct {
	rdb        redis.UniversalClient
	userDB     database.User
	expireTime time.Duration
-	rcClient   *rockscache.Client
+	rcClient   *rocksCacheClient
}

func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, userDB database.User, options *rockscache.Options) cache.UserCache {
-	batchHandler := NewBatchDeleterRedis(rdb, options, []string{localCache.User.Topic})
-	u := localCache.User
-	log.ZDebug(context.Background(), "user local cache init", "Topic", u.Topic, "SlotNum", u.SlotNum, "SlotSize", u.SlotSize, "enable", u.Enable())
+	rc := newRocksCacheClient(rdb)
	return &UserCacheRedis{
-		BatchDeleter: batchHandler,
+		BatchDeleter: rc.GetBatchDeleter(localCache.User.Topic),
		rdb:          rdb,
		userDB:       userDB,
		expireTime:   userExpireTime,
-		rcClient:     rockscache.NewClient(rdb, *options),
+		rcClient:     rc,
	}
}
@ -140,7 +140,7 @@ func NewGroupDatabase(
		groupMemberDB:  groupMemberDB,
		groupRequestDB: groupRequestDB,
		ctxTx:          ctxTx,
-		cache:          redis2.NewGroupCacheRedis(rdb, localCache, groupDB, groupMemberDB, groupRequestDB, groupHash, redis2.GetRocksCacheOptions()),
+		cache:          redis2.NewGroupCacheRedis(rdb, localCache, groupDB, groupMemberDB, groupRequestDB, groupHash),
	}
}
@ -18,19 +18,21 @@ import (
	"context"
	"encoding/json"
	"errors"
+
+	"github.com/openimsdk/tools/mq"
+	"github.com/openimsdk/tools/utils/jsonutil"
+	"google.golang.org/protobuf/proto"
+
	"strconv"
	"strings"
	"time"

-	"github.com/openimsdk/tools/utils/jsonutil"
-
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"

	"github.com/redis/go-redis/v9"
	"go.mongodb.org/mongo-driver/mongo"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/protocol/constant"
@ -38,7 +40,6 @@ import (
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
-	"github.com/openimsdk/tools/mq/kafka"
	"github.com/openimsdk/tools/utils/datautil"
)
@ -102,22 +103,14 @@ type CommonMsgDatabase interface {
	GetLastMessage(ctx context.Context, conversationIDS []string, userID string) (map[string]*sdkws.MsgData, error)
}

-func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
-	conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
-	if err != nil {
-		return nil, err
-	}
-	producerToRedis, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToRedisTopic)
-	if err != nil {
-		return nil, err
-	}
+func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, producer mq.Producer) CommonMsgDatabase {
	return &commonMsgDatabase{
		msgDocDatabase:  msgDocModel,
		msgCache:        msg,
		seqUser:         seqUser,
		seqConversation: seqConversation,
-		producer:        producerToRedis,
-	}, nil
+		producer:        producer,
+	}
}

type commonMsgDatabase struct {
@ -126,13 +119,16 @@ type commonMsgDatabase struct {
	msgCache        cache.MsgCache
	seqConversation cache.SeqConversationCache
	seqUser         cache.SeqUser
-	producer        *kafka.Producer
+	producer        mq.Producer
}

func (db *commonMsgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *sdkws.MsgData) error {
-	_, _, err := db.producer.SendMessage(ctx, key, msg2mq)
+	data, err := proto.Marshal(msg2mq)
	if err != nil {
		return err
	}
+	return db.producer.SendMessage(ctx, key, data)
}

func (db *commonMsgDatabase) batchInsertBlock(ctx context.Context, conversationID string, fields []any, key int8, firstSeq int64) error {
	if len(fields) == 0 {
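From this point on the storage layer depends only on the mq.Producer abstraction and serializes payloads itself with proto.Marshal, so any producer implementation can be injected without the storage code knowing about Kafka. The interface is defined in github.com/openimsdk/tools/mq and is not shown in this diff; inferred from the call sites, it presumably looks roughly like:

// Inferred shape only — not copied from github.com/openimsdk/tools/mq.
type Producer interface {
	// SendMessage publishes pre-serialized bytes under a partition key.
	// Callers proto.Marshal their payloads before handing them over.
	SendMessage(ctx context.Context, key string, value []byte) error
}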
@ -2,11 +2,13 @@ package controller

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
	"github.com/openimsdk/protocol/constant"
+	"github.com/openimsdk/tools/mq"
	"github.com/openimsdk/tools/utils/datautil"
+	"google.golang.org/protobuf/proto"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@ -14,7 +16,6 @@ import (
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
-	"github.com/openimsdk/tools/mq/kafka"
	"go.mongodb.org/mongo-driver/mongo"
)
@ -32,30 +33,30 @@ type MsgTransferDatabase interface {
	SetHasReadSeqToDB(ctx context.Context, conversationID string, userSeqMap map[string]int64) error

	// to mq
-	MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error)
+	MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) error
	MsgToMongoMQ(ctx context.Context, key, conversationID string, msgs []*sdkws.MsgData, lastSeq int64) error
}

-func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (MsgTransferDatabase, error) {
-	conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
-	if err != nil {
-		return nil, err
-	}
-	producerToMongo, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToMongoTopic)
-	if err != nil {
-		return nil, err
-	}
-	producerToPush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToPushTopic)
-	if err != nil {
-		return nil, err
-	}
+func NewMsgTransferDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, mongoProducer, pushProducer mq.Producer) (MsgTransferDatabase, error) {
+	//conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
+	//if err != nil {
+	//	return nil, err
+	//}
+	//producerToMongo, err := kafka.NewKafkaProducerV2(conf, kafkaConf.Address, kafkaConf.ToMongoTopic)
+	//if err != nil {
+	//	return nil, err
+	//}
+	//producerToPush, err := kafka.NewKafkaProducerV2(conf, kafkaConf.Address, kafkaConf.ToPushTopic)
+	//if err != nil {
+	//	return nil, err
+	//}
	return &msgTransferDatabase{
		msgDocDatabase:  msgDocModel,
		msgCache:        msg,
		seqUser:         seqUser,
		seqConversation: seqConversation,
-		producerToMongo: producerToMongo,
-		producerToPush:  producerToPush,
+		producerToMongo: mongoProducer,
+		producerToPush:  pushProducer,
	}, nil
}
@ -65,8 +66,8 @@ type msgTransferDatabase struct {
	msgCache        cache.MsgCache
	seqConversation cache.SeqConversationCache
	seqUser         cache.SeqUser
-	producerToMongo *kafka.Producer
-	producerToPush  *kafka.Producer
+	producerToMongo mq.Producer
+	producerToPush  mq.Producer
}

func (db *msgTransferDatabase) BatchInsertChat2DB(ctx context.Context, conversationID string, msgList []*sdkws.MsgData, currentMaxSeq int64) error {
@ -281,19 +282,25 @@ func (db *msgTransferDatabase) SetHasReadSeqToDB(ctx context.Context, conversati
	return nil
}

-func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) (int32, int64, error) {
-	partition, offset, err := db.producerToPush.SendMessage(ctx, key, &pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
+func (db *msgTransferDatabase) MsgToPushMQ(ctx context.Context, key, conversationID string, msg2mq *sdkws.MsgData) error {
+	data, err := proto.Marshal(&pbmsg.PushMsgDataToMQ{MsgData: msg2mq, ConversationID: conversationID})
	if err != nil {
-		log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq)
-		return 0, 0, err
+		return err
	}
-	return partition, offset, nil
+	if err := db.producerToPush.SendMessage(ctx, key, data); err != nil {
+		log.ZError(ctx, "MsgToPushMQ", err, "key", key, "conversationID", conversationID)
+		return err
+	}
+	return nil
}

func (db *msgTransferDatabase) MsgToMongoMQ(ctx context.Context, key, conversationID string, messages []*sdkws.MsgData, lastSeq int64) error {
	if len(messages) > 0 {
-		_, _, err := db.producerToMongo.SendMessage(ctx, key, &pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
+		data, err := proto.Marshal(&pbmsg.MsgDataToMongoByMQ{LastSeq: lastSeq, ConversationID: conversationID, MsgData: messages})
+		if err != nil {
+			return err
+		}
+		if err := db.producerToMongo.SendMessage(ctx, key, data); err != nil {
			log.ZError(ctx, "MsgToMongoMQ", err, "key", key, "conversationID", conversationID, "lastSeq", lastSeq)
			return err
		}
@ -17,12 +17,12 @@ package controller
import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/protocol/push"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/log"
-	"github.com/openimsdk/tools/mq/kafka"
+	"github.com/openimsdk/tools/mq"
+	"google.golang.org/protobuf/proto"
)

type PushDatabase interface {
@ -32,21 +32,13 @@ type PushDatabase interface {

type pushDataBase struct {
	cache                 cache.ThirdCache
-	producerToOfflinePush *kafka.Producer
+	producerToOfflinePush mq.Producer
}

-func NewPushDatabase(cache cache.ThirdCache, kafkaConf *config.Kafka) PushDatabase {
-	conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
-	if err != nil {
-		return nil
-	}
-	producerToOfflinePush, err := kafka.NewKafkaProducer(conf, kafkaConf.Address, kafkaConf.ToOfflinePushTopic)
-	if err != nil {
-		return nil
-	}
+func NewPushDatabase(cache cache.ThirdCache, offlinePushProducer mq.Producer) PushDatabase {
	return &pushDataBase{
		cache:                 cache,
-		producerToOfflinePush: producerToOfflinePush,
+		producerToOfflinePush: offlinePushProducer,
	}
}
@ -55,7 +47,12 @@ func (p *pushDataBase) DelFcmToken(ctx context.Context, userID string, platformI
}

func (p *pushDataBase) MsgToOfflinePushMQ(ctx context.Context, key string, userIDs []string, msg2mq *sdkws.MsgData) error {
-	_, _, err := p.producerToOfflinePush.SendMessage(ctx, key, &push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
-	log.ZInfo(ctx, "message is push to offlinePush topic", "key", key, "userIDs", userIDs, "msg", msg2mq.String())
+	data, err := proto.Marshal(&push.PushMsgReq{MsgData: msg2mq, UserIDs: userIDs})
+	if err != nil {
+		return err
+	}
+	if err := p.producerToOfflinePush.SendMessage(ctx, key, data); err != nil {
+		log.ZError(ctx, "message is push to offlinePush topic", err, "key", key, "userIDs", userIDs, "msg", msg2mq.String())
+	}
	return err
}
@ -30,7 +30,7 @@ import (
)

type S3Database interface {
-	PartLimit() *s3.PartLimit
+	PartLimit() (*s3.PartLimit, error)
	PartSize(ctx context.Context, size int64) (int64, error)
	AuthSign(ctx context.Context, uploadID string, partNumbers []int) (*s3.AuthSignResult, error)
	InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error)
@ -65,7 +65,7 @@ func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
	return s.s3.PartSize(ctx, size)
}

-func (s *s3Database) PartLimit() *s3.PartLimit {
+func (s *s3Database) PartLimit() (*s3.PartLimit, error) {
	return s.s3.PartLimit()
}
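PartLimit gaining an error return ripples through every caller. An illustrative adaptation (demoPartLimit is not from the commit), presumably needed because the backing s3.Interface can now fail to report its limits:

// Illustrative caller of the new signature.
func demoPartLimit(db S3Database) error {
	limit, err := db.PartLimit() // previously could not report failure
	if err != nil {
		return err
	}
	_ = limit // consume the limit fields as before
	return nil
}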
@ -16,6 +16,7 @@ package database

import (
	"context"
+
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/pagination"
)
@ -29,3 +30,85 @@ type Black interface {
	FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error)
	FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error)
}
+
+var (
+	_ Black = (*mgoImpl)(nil)
+	_ Black = (*redisImpl)(nil)
+)
+
+type mgoImpl struct {
+}
+
+func (m *mgoImpl) Create(ctx context.Context, blacks []*model.Black) (err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (m *mgoImpl) Delete(ctx context.Context, blacks []*model.Black) (err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (m *mgoImpl) Find(ctx context.Context, blacks []*model.Black) (blackList []*model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (m *mgoImpl) Take(ctx context.Context, ownerUserID, blockUserID string) (black *model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (m *mgoImpl) FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (m *mgoImpl) FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (m *mgoImpl) FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+type redisImpl struct {
+}
+
+func (r *redisImpl) Create(ctx context.Context, blacks []*model.Black) (err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (r *redisImpl) Delete(ctx context.Context, blacks []*model.Black) (err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (r *redisImpl) Find(ctx context.Context, blacks []*model.Black) (blackList []*model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (r *redisImpl) Take(ctx context.Context, ownerUserID, blockUserID string) (black *model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (r *redisImpl) FindOwnerBlacks(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (total int64, blacks []*model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (r *redisImpl) FindOwnerBlackInfos(ctx context.Context, ownerUserID string, userIDs []string) (blacks []*model.Black, err error) {
+	//TODO implement me
+	panic("implement me")
+}
+
+func (r *redisImpl) FindBlackUserIDs(ctx context.Context, ownerUserID string) (blackUserIDs []string, err error) {
+	//TODO implement me
+	panic("implement me")
+}
16
pkg/common/storage/database/cache.go
Normal file
@ -0,0 +1,16 @@
+package database
+
+import (
+	"context"
+	"time"
+)
+
+type Cache interface {
+	Get(ctx context.Context, key []string) (map[string]string, error)
+	Prefix(ctx context.Context, prefix string) (map[string]string, error)
+	Set(ctx context.Context, key string, value string, expireAt time.Duration) error
+	Incr(ctx context.Context, key string, value int) (int, error)
+	Del(ctx context.Context, key []string) error
+	Lock(ctx context.Context, key string, duration time.Duration) (string, error)
+	Unlock(ctx context.Context, key string, value string) error
+}
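The new database.Cache contract doubles as a coarse distributed lock: Lock returns an opaque token that must be passed back to Unlock, so only the holder can release. A hedged usage sketch (withLock is not part of the commit):

// Sketch only; withLock is illustrative, not from the commit.
func withLock(ctx context.Context, c database.Cache, key string, fn func() error) error {
	token, err := c.Lock(ctx, key, time.Second*30)
	if err != nil {
		return err
	}
	defer func() {
		// Unlock matches both key and token, so only the holder succeeds.
		if err := c.Unlock(ctx, key, token); err != nil {
			log.Printf("unlock %s: %v", key, err) // best effort
		}
	}()
	return fn()
}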
183
pkg/common/storage/database/mgo/cache.go
Normal file
@ -0,0 +1,183 @@
+package mgo
+
+import (
+	"context"
+	"strconv"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+	"github.com/openimsdk/tools/db/mongoutil"
+	"github.com/openimsdk/tools/errs"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+func NewCacheMgo(db *mongo.Database) (*CacheMgo, error) {
+	coll := db.Collection(database.CacheName)
+	_, err := coll.Indexes().CreateMany(context.Background(), []mongo.IndexModel{
+		{
+			Keys: bson.D{
+				{Key: "key", Value: 1},
+			},
+			Options: options.Index().SetUnique(true),
+		},
+		{
+			Keys: bson.D{
+				{Key: "expire_at", Value: 1},
+			},
+			Options: options.Index().SetExpireAfterSeconds(0),
+		},
+	})
+	if err != nil {
+		return nil, errs.Wrap(err)
+	}
+	return &CacheMgo{coll: coll}, nil
+}
+
+type CacheMgo struct {
+	coll *mongo.Collection
+}
+
+func (x *CacheMgo) findToMap(res []model.Cache, now time.Time) map[string]string {
+	kv := make(map[string]string)
+	for _, re := range res {
+		if re.ExpireAt != nil && re.ExpireAt.Before(now) {
+			continue
+		}
+		kv[re.Key] = re.Value
+	}
+	return kv
+}
+
+func (x *CacheMgo) Get(ctx context.Context, key []string) (map[string]string, error) {
+	if len(key) == 0 {
+		return nil, nil
+	}
+	now := time.Now()
+	res, err := mongoutil.Find[model.Cache](ctx, x.coll, bson.M{
+		"key": bson.M{"$in": key},
+		"$or": []bson.M{
+			{"expire_at": bson.M{"$gt": now}},
+			{"expire_at": nil},
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return x.findToMap(res, now), nil
+}
+
+func (x *CacheMgo) Prefix(ctx context.Context, prefix string) (map[string]string, error) {
+	now := time.Now()
+	res, err := mongoutil.Find[model.Cache](ctx, x.coll, bson.M{
+		"key": bson.M{"$regex": "^" + prefix},
+		"$or": []bson.M{
+			{"expire_at": bson.M{"$gt": now}},
+			{"expire_at": nil},
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return x.findToMap(res, now), nil
+}
+
+func (x *CacheMgo) Set(ctx context.Context, key string, value string, expireAt time.Duration) error {
+	cv := &model.Cache{
+		Key:   key,
+		Value: value,
+	}
+	if expireAt > 0 {
+		now := time.Now().Add(expireAt)
+		cv.ExpireAt = &now
+	}
+	opt := options.Update().SetUpsert(true)
+	return mongoutil.UpdateOne(ctx, x.coll, bson.M{"key": key}, bson.M{"$set": cv}, false, opt)
+}
+
+func (x *CacheMgo) Incr(ctx context.Context, key string, value int) (int, error) {
+	pipeline := mongo.Pipeline{
+		{
+			{"$set", bson.M{
+				"value": bson.M{
+					"$toString": bson.M{
+						"$add": bson.A{
+							bson.M{"$toInt": "$value"},
+							value,
+						},
+					},
+				},
+			}},
+		},
+	}
+	opt := options.FindOneAndUpdate().SetReturnDocument(options.After)
+	res, err := mongoutil.FindOneAndUpdate[model.Cache](ctx, x.coll, bson.M{"key": key}, pipeline, opt)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.Atoi(res.Value)
+}
+
+func (x *CacheMgo) Del(ctx context.Context, key []string) error {
+	if len(key) == 0 {
+		return nil
+	}
+	_, err := x.coll.DeleteMany(ctx, bson.M{"key": bson.M{"$in": key}})
+	return err
+}
+
+func (x *CacheMgo) lockKey(key string) string {
+	return "LOCK_" + key
+}
+
+func (x *CacheMgo) Lock(ctx context.Context, key string, duration time.Duration) (string, error) {
+	tmp, err := uuid.NewUUID()
+	if err != nil {
+		return "", err
+	}
+	if duration <= 0 || duration > time.Minute*10 {
+		duration = time.Minute * 10
+	}
+	cv := &model.Cache{
+		Key:      x.lockKey(key),
+		Value:    tmp.String(),
+		ExpireAt: nil,
+	}
+	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
+	defer cancel()
+	wait := func() error {
+		timeout := time.NewTimer(time.Millisecond * 100)
+		defer timeout.Stop()
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-timeout.C:
+			return nil
+		}
+	}
+	for {
+		if err := mongoutil.DeleteOne(ctx, x.coll, bson.M{"key": key, "expire_at": bson.M{"$lt": time.Now()}}); err != nil {
+			return "", err
+		}
+		expireAt := time.Now().Add(duration)
+		cv.ExpireAt = &expireAt
+		if err := mongoutil.InsertMany[*model.Cache](ctx, x.coll, []*model.Cache{cv}); err != nil {
+			if mongo.IsDuplicateKeyError(err) {
+				if err := wait(); err != nil {
+					return "", err
+				}
+				continue
+			}
+			return "", err
+		}
+		return cv.Value, nil
+	}
+}
+
+func (x *CacheMgo) Unlock(ctx context.Context, key string, value string) error {
+	return mongoutil.DeleteOne(ctx, x.coll, bson.M{"key": x.lockKey(key), "value": value})
+}
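Two details in CacheMgo worth calling out: the expire_at index is a Mongo TTL index (SetExpireAfterSeconds(0)), so expired rows are swept by Mongo's background monitor on its own schedule, which is why every read still filters on expire_at explicitly; and values are stored as strings, so Incr round-trips them through $toInt/$add/$toString in an aggregation-pipeline update. A small usage sketch (key name invented):

// Illustrative only: exercising CacheMgo.Incr against a string-encoded counter.
func demoIncr(ctx context.Context, c *CacheMgo) (int, error) {
	if err := c.Set(ctx, "demo:counter", "41", time.Hour); err != nil {
		return 0, err
	}
	// The aggregation pipeline parses "41", adds 1, and stores "42".
	return c.Incr(ctx, "demo:counter", 1)
}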
133
pkg/common/storage/database/mgo/cache_test.go
Normal file
@ -0,0 +1,133 @@
package mgo

import (
	"context"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/mongoutil"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func TestName1111(t *testing.T) {
	coll := Mongodb().Collection("temp")

	//updatePipeline := mongo.Pipeline{
	//	{
	//		{"$set", bson.M{
	//			"age": bson.M{
	//				"$toString": bson.M{
	//					"$add": bson.A{
	//						bson.M{"$toInt": "$age"},
	//						1,
	//					},
	//				},
	//			},
	//		}},
	//	},
	//}

	// Same shape as CacheMgo.Incr: convert the stored string to an int, add
	// one, and convert back, all in a single aggregation-pipeline update.
	pipeline := mongo.Pipeline{
		{
			{"$set", bson.M{
				"value": bson.M{
					"$toString": bson.M{
						"$add": bson.A{
							bson.M{"$toInt": "$value"},
							1,
						},
					},
				},
			}},
		},
	}

	opt := options.FindOneAndUpdate().SetUpsert(false).SetReturnDocument(options.After)
	res, err := mongoutil.FindOneAndUpdate[model.Cache](context.Background(), coll, bson.M{"key": "123456"}, pipeline, opt)
	if err != nil {
		panic(err)
	}
	t.Log(res)
}

func TestName33333(t *testing.T) {
	c, err := NewCacheMgo(Mongodb())
	if err != nil {
		panic(err)
	}
	if err := c.Set(context.Background(), "123456", "123456", time.Hour); err != nil {
		panic(err)
	}

	if err := c.Set(context.Background(), "123666", "123666", time.Hour); err != nil {
		panic(err)
	}

	res1, err := c.Get(context.Background(), []string{"123456"})
	if err != nil {
		panic(err)
	}
	t.Log(res1)

	res2, err := c.Prefix(context.Background(), "123")
	if err != nil {
		panic(err)
	}
	t.Log(res2)
}

func TestName1111aa(t *testing.T) {
	c, err := NewCacheMgo(Mongodb())
	if err != nil {
		panic(err)
	}
	var count int

	key := "123456"

	// Each worker takes the distributed lock, increments the shared counter,
	// and releases the lock; the lock is what makes the non-atomic increment
	// safe across goroutines.
	doFunc := func() {
		value, err := c.Lock(context.Background(), key, time.Second*30)
		if err != nil {
			t.Log("Lock error", err)
			return
		}
		tmp := count
		tmp++
		count = tmp
		t.Log("count", tmp)
		if err := c.Unlock(context.Background(), key, value); err != nil {
			t.Log("Unlock error", err)
			return
		}
	}

	// Take the lock for 10s without releasing it: the workers below can only
	// proceed once this initial lock expires and is cleaned up.
	if _, err := c.Lock(context.Background(), key, time.Second*10); err != nil {
		t.Log(err)
		return
	}

	var wg sync.WaitGroup
	for i := 0; i < 32; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				doFunc()
			}
		}()
	}

	wg.Wait()
}

func TestName111111a(t *testing.T) {
	arr := strings.SplitN("1:testkakskdask:1111", ":", 2) // -> ["1", "testkakskdask:1111"]
	t.Log(arr)
}
@ -2,16 +2,17 @@ package mgo

 import (
 	"context"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
-	"github.com/openimsdk/tools/db/mongoutil"
-	"go.mongodb.org/mongo-driver/bson"
-	"go.mongodb.org/mongo-driver/mongo"
-	"go.mongodb.org/mongo-driver/mongo/options"
 	"math"
 	"math/rand"
 	"strconv"
 	"testing"
 	"time"
+
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
+	"github.com/openimsdk/tools/db/mongoutil"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
 )

 func TestName1(t *testing.T) {
@ -93,7 +94,7 @@ func TestName3(t *testing.T) {

 func TestName4(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
 	defer cancel()
-	cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.66:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
+	cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))

 	msg, err := NewMsgMongo(cli.Database("openim_v3"))
 	if err != nil {
@ -109,6 +110,41 @@ func TestName4(t *testing.T) {
 }

+func TestName5(t *testing.T) {
+	var v time.Time
+	t.Log(v.UnixMilli())
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
+	defer cancel()
+	cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
+
+	tmp, err := NewMsgMongo(cli.Database("openim_v3"))
+	if err != nil {
+		panic(err)
+	}
+	msg := tmp.(*MsgMgo)
+	ts := time.Now().Add(-time.Hour * 24 * 5).UnixMilli()
+	t.Log(ts)
+	var seqs []int64
+	for i := 1; i < 256; i++ {
+		seqs = append(seqs, int64(i))
+	}
+	res, err := msg.FindSeqs(ctx, "si_4924054191_9511766539", seqs)
+	if err != nil {
+		panic(err)
+	}
+	t.Log(res)
+}
+
+//func TestName6(t *testing.T) {
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second*300)
+//	defer cancel()
+//	cli := Result(mongo.Connect(ctx, options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second)))
+//
+//	tmp, err := NewMsgMongo(cli.Database("openim_v3"))
+//	if err != nil {
+//		panic(err)
+//	}
+//	msg := tmp.(*MsgMgo)
+//	seq, sendTime, err := msg.findBeforeSendTime(ctx, "si_4924054191_9511766539", 1144)
+//	if err != nil {
+//		panic(err)
+//	}
+//	t.Log(seq, sendTime)
+//}
@ -2,10 +2,11 @@ package mgo

 import (
 	"context"
-	"go.mongodb.org/mongo-driver/mongo"
-	"go.mongodb.org/mongo-driver/mongo/options"
 	"testing"
 	"time"
+
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
 )

 func Result[V any](val V, err error) V {
@ -19,7 +20,7 @@ func Mongodb() *mongo.Database {
 	return Result(
 		mongo.Connect(context.Background(),
 			options.Client().
-				ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").
+				ApplyURI("mongodb://openIM:openIM123@172.16.8.135:37017/openim_v3?maxPoolSize=100").
 				SetConnectTimeout(5*time.Second)),
 	).Database("openim_v3")
 }
@ -18,4 +18,5 @@ const (
 	SeqConversationName = "seq"
 	SeqUserName         = "seq_user"
 	StreamMsgName       = "stream_msg"
+	CacheName           = "cache"
 )
9
pkg/common/storage/model/cache.go
Normal file
@ -0,0 +1,9 @@
package model

import "time"

type Cache struct {
	Key      string     `bson:"key"`
	Value    string     `bson:"value"`
	ExpireAt *time.Time `bson:"expire_at"`
}
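// A nil ExpireAt marks a permanent entry; the read paths accept a document
// when expire_at is either null or still in the future. Expired documents are
// presumably also purged server-side by a TTL index created in NewCacheMgo
// (not shown in this diff); a hedged sketch of such an index:
//
//	mongo.IndexModel{
//		Keys:    bson.D{{Key: "expire_at", Value: 1}},
//		Options: options.Index().SetExpireAfterSeconds(0),
//	}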
25
pkg/dbbuild/builder.go
Normal file
@ -0,0 +1,25 @@
package dbbuild

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/redis/go-redis/v9"
)

type Builder interface {
	Mongo(ctx context.Context) (*mongoutil.Client, error)
	Redis(ctx context.Context) (redis.UniversalClient, error)
}

func NewBuilder(mongoConf *config.Mongo, redisConf *config.Redis) Builder {
	if config.Standalone() {
		globalStandalone.setConfig(mongoConf, redisConf)
		return globalStandalone
	}
	return &microservices{
		mongo: mongoConf,
		redis: redisConf,
	}
}
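// Usage sketch: callers obtain connections through the Builder without caring
// about the deployment mode. In standalone mode every component shares the
// cached clients held by globalStandalone; in microservices mode each call
// dials anew from its config. mongoConf/redisConf stand for the loaded
// config sections and are assumptions of this sketch.
//
//	b := dbbuild.NewBuilder(mongoConf, redisConf)
//	mgoCli, err := b.Mongo(ctx)
//	rdb, err2 := b.Redis(ctx) // may be (nil, nil) when Redis is disabled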
26
pkg/dbbuild/microservices.go
Normal file
@ -0,0 +1,26 @@
package dbbuild

import (
	"context"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/openimsdk/tools/db/redisutil"
	"github.com/redis/go-redis/v9"
)

type microservices struct {
	mongo *config.Mongo
	redis *config.Redis
}

func (x *microservices) Mongo(ctx context.Context) (*mongoutil.Client, error) {
	return mongoutil.NewMongoDB(ctx, x.mongo.Build())
}

func (x *microservices) Redis(ctx context.Context) (redis.UniversalClient, error) {
	if x.redis.Disable {
		return nil, nil
	}
	return redisutil.NewRedisClient(ctx, x.redis.Build())
}
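// Note: when Redis is disabled in the config, Redis returns (nil, nil) rather
// than an error, so callers must treat a nil client as "no Redis available":
//
//	rdb, err := b.Redis(ctx)
//	if err != nil {
//		return err
//	}
//	if rdb == nil {
//		// take a Redis-free code path
//	}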
76
pkg/dbbuild/standalone.go
Normal file
@ -0,0 +1,76 @@
package dbbuild

import (
	"context"
	"sync"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/openimsdk/tools/db/redisutil"
	"github.com/redis/go-redis/v9"
)

const (
	standaloneMongo = "mongo"
	standaloneRedis = "redis"
)

var globalStandalone = &standalone{}

type standaloneConn[C any] struct {
	Conn C
	Err  error
}

func (x *standaloneConn[C]) result() (C, error) {
	return x.Conn, x.Err
}

type standalone struct {
	lock  sync.Mutex
	mongo *config.Mongo
	redis *config.Redis
	conn  map[string]any
}

func (x *standalone) setConfig(mongoConf *config.Mongo, redisConf *config.Redis) {
	x.lock.Lock()
	defer x.lock.Unlock()
	x.mongo = mongoConf
	x.redis = redisConf
}

func (x *standalone) Mongo(ctx context.Context) (*mongoutil.Client, error) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.conn == nil {
		x.conn = make(map[string]any)
	}
	v, ok := x.conn[standaloneMongo]
	if !ok {
		var val standaloneConn[*mongoutil.Client]
		val.Conn, val.Err = mongoutil.NewMongoDB(ctx, x.mongo.Build())
		v = &val
		x.conn[standaloneMongo] = v
	}
	return v.(*standaloneConn[*mongoutil.Client]).result()
}

func (x *standalone) Redis(ctx context.Context) (redis.UniversalClient, error) {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.redis.Disable {
		return nil, nil
	}
	if x.conn == nil {
		x.conn = make(map[string]any)
	}
	v, ok := x.conn[standaloneRedis]
	if !ok {
		var val standaloneConn[redis.UniversalClient]
		val.Conn, val.Err = redisutil.NewRedisClient(ctx, x.redis.Build())
		v = &val
		x.conn[standaloneRedis] = v
	}
	return v.(*standaloneConn[redis.UniversalClient]).result()
}
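// Design note: standalone memoizes connections in a mutex-guarded map, and
// the generic standaloneConn[C] caches the error together with the client, so
// a failed dial is returned again on later calls instead of being retried.
// Sketch of the access pattern in single-process mode:
//
//	db1, err := globalStandalone.Mongo(ctx) // first call dials MongoDB
//	db2, _ := globalStandalone.Mongo(ctx)   // later calls reuse db1 (db2 == db1)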
Some files were not shown because too many files have changed in this diff.