Merge branch 'online' into allmerge

# Conflicts:
#	go.mod
#	go.sum
withchao 2024-07-10 15:20:41 +08:00
commit 65d4fc7f81
36 changed files with 1103 additions and 364 deletions


@@ -25,5 +25,4 @@ func main() {
 	if err := cmd.NewApiCmd().Exec(); err != nil {
 		program.ExitWithError(err)
 	}
 }

go.mod

@@ -12,7 +12,7 @@ require (
 	github.com/gorilla/websocket v1.5.1
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/mitchellh/mapstructure v1.5.0
-	github.com/openimsdk/protocol v0.0.69-alpha.22
+	github.com/openimsdk/protocol v0.0.69-alpha.24
 	github.com/openimsdk/tools v0.0.49-alpha.45
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/prometheus/client_golang v1.18.0

go.sum

@@ -262,8 +262,6 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
 github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
 github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
 github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
-github.com/openimsdk/protocol v0.0.69-alpha.22 h1:kifZWVNDkg9diXFJUJ/Q9xFc80cveBhc+1dUXcE9xHQ=
-github.com/openimsdk/protocol v0.0.69-alpha.22/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
 github.com/openimsdk/tools v0.0.49-alpha.45 h1:XIzCoef4myybOiIlGuRY9FTtGBisZFC4Uy4PhG0ZWQ0=
 github.com/openimsdk/tools v0.0.49-alpha.45/go.mod h1:HtSRjPTL8PsuZ+PhR5noqzrYBF0sdwW3/O/sWVucWg8=
 github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=


@@ -72,6 +72,8 @@ type Client struct {
 	closed     atomic.Bool
 	closedErr  error
 	token      string
+	subLock    sync.Mutex
+	subUserIDs map[string]struct{}
 }

 // ResetClient updates the client's state with new connection and context information.

@@ -202,6 +204,8 @@ func (c *Client) handleMessage(message []byte) error {
 		resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq)
 	case WsSetBackgroundStatus:
 		resp, messageErr = c.setAppBackgroundStatus(ctx, binaryReq)
+	case WsSubUserOnlineStatus:
+		resp, messageErr = c.longConnServer.SubUserOnlineStatus(ctx, c, binaryReq)
 	default:
 		return fmt.Errorf(
 			"ReqIdentifier failed,sendID:%s,msgIncr:%s,reqIdentifier:%d",


@@ -16,10 +16,10 @@ package msggateway

 import (
 	"crypto/rand"
-	"github.com/stretchr/testify/assert"
 	"sync"
 	"testing"
+	"unsafe"
+
+	"github.com/stretchr/testify/assert"
 )

 func mockRandom() []byte {

@@ -132,3 +132,8 @@ func BenchmarkDecompressWithSyncPool(b *testing.B) {
 		assert.Equal(b, nil, err)
 	}
 }
+
+func TestName(t *testing.T) {
+	t.Log(unsafe.Sizeof(Client{}))
+}


@@ -43,6 +43,7 @@ const (
 	WSKickOnlineMsg       = 2002
 	WsLogoutMsg           = 2003
 	WsSetBackgroundStatus = 2004
+	WsSubUserOnlineStatus = 2005
 	WSDataError           = 3001
 )


@@ -19,6 +19,7 @@ import (
 	"github.com/openimsdk/open-im-server/v3/pkg/authverify"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
+	"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
 	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/protocol/msggateway"
 	"github.com/openimsdk/tools/discovery"

@@ -31,6 +32,10 @@ import (
 func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
 	s.LongConnServer.SetDiscoveryRegistry(disCov, config)
 	msggateway.RegisterMsgGatewayServer(server, s)
+	s.userRcp = rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
+	if s.ready != nil {
+		return s.ready(s)
+	}
 	return nil
 }

@@ -50,18 +55,21 @@ type Server struct {
 	LongConnServer LongConnServer
 	config         *Config
 	pushTerminal   map[int]struct{}
+	ready          func(srv *Server) error
+	userRcp        rpcclient.UserRpcClient
 }

 func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
 	s.LongConnServer = LongConnServer
 }

-func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server {
+func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server {
 	s := &Server{
 		rpcPort:        rpcPort,
 		LongConnServer: longConnServer,
 		pushTerminal:   make(map[int]struct{}),
 		config:         conf,
+		ready:          ready,
 	}
 	s.pushTerminal[constant.IOSPlatformID] = struct{}{}
 	s.pushTerminal[constant.AndroidPlatformID] = struct{}{}


@@ -17,6 +17,8 @@ package msggateway

 import (
 	"context"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
+	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/utils/datautil"
 	"time"

@@ -26,6 +28,7 @@ import (
 type Config struct {
 	MsgGateway     config.MsgGateway
 	Share          config.Share
+	RedisConfig    config.Redis
 	WebhooksConfig config.Webhooks
 	Discovery      config.Discovery
 }

@@ -42,18 +45,25 @@ func Start(ctx context.Context, index int, conf *Config) error {
 	if err != nil {
 		return err
 	}
+	rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
+	if err != nil {
+		return err
+	}
-	longServer, err := NewWsServer(
+	longServer := NewWsServer(
 		conf,
 		WithPort(wsPort),
 		WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
 		WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
 		WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
 	)
-	if err != nil {
-		return err
-	}

-	hubServer := NewServer(rpcPort, longServer, conf)
+	hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
+		longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
+		return nil
+	})
+
+	go longServer.ChangeOnlineStatus(4)

 	netDone := make(chan error)
 	go func() {
 		err = hubServer.Start(ctx, index, conf)


@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
+	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
 	pbAuth "github.com/openimsdk/protocol/auth"
 	"github.com/openimsdk/tools/mcontext"
 	"net/http"

@@ -48,6 +49,7 @@ type LongConnServer interface {
 	KickUserConn(client *Client) error
 	UnRegister(c *Client)
 	SetKickHandlerInfo(i *kickHandler)
+	SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error)
 	Compressor
 	Encoder
 	MessageHandler

@@ -60,7 +62,9 @@ type WsServer struct {
 	registerChan      chan *Client
 	unregisterChan    chan *Client
 	kickHandlerChan   chan *kickHandler
-	clients           *UserMap
+	clients           UserMap
+	online            *rpccache.OnlineCache
+	subscription      *Subscription
 	clientPool        sync.Pool
 	onlineUserNum     atomic.Int64
 	onlineUserConnNum atomic.Int64

@@ -90,18 +94,18 @@ func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry,
 	ws.disCov = disCov
 }

-func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
-	err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
-	if err != nil {
-		log.ZWarn(ctx, "SetUserStatus err", err)
-	}
-	switch status {
-	case constant.Online:
-		ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
-	case constant.Offline:
-		ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
-	}
-}
+//func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
+//	err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
+//	if err != nil {
+//		log.ZWarn(ctx, "SetUserStatus err", err)
+//	}
+//	switch status {
+//	case constant.Online:
+//		ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
+//	case constant.Offline:
+//		ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
+//	}
+//}

 func (ws *WsServer) UnRegister(c *Client) {
 	ws.unregisterChan <- c

@@ -119,11 +123,13 @@ func (ws *WsServer) GetUserPlatformCons(userID string, platform int) ([]*Client,
 	return ws.clients.Get(userID, platform)
 }

-func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
+func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
 	var config configs
 	for _, o := range opts {
 		o(&config)
 	}
+	//userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
 	v := validator.New()
 	return &WsServer{
 		msgGatewayConfig: msgGatewayConfig,

@@ -141,10 +147,11 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
 		kickHandlerChan: make(chan *kickHandler, 1000),
 		validate:        v,
 		clients:         newUserMap(),
+		subscription:    newSubscription(),
 		Compressor:      NewGzipCompressor(),
 		Encoder:         NewGobEncoder(),
 		webhookClient:   webhook.NewWebhookClient(msgGatewayConfig.WebhooksConfig.URL),
-	}, nil
+	}
 }

 func (ws *WsServer) Run(done chan error) error {

@@ -278,11 +285,11 @@ func (ws *WsServer) registerClient(client *Client) {
 		}()
 	}

-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
-	}()
+	//wg.Add(1)
+	//go func() {
+	//	defer wg.Done()
+	//	ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
+	//}()

 	wg.Wait()

@@ -309,7 +316,7 @@ func getRemoteAdders(client []*Client) string {
 }

 func (ws *WsServer) KickUserConn(client *Client) error {
-	ws.clients.deleteClients(client.UserID, []*Client{client})
+	ws.clients.DeleteClients(client.UserID, []*Client{client})
 	return client.KickOnlineMessage()
 }

@@ -325,7 +332,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
 	if !clientOK {
 		return
 	}
-	ws.clients.deleteClients(newClient.UserID, oldClients)
+	ws.clients.DeleteClients(newClient.UserID, oldClients)
 	for _, c := range oldClients {
 		err := c.KickOnlineMessage()
 		if err != nil {

@@ -345,13 +352,16 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
 func (ws *WsServer) unregisterClient(client *Client) {
 	defer ws.clientPool.Put(client)
-	isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr())
+	isDeleteUser := ws.clients.DeleteClients(client.UserID, []*Client{client})
 	if isDeleteUser {
 		ws.onlineUserNum.Add(-1)
 		prommetrics.OnlineUserGauge.Dec()
 	}
 	ws.onlineUserConnNum.Add(-1)
-	ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
+	client.subLock.Lock()
+	clear(client.subUserIDs)
+	client.subLock.Unlock()
+	//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
 	log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
 		ws.onlineUserNum.Load(), "online user conn Num",
 		ws.onlineUserConnNum.Load(),


@@ -0,0 +1,112 @@
package msggateway

import (
	"context"
	"crypto/md5"
	"encoding/binary"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	pbuser "github.com/openimsdk/protocol/user"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/utils/datautil"
	"math/rand"
	"strconv"
	"time"
)

func (ws *WsServer) ChangeOnlineStatus(concurrent int) {
	if concurrent < 1 {
		concurrent = 1
	}
	const renewalTime = cachekey.OnlineExpire / 3
	//const renewalTime = time.Second * 10
	renewalTicker := time.NewTicker(renewalTime)

	requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent)
	changeStatus := make([][]UserState, concurrent)
	for i := 0; i < concurrent; i++ {
		requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64)
		changeStatus[i] = make([]UserState, 0, 100)
	}

	mergeTicker := time.NewTicker(time.Second)

	local2pb := func(u UserState) *pbuser.UserOnlineStatus {
		return &pbuser.UserOnlineStatus{
			UserID:  u.UserID,
			Online:  u.Online,
			Offline: u.Offline,
		}
	}

	rNum := rand.Uint64()
	pushUserState := func(us ...UserState) {
		for _, u := range us {
			sum := md5.Sum([]byte(u.UserID))
			i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
			changeStatus[i] = append(changeStatus[i], u)
			status := changeStatus[i]
			if len(status) == cap(status) {
				req := &pbuser.SetUserOnlineStatusReq{
					Status: datautil.Slice(status, local2pb),
				}
				changeStatus[i] = status[:0]
				select {
				case requestChs[i] <- req:
				default:
					log.ZError(context.Background(), "user online processing is too slow", nil)
				}
			}
		}
	}

	pushAllUserState := func() {
		for i, status := range changeStatus {
			if len(status) == 0 {
				continue
			}
			req := &pbuser.SetUserOnlineStatusReq{
				Status: datautil.Slice(status, local2pb),
			}
			changeStatus[i] = status[:0]
			select {
			case requestChs[i] <- req:
			default:
				log.ZError(context.Background(), "user online processing is too slow", nil)
			}
		}
	}

	opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10))
	doRequest := func(req *pbuser.SetUserOnlineStatusReq) {
		ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5)
		defer cancel()
		if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil {
			log.ZError(ctx, "update user online status", err)
		}
	}

	for i := 0; i < concurrent; i++ {
		go func(ch <-chan *pbuser.SetUserOnlineStatusReq) {
			for req := range ch {
				doRequest(req)
			}
		}(requestChs[i])
	}

	for {
		select {
		case <-mergeTicker.C:
			pushAllUserState()
		case now := <-renewalTicker.C:
			deadline := now.Add(-cachekey.OnlineExpire / 3)
			users := ws.clients.GetAllUserStatus(deadline, now)
			log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users))
			pushUserState(users...)
		case state := <-ws.clients.UserState():
			log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline)
			pushUserState(state)
		}
	}
}
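
The worker-selection logic above is what keeps status updates ordered per user: every UserState for a given userID hashes to the same request channel, so that user's online/offline events are applied in sequence even with several concurrent workers. A minimal standalone sketch of the same sharding idea (not part of this changeset; rNum mirrors the per-process random offset above):

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// shardIndex returns a stable worker index in [0, concurrent) for a user.
// The random offset only rotates the mapping; it stays fixed for the
// lifetime of the process, so per-user ordering is preserved.
func shardIndex(userID string, rNum uint64, concurrent int) int {
	sum := md5.Sum([]byte(userID))
	return int((binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent))
}

func main() {
	for _, u := range []string{"10001", "10002", "10003"} {
		fmt.Println(u, "->", shardIndex(u, 12345, 4))
	}
}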


@@ -0,0 +1,181 @@
package msggateway

import (
	"context"
	"encoding/json"
	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/openimsdk/tools/utils/idutil"
	"google.golang.org/protobuf/proto"
	"sync"
	"time"
)

func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) {
	if ws.clients.RecvSubChange(userID, platformIDs) {
		log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs)
	} else {
		log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs)
	}
	ws.pushUserIDOnlineStatus(ctx, userID, platformIDs)
}

func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) {
	var sub sdkws.SubUserOnlineStatus
	if err := proto.Unmarshal(data.Data, &sub); err != nil {
		return nil, err
	}
	ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID)
	var resp sdkws.SubUserOnlineStatusTips
	if len(sub.SubscribeUserID) > 0 {
		resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID))
		for _, userID := range sub.SubscribeUserID {
			platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID)
			if err != nil {
				return nil, err
			}
			resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{
				UserID:            userID,
				OnlinePlatformIDs: platformIDs,
			})
		}
	}
	return proto.Marshal(&resp)
}
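
On the wire, a subscription request is a serialized sdkws.SubUserOnlineStatus carried in Req.Data with the request identifier set to WsSubUserOnlineStatus (2005); the response is a snapshot of each newly subscribed user's current platforms, after which changes arrive as notifications. A hedged client-side sketch of building that payload (field names are taken from the handler above; transport details are elided):

package main

import (
	"fmt"

	"github.com/openimsdk/protocol/sdkws"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Subscribe to two users and drop a third in one request.
	sub := &sdkws.SubUserOnlineStatus{
		SubscribeUserID:   []string{"10001", "10002"},
		UnsubscribeUserID: []string{"10003"},
	}
	data, err := proto.Marshal(sub)
	if err != nil {
		panic(err)
	}
	// data travels as Req.Data with ReqIdentifier = WsSubUserOnlineStatus.
	fmt.Println(len(data))
}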
type subClient struct {
	clients map[string]*Client
}

func newSubscription() *Subscription {
	return &Subscription{
		userIDs: make(map[string]*subClient),
	}
}

type Subscription struct {
	lock    sync.RWMutex
	userIDs map[string]*subClient
}

func (s *Subscription) GetClient(userID string) []*Client {
	s.lock.RLock()
	defer s.lock.RUnlock()
	cs, ok := s.userIDs[userID]
	if !ok {
		return nil
	}
	clients := make([]*Client, 0, len(cs.clients))
	for _, client := range cs.clients {
		clients = append(clients, client)
	}
	return clients
}

func (s *Subscription) DelClient(client *Client) {
	client.subLock.Lock()
	userIDs := datautil.Keys(client.subUserIDs)
	for _, userID := range userIDs {
		delete(client.subUserIDs, userID)
	}
	client.subLock.Unlock()
	if len(userIDs) == 0 {
		return
	}
	addr := client.ctx.GetRemoteAddr()
	s.lock.Lock()
	defer s.lock.Unlock()
	for _, userID := range userIDs {
		sub, ok := s.userIDs[userID]
		if !ok {
			continue
		}
		delete(sub.clients, addr)
		if len(sub.clients) == 0 {
			delete(s.userIDs, userID)
		}
	}
}

func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) {
	if len(addUserIDs)+len(delUserIDs) == 0 {
		return
	}
	var (
		del = make(map[string]struct{})
		add = make(map[string]struct{})
	)
	client.subLock.Lock()
	for _, userID := range delUserIDs {
		if _, ok := client.subUserIDs[userID]; !ok {
			continue
		}
		del[userID] = struct{}{}
		delete(client.subUserIDs, userID)
	}
	for _, userID := range addUserIDs {
		delete(del, userID)
		if _, ok := client.subUserIDs[userID]; ok {
			continue
		}
		client.subUserIDs[userID] = struct{}{}
		add[userID] = struct{}{} // record new subscriptions; without this the add loop below would never run
	}
	client.subLock.Unlock()
	if len(del)+len(add) == 0 {
		return
	}
	addr := client.ctx.GetRemoteAddr()
	s.lock.Lock()
	defer s.lock.Unlock()
	for userID := range del {
		sub, ok := s.userIDs[userID]
		if !ok {
			continue
		}
		delete(sub.clients, addr)
		if len(sub.clients) == 0 {
			delete(s.userIDs, userID)
		}
	}
	for userID := range add {
		sub, ok := s.userIDs[userID]
		if !ok {
			sub = &subClient{clients: make(map[string]*Client)}
			s.userIDs[userID] = sub
		}
		sub.clients[addr] = client
	}
}
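
Sub keeps two indexes in step: each client tracks the set of userIDs it watches (subUserIDs, under its own lock), while the Subscription map inverts that relation, keying each watched userID to the subscribing connections by remote address. A trimmed-down standalone sketch of the inverted index (toy types, not the PR's own):

package main

import (
	"fmt"
	"sync"
)

// subIndex maps a watched userID to the connections subscribed to it,
// keyed by remote address so one lookup fans out to every watcher.
type subIndex struct {
	lock    sync.RWMutex
	userIDs map[string]map[string]string // watched userID -> addr -> subscriber
}

func (s *subIndex) sub(addr, subscriber string, watch ...string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	for _, userID := range watch {
		m, ok := s.userIDs[userID]
		if !ok {
			m = make(map[string]string)
			s.userIDs[userID] = m
		}
		m[addr] = subscriber
	}
}

func main() {
	s := &subIndex{userIDs: make(map[string]map[string]string)}
	s.sub("10.0.0.1:9001", "alice", "bob", "carol")
	s.sub("10.0.0.2:9002", "dave", "bob")
	fmt.Println(len(s.userIDs["bob"]), len(s.userIDs["carol"])) // 2 1
}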
func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) {
	clients := ws.subscription.GetClient(userID)
	if len(clients) == 0 {
		return
	}
	msgContent, err := json.Marshal(platformIDs)
	if err != nil {
		log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err)
		return
	}
	now := time.Now().UnixMilli()
	msgID := idutil.GetMsgIDByMD5(userID)
	msg := &sdkws.MsgData{
		SendID:           userID,
		ClientMsgID:      msgID,
		ServerMsgID:      msgID,
		SenderPlatformID: constant.AdminPlatformID,
		SessionType:      constant.NotificationChatType,
		ContentType:      constant.UserSubscribeOnlineStatusNotification,
		Content:          msgContent,
		SendTime:         now,
		CreateTime:       now,
	}
	for _, client := range clients {
		msg.RecvID = client.UserID
		if err := client.PushMessage(ctx, msg); err != nil {
			log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "content", msgContent)
		}
	}
}


@@ -1,135 +1,185 @@
-// Copyright © 2023 OpenIM. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
 package msggateway

 import (
-	"context"
-	"sync"
-
-	"github.com/openimsdk/tools/log"
 	"github.com/openimsdk/tools/utils/datautil"
+	"sync"
+	"time"
 )

-type UserMap struct {
-	m sync.Map
-}
+type UserMap interface {
+	GetAll(userID string) ([]*Client, bool)
+	Get(userID string, platformID int) ([]*Client, bool, bool)
+	Set(userID string, v *Client)
+	DeleteClients(userID string, clients []*Client) (isDeleteUser bool)
+	UserState() <-chan UserState
+	GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState
+	RecvSubChange(userID string, platformIDs []int32) bool
+}

-func newUserMap() *UserMap {
-	return &UserMap{}
-}
+type UserState struct {
+	UserID  string
+	Online  []int32
+	Offline []int32
+}
+
+type UserPlatform struct {
+	Time    time.Time
+	Clients []*Client
+}
+
+func (u *UserPlatform) PlatformIDs() []int32 {
+	if len(u.Clients) == 0 {
+		return nil
+	}
+	platformIDs := make([]int32, 0, len(u.Clients))
+	for _, client := range u.Clients {
+		platformIDs = append(platformIDs, int32(client.PlatformID))
+	}
+	return platformIDs
+}
+
+func (u *UserPlatform) PlatformIDSet() map[int32]struct{} {
+	if len(u.Clients) == 0 {
+		return nil
+	}
+	platformIDs := make(map[int32]struct{})
+	for _, client := range u.Clients {
+		platformIDs[int32(client.PlatformID)] = struct{}{}
+	}
+	return platformIDs
+}
+
+func newUserMap() UserMap {
+	return &userMap{
+		data: make(map[string]*UserPlatform),
+		ch:   make(chan UserState, 10000),
+	}
+}
+
+type userMap struct {
+	lock sync.RWMutex
+	data map[string]*UserPlatform
+	ch   chan UserState
+}
+
+func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return false
+	}
+	localPlatformIDs := result.PlatformIDSet()
+	for _, platformID := range platformIDs {
+		delete(localPlatformIDs, platformID)
+	}
+	if len(localPlatformIDs) == 0 {
+		return false
+	}
+	u.push(userID, result, nil)
+	return true
+}
+
+func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool {
+	select {
+	case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}:
+		userPlatform.Time = time.Now()
+		return true
+	default:
+		return false
+	}
+}

-func (u *UserMap) GetAll(key string) ([]*Client, bool) {
-	allClients, ok := u.m.Load(key)
-	if ok {
-		return allClients.([]*Client), ok
-	}
-	return nil, ok
-}
+func (u *userMap) GetAll(userID string) ([]*Client, bool) {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return nil, false
+	}
+	return result.Clients, true
+}

-func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) {
-	allClients, userExisted := u.m.Load(key)
-	if userExisted {
-		var clients []*Client
-		for _, client := range allClients.([]*Client) {
-			if client.PlatformID == platformID {
-				clients = append(clients, client)
-			}
-		}
-		if len(clients) > 0 {
-			return clients, userExisted, true
-		}
-		return clients, userExisted, false
-	}
-	return nil, userExisted, false
-}
+func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return nil, false, false
+	}
+	var clients []*Client
+	for _, client := range result.Clients {
+		if client.PlatformID == platformID {
+			clients = append(clients, client)
+		}
+	}
+	return clients, true, len(clients) > 0
+}

-// Set adds a client to the map.
-func (u *UserMap) Set(key string, v *Client) {
-	allClients, existed := u.m.Load(key)
-	if existed {
-		log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID)
-		oldClients := allClients.([]*Client)
-		oldClients = append(oldClients, v)
-		u.m.Store(key, oldClients)
-	} else {
-		log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID)
-		var clients []*Client
-		clients = append(clients, v)
-		u.m.Store(key, clients)
-	}
-}
+func (u *userMap) Set(userID string, client *Client) {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	result, ok := u.data[userID]
+	if ok {
+		result.Clients = append(result.Clients, client)
+	} else {
+		result = &UserPlatform{
+			Clients: []*Client{client},
+		}
+		u.data[userID] = result
+	}
+	u.push(client.UserID, result, nil)
+}

-func (u *UserMap) delete(key string, connRemoteAddr string) (isDeleteUser bool) {
-	// Attempt to load the clients associated with the key.
-	allClients, existed := u.m.Load(key)
-	if !existed {
-		// Return false immediately if the key does not exist.
-		return false
-	}
-
-	// Convert allClients to a slice of *Client.
-	oldClients := allClients.([]*Client)
-	var remainingClients []*Client
-	for _, client := range oldClients {
-		// Keep clients that do not match the connRemoteAddr.
-		if client.ctx.GetRemoteAddr() != connRemoteAddr {
-			remainingClients = append(remainingClients, client)
-		}
-	}
-
-	// If no clients remain after filtering, delete the key from the map.
-	if len(remainingClients) == 0 {
-		u.m.Delete(key)
-		return true
-	}
-
-	// Otherwise, update the key with the remaining clients.
-	u.m.Store(key, remainingClients)
-	return false
-}
-
-func (u *UserMap) deleteClients(key string, clients []*Client) (isDeleteUser bool) {
-	m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) {
-		return c.ctx.GetRemoteAddr(), struct{}{}
-	})
-	allClients, existed := u.m.Load(key)
-	if !existed {
-		// If the key doesn't exist, return false.
-		return false
-	}
-
-	// Filter out clients that are in the deleteMap.
-	oldClients := allClients.([]*Client)
-	var remainingClients []*Client
-	for _, client := range oldClients {
-		if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted {
-			remainingClients = append(remainingClients, client)
-		}
-	}
-
-	// Update or delete the key based on the remaining clients.
-	if len(remainingClients) == 0 {
-		u.m.Delete(key)
-		return true
-	}
-
-	u.m.Store(key, remainingClients)
-	return false
-}
+func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) {
+	if len(clients) == 0 {
+		return false
+	}
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	result, ok := u.data[userID]
+	if !ok {
+		return false
+	}
+	offline := make([]int32, 0, len(clients))
+	deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string {
+		return client.ctx.GetRemoteAddr()
+	})
+	tmp := result.Clients
+	result.Clients = result.Clients[:0]
+	for _, client := range tmp {
+		if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli {
+			offline = append(offline, int32(client.PlatformID))
+		} else {
+			result.Clients = append(result.Clients, client)
+		}
+	}
+	defer u.push(userID, result, offline)
+	if len(result.Clients) > 0 {
+		return false
+	}
+	delete(u.data, userID)
+	return true
+}

-func (u *UserMap) DeleteAll(key string) {
-	u.m.Delete(key)
-}
+func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState {
+	u.lock.RLock()
+	defer u.lock.RUnlock()
+	result := make([]UserState, 0, len(u.data))
+	for userID, userPlatform := range u.data {
+		if userPlatform.Time.Before(deadline) {
+			continue
+		}
+		userPlatform.Time = nowtime
+		online := make([]int32, 0, len(userPlatform.Clients))
+		for _, client := range userPlatform.Clients {
+			online = append(online, int32(client.PlatformID))
+		}
+		result = append(result, UserState{UserID: userID, Online: online})
+	}
+	return result
+}
+
+func (u *userMap) UserState() <-chan UserState {
+	return u.ch
+}
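
Note that push never blocks while the map's mutex is held: the select with a default drops the event when the 10000-slot buffer is full, and the renewal ticker in ChangeOnlineStatus later re-publishes the user's full state, repairing any dropped update. A minimal sketch of that non-blocking-send pattern:

package main

import "fmt"

// tryEnqueue drops the event instead of blocking when the buffer is full;
// callers rely on a periodic reconciliation pass to re-send current state.
func tryEnqueue(ch chan int, v int) bool {
	select {
	case ch <- v:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(tryEnqueue(ch, 1)) // true
	fmt.Println(tryEnqueue(ch, 2)) // false: buffer full, event dropped
}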


@@ -28,6 +28,7 @@ import (
 	"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
 	"github.com/openimsdk/protocol/constant"
 	pbchat "github.com/openimsdk/protocol/msg"
+	"github.com/openimsdk/protocol/msggateway"
 	pbpush "github.com/openimsdk/protocol/push"
 	"github.com/openimsdk/protocol/sdkws"
 	"github.com/openimsdk/tools/discovery"

@@ -45,6 +46,7 @@ type ConsumerHandler struct {
 	pushConsumerGroup      *kafka.MConsumerGroup
 	offlinePusher          offlinepush.OfflinePusher
 	onlinePusher           OnlinePusher
+	onlineCache            *rpccache.OnlineCache
 	groupLocalCache        *rpccache.GroupLocalCache
 	conversationLocalCache *rpccache.ConversationLocalCache
 	msgRpcClient           rpcclient.MessageRpcClient

@@ -63,16 +65,17 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
 	if err != nil {
 		return nil, err
 	}
+	userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
 	consumerHandler.offlinePusher = offlinePusher
 	consumerHandler.onlinePusher = NewOnlinePusher(client, config)
 	consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
 	consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, &config.LocalCacheConfig, rdb)
 	consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
 	consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
-	consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient,
-		&config.LocalCacheConfig, rdb)
+	consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
 	consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
 	consumerHandler.config = config
+	consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
 	return &consumerHandler, nil
 }

@@ -125,12 +128,12 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
 }

 // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
-func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error {
+func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
 	log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
 	if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
 		return err
 	}
-	wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, userIDs)
+	wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
 	if err != nil {
 		return err
 	}

@@ -179,6 +182,38 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
 	return true
 }

+func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
+	var (
+		onlineUserIDs  []string
+		offlineUserIDs []string
+	)
+	for _, userID := range pushToUserIDs {
+		online, err := c.onlineCache.GetUserOnline(ctx, userID)
+		if err != nil {
+			return nil, err
+		}
+		if online {
+			onlineUserIDs = append(onlineUserIDs, userID)
+		} else {
+			offlineUserIDs = append(offlineUserIDs, userID)
+		}
+	}
+	var result []*msggateway.SingleMsgToUserResults
+	if len(onlineUserIDs) > 0 {
+		var err error
+		result, err = c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
+		if err != nil {
+			return nil, err
+		}
+	}
+	for _, userID := range offlineUserIDs {
+		result = append(result, &msggateway.SingleMsgToUserResults{
+			UserID: userID,
+		})
+	}
+	return result, nil
+}

 func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
 	log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
 	var pushToUserIDs []string

@@ -192,7 +227,7 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s
 		return err
 	}

-	wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
+	wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
 	if err != nil {
 		return err
 	}
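
The new GetConnsAndOnlinePush wrapper consults the Redis-backed online cache before fanning out to gateways, and still appends an empty result entry for each offline recipient so the downstream offline-push decision sees every user. A minimal sketch of that partition step, with a hypothetical isOnline standing in for onlineCache.GetUserOnline:

package main

import "fmt"

// partition splits recipients by a presence check; isOnline is a stand-in
// for the rpccache.OnlineCache lookup used by the consumer handler above.
func partition(userIDs []string, isOnline func(string) bool) (online, offline []string) {
	for _, id := range userIDs {
		if isOnline(id) {
			online = append(online, id)
		} else {
			offline = append(offline, id)
		}
	}
	return
}

func main() {
	on, off := partition([]string{"a", "b", "c"}, func(id string) bool { return id != "b" })
	fmt.Println(on, off) // [a c] [b]
}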

internal/rpc/user/online.go

@@ -0,0 +1,122 @@
package user

import (
	"context"
	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/sdkws"
	pbuser "github.com/openimsdk/protocol/user"
)

func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) {
	platformIDs, err := s.online.GetOnline(ctx, userID)
	if err != nil {
		return nil, err
	}
	status := pbuser.OnlineStatus{
		UserID:      userID,
		PlatformIDs: platformIDs,
	}
	if len(platformIDs) > 0 {
		status.Status = constant.Online
	} else {
		status.Status = constant.Offline
	}
	return &status, nil
}

func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) {
	res := make([]*pbuser.OnlineStatus, 0, len(userIDs))
	for _, userID := range userIDs {
		status, err := s.getUserOnlineStatus(ctx, userID)
		if err != nil {
			return nil, err
		}
		res = append(res, status)
	}
	return res, nil
}

// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) {
	if req.Genre == constant.SubscriberUser {
		err := s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
		var status []*pbuser.OnlineStatus
		status, err = s.getUsersOnlineStatus(ctx, req.UserIDs)
		if err != nil {
			return nil, err
		}
		return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
	} else if req.Genre == constant.Unsubscribe {
		err := s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
	}
	return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}

// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) {
	res, err := s.getUsersOnlineStatus(ctx, req.UserIDs)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetUserStatusResp{StatusList: res}, nil
}

// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) {
	var (
		online  []int32
		offline []int32
	)
	switch req.Status {
	case constant.Online:
		online = []int32{req.PlatformID}
	case constant.Offline:
		offline = []int32{req.PlatformID} // the Offline case must populate offline, not online
	}
	if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil {
		return nil, err
	}
	list, err := s.db.GetSubscribedList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	for _, userID := range list {
		tips := &sdkws.UserStatusChangeTips{
			FromUserID: req.UserID,
			ToUserID:   userID,
			Status:     req.Status,
			PlatformID: req.PlatformID,
		}
		s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
	}
	return &pbuser.SetUserStatusResp{}, nil
}

// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
	userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	onlineStatusList, err := s.getUsersOnlineStatus(ctx, userList)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}

func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
	for _, status := range req.Status {
		if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil {
			return nil, err
		}
	}
	return &pbuser.SetUserOnlineStatusResp{}, nil
}


@@ -19,6 +19,7 @@ import (
 	"errors"
 	"github.com/openimsdk/open-im-server/v3/internal/rpc/friend"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
 	tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"

@@ -50,6 +51,7 @@ import (
 )

 type userServer struct {
+	online                   cache.OnlineCache
 	db                       controller.UserDatabase
 	friendNotificationSender *friend.FriendNotificationSender
 	userNotificationSender   *UserNotificationSender

@@ -60,11 +62,6 @@ type userServer struct {
 	webhookClient *webhook.Client
 }

-func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
-	//TODO implement me
-	panic("implement me")
-}
-
 type Config struct {
 	RpcConfig   config.User
 	RedisConfig config.Redis

@@ -103,6 +100,7 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi
 	msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
 	localcache.InitLocalCache(&config.LocalCacheConfig)
 	u := &userServer{
+		online:          redis.NewUserOnline(rdb),
 		db:              database,
 		RegisterCenter:  client,
 		friendRpcClient: &friendRpcClient,

@@ -334,76 +332,6 @@ func (s *userServer) GetAllUserID(ctx context.Context, req *pbuser.GetAllUserIDR
 	return &pbuser.GetAllUserIDResp{Total: int32(total), UserIDs: userIDs}, nil
 }

-// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
-func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (resp *pbuser.SubscribeOrCancelUsersStatusResp, err error) {
-	if req.Genre == constant.SubscriberUser {
-		err = s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
-		if err != nil {
-			return nil, err
-		}
-		var status []*pbuser.OnlineStatus
-		status, err = s.db.GetUserStatus(ctx, req.UserIDs)
-		if err != nil {
-			return nil, err
-		}
-		return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
-	} else if req.Genre == constant.Unsubscribe {
-		err = s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
-}
-
-// GetUserStatus Get the online status of the user.
-func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (resp *pbuser.GetUserStatusResp,
-	err error) {
-	onlineStatusList, err := s.db.GetUserStatus(ctx, req.UserIDs)
-	if err != nil {
-		return nil, err
-	}
-	return &pbuser.GetUserStatusResp{StatusList: onlineStatusList}, nil
-}
-
-// SetUserStatus Synchronize user's online status.
-func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (resp *pbuser.SetUserStatusResp,
-	err error) {
-	err = s.db.SetUserStatus(ctx, req.UserID, req.Status, req.PlatformID)
-	if err != nil {
-		return nil, err
-	}
-	list, err := s.db.GetSubscribedList(ctx, req.UserID)
-	if err != nil {
-		return nil, err
-	}
-	for _, userID := range list {
-		tips := &sdkws.UserStatusChangeTips{
-			FromUserID: req.UserID,
-			ToUserID:   userID,
-			Status:     req.Status,
-			PlatformID: req.PlatformID,
-		}
-		s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
-	}
-	return &pbuser.SetUserStatusResp{}, nil
-}
-
-// GetSubscribeUsersStatus Get the online status of subscribers.
-func (s *userServer) GetSubscribeUsersStatus(ctx context.Context,
-	req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
-	userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
-	if err != nil {
-		return nil, err
-	}
-	onlineStatusList, err := s.db.GetUserStatus(ctx, userList)
-	if err != nil {
-		return nil, err
-	}
-	return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
-}
-
 // ProcessUserCommandAdd user general function add.
 func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) {
 	err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID)


@@ -17,9 +17,6 @@ package tools

 import (
 	"context"
 	"fmt"
-	"os"
-	"time"
-
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
 	"github.com/openimsdk/protocol/msg"

@@ -28,6 +25,8 @@ import (
 	"github.com/openimsdk/tools/mw"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
+	"os"
+	"time"

 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"


@@ -37,6 +37,7 @@ func NewMsgGatewayCmd() *MsgGatewayCmd {
 	ret.configMap = map[string]any{
 		OpenIMMsgGatewayCfgFileName: &msgGatewayConfig.MsgGateway,
 		ShareFileName:               &msgGatewayConfig.Share,
+		RedisConfigFileName:         &msgGatewayConfig.RedisConfig,
 		WebhooksConfigFileName:      &msgGatewayConfig.WebhooksConfig,
 		DiscoveryConfigFilename:     &msgGatewayConfig.Discovery,
 	}


@@ -15,15 +15,14 @@
 package config

 import (
-	"strings"
-	"time"
-
 	"github.com/openimsdk/tools/db/mongoutil"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/mq/kafka"
 	"github.com/openimsdk/tools/s3/cos"
 	"github.com/openimsdk/tools/s3/minio"
 	"github.com/openimsdk/tools/s3/oss"
+	"strings"
+	"time"
 )

 type CacheConfig struct {


@@ -0,0 +1,13 @@
package cachekey

import "time"

const (
	OnlineKey     = "ONLINE:"
	OnlineChannel = "online_change"
	OnlineExpire  = time.Hour / 2
)

func GetOnlineKey(userID string) string {
	return OnlineKey + userID
}
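
These three constants tie the design together: gateway entries written under ONLINE:<userID> live for half an hour, and the gateway's renewal loop re-publishes state every OnlineExpire/3, so an entry survives two missed renewals before expiring. A tiny sketch of the derived values (computed from the constants above):

package main

import (
	"fmt"
	"time"
)

const (
	OnlineKey    = "ONLINE:"
	OnlineExpire = time.Hour / 2
)

func main() {
	fmt.Println(OnlineKey + "10000") // ONLINE:10000
	fmt.Println(OnlineExpire / 3)    // 10m0s renewal interval
}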


@@ -17,7 +17,6 @@ package cachekey
 const (
 	UserInfoKey             = "USER_INFO:"
 	UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:"
-	olineStatusKey          = "ONLINE_STATUS:"
 )

 func GetUserInfoKey(userID string) string {

@@ -27,7 +26,3 @@ func GetUserInfoKey(userID string) string {
 func GetUserGlobalRecvMsgOptKey(userID string) string {
 	return UserGlobalRecvMsgOptKey + userID
 }
-
-func GetOnlineStatusKey(modKey string) string {
-	return olineStatusKey + modKey
-}

pkg/common/storage/cache/online.go

@@ -0,0 +1,8 @@
package cache

import "context"

type OnlineCache interface {
	GetOnline(ctx context.Context, userID string) ([]int32, error)
	SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
}
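
Because the interface is this small, tests can swap in a memory-backed fake instead of Redis. A hedged example (hypothetical, not part of the PR) that satisfies the same method set:

package main

import (
	"context"
	"fmt"
	"sync"
)

// fakeOnline is an in-memory stand-in with the same methods as
// cache.OnlineCache; offline platform IDs are removed, online ones added.
type fakeOnline struct {
	mu   sync.Mutex
	data map[string][]int32
}

func (f *fakeOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	return append([]int32(nil), f.data[userID]...), nil
}

func (f *fakeOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	set := make(map[int32]struct{}, len(f.data[userID]))
	for _, p := range f.data[userID] {
		set[p] = struct{}{}
	}
	for _, p := range offline {
		delete(set, p)
	}
	for _, p := range online {
		set[p] = struct{}{}
	}
	ids := make([]int32, 0, len(set))
	for p := range set {
		ids = append(ids, p)
	}
	f.data[userID] = ids
	return nil
}

func main() {
	f := &fakeOnline{data: make(map[string][]int32)}
	_ = f.SetUserOnline(context.Background(), "10000", []int32{1, 3}, nil)
	ids, _ := f.GetOnline(context.Background(), "10000")
	fmt.Println(ids) // platform IDs now online (order not guaranteed)
}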


@@ -0,0 +1,89 @@
package redis

import (
	"context"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
	"strconv"
	"time"
)

func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
	return &userOnline{
		rdb:         rdb,
		expire:      cachekey.OnlineExpire,
		channelName: cachekey.OnlineChannel,
	}
}

type userOnline struct {
	rdb         redis.UniversalClient
	expire      time.Duration
	channelName string
}

func (s *userOnline) getUserOnlineKey(userID string) string {
	return cachekey.GetOnlineKey(userID)
}

func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
	members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{
		Min: strconv.FormatInt(time.Now().Unix(), 10),
		Max: "+inf",
	}).Result()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	platformIDs := make([]int32, 0, len(members))
	for _, member := range members {
		val, err := strconv.Atoi(member)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		platformIDs = append(platformIDs, int32(val))
	}
	return platformIDs, nil
}

func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	script := `
	local key = KEYS[1]
	local score = ARGV[3]
	local num1 = redis.call("ZCARD", key)
	redis.call("ZREMRANGEBYSCORE", key, "-inf", ARGV[2])
	for i = 5, tonumber(ARGV[4])+4 do
		redis.call("ZREM", key, ARGV[i])
	end
	local num2 = redis.call("ZCARD", key)
	for i = 5+tonumber(ARGV[4]), #ARGV do
		redis.call("ZADD", key, score, ARGV[i])
	end
	redis.call("EXPIRE", key, ARGV[1])
	local num3 = redis.call("ZCARD", key)
	local change = (num1 ~= num2) or (num2 ~= num3)
	if change then
		local members = redis.call("ZRANGE", key, 0, -1)
		table.insert(members, KEYS[2])
		redis.call("PUBLISH", KEYS[3], table.concat(members, ":"))
		return 1
	else
		return 0
	end
`
	now := time.Now()
	argv := make([]any, 0, 2+len(online)+len(offline))
	argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline)))
	for _, platformID := range offline {
		argv = append(argv, platformID)
	}
	for _, platformID := range online {
		argv = append(argv, platformID)
	}
	keys := []string{s.getUserOnlineKey(userID), userID, s.channelName}
	if err := s.rdb.Eval(ctx, script, keys, argv).Err(); err != nil {
		return err
	}
	return nil
}
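
The script keeps one sorted set per user: each member is a platform ID and its score is the Unix time at which that entry expires, so per-platform expiry falls out of ZREMRANGEBYSCORE, and GetOnline only reads members whose score is still in the future. A minimal sketch of that layout against a local Redis (localhost:6379 is an assumed address, not from the PR):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	key := "ONLINE:10000"
	exp := float64(time.Now().Add(30 * time.Minute).Unix())
	// Members are platform IDs; scores are per-entry expiry timestamps.
	rdb.ZAdd(ctx, key, redis.Z{Score: exp, Member: "1"}, redis.Z{Score: exp, Member: "3"})
	// Like GetOnline above: read only entries that have not yet expired.
	alive, err := rdb.ZRangeByScore(ctx, key, &redis.ZRangeBy{
		Min: fmt.Sprint(time.Now().Unix()),
		Max: "+inf",
	}).Result()
	fmt.Println(alive, err) // ["1" "3"] if Redis is reachable
}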


@@ -0,0 +1,79 @@
package redis

import (
	"context"
	"fmt"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/redis/go-redis/v9"
	"log"
	"strconv"
	"sync/atomic"
	"testing"
	"time"
)

func newTestOnline() *userOnline {
	opt := &redis.Options{
		Addr:     "172.16.8.48:16379",
		Password: "openIM123",
		DB:       0,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return &userOnline{rdb: rdb, expire: time.Hour, channelName: "user_online"}
}

func TestOnline(t *testing.T) {
	ts := newTestOnline()
	var count atomic.Int64
	for i := 0; i < 64; i++ {
		go func(userID string) {
			var err error
			for i := 0; ; i++ {
				if i%2 == 0 {
					err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9})
				} else {
					err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6})
				}
				if err != nil {
					panic(err)
				}
				count.Add(1)
			}
		}(strconv.Itoa(10000 + i))
	}
	ticker := time.NewTicker(time.Second)
	for range ticker.C {
		t.Log(count.Swap(0))
	}
}

func TestGetOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pIDs, err := ts.GetOnline(ctx, "10000")
	if err != nil {
		panic(err)
	}
	t.Log(pIDs)
}

func TestRecvOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pubsub := ts.rdb.Subscribe(ctx, cachekey.OnlineChannel)

	_, err := pubsub.Receive(ctx)
	if err != nil {
		log.Fatalf("Could not subscribe: %v", err)
	}

	ch := pubsub.Channel()

	for msg := range ch {
		fmt.Printf("Received message from channel %s: %s\n", msg.Channel, msg.Payload)
	}
}


@@ -17,7 +17,6 @@ package redis

 import (
 	"context"
 	"encoding/json"
-	"errors"
 	"github.com/dtm-labs/rockscache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"

@@ -29,8 +28,6 @@ import (
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
 	"github.com/redis/go-redis/v9"
-	"hash/crc32"
-	"strconv"
 	"time"
 )

@@ -61,9 +58,9 @@ func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache,
 	}
 }

-func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string {
-	return cachekey.GetOnlineStatusKey(modKey)
-}
+//func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string {
+//	return cachekey.GetOnlineStatusKey(modKey)
+//}

 func (u *UserCacheRedis) CloneUserCache() cache.UserCache {
 	return &UserCacheRedis{

@@ -131,97 +128,6 @@ func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) cache.UserC
 	return cache
 }

-// GetUserStatus get user status.
-func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
-	userStatus := make([]*user.OnlineStatus, 0, len(userIDs))
-	for _, userID := range userIDs {
-		UserIDNum := crc32.ChecksumIEEE([]byte(userID))
-		modKey := strconv.Itoa(int(UserIDNum % statusMod))
-		var onlineStatus user.OnlineStatus
-		key := u.getOnlineStatusKey(modKey)
-		result, err := u.rdb.HGet(ctx, key, userID).Result()
-		if err != nil {
-			if errors.Is(err, redis.Nil) {
-				// key or field does not exist
-				userStatus = append(userStatus, &user.OnlineStatus{
-					UserID:      userID,
-					Status:      constant.Offline,
-					PlatformIDs: nil,
-				})
-				continue
-			} else {
-				return nil, errs.Wrap(err)
-			}
-		}
-		err = json.Unmarshal([]byte(result), &onlineStatus)
-		if err != nil {
-			return nil, errs.Wrap(err)
-		}
-		onlineStatus.UserID = userID
-		onlineStatus.Status = constant.Online
-		userStatus = append(userStatus, &onlineStatus)
-	}
-
-	return userStatus, nil
-}
-
-// SetUserStatus Set the user status and save it in redis.
-func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
-	UserIDNum := crc32.ChecksumIEEE([]byte(userID))
-	modKey := strconv.Itoa(int(UserIDNum % statusMod))
-	key := u.getOnlineStatusKey(modKey)
-	log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key)
-	isNewKey, err := u.rdb.Exists(ctx, key).Result()
-	if err != nil {
-		return errs.Wrap(err)
-	}
-	if isNewKey == 0 {
-		if status == constant.Online {
-			onlineStatus := user.OnlineStatus{
-				UserID:      userID,
-				Status:      constant.Online,
-				PlatformIDs: []int32{platformID},
-			}
-			jsonData, err := json.Marshal(&onlineStatus)
-			if err != nil {
-				return errs.Wrap(err)
-			}
-			_, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
-			if err != nil {
-				return errs.Wrap(err)
-			}
-			u.rdb.Expire(ctx, key, userOlineStatusExpireTime)
-			return nil
-		}
-	}
-
-	isNil := false
-	result, err := u.rdb.HGet(ctx, key, userID).Result()
-	if err != nil {
-		if errors.Is(err, redis.Nil) {
-			isNil = true
-		} else {
-			return errs.Wrap(err)
-		}
-	}
-
-	if status == constant.Offline {
-		err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key)
-		if err != nil {
-			return err
-		}
-	} else {
-		err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key)
-		if err != nil {
-			return errs.Wrap(err)
-		}
-	}
-
-	return nil
-}
-
 func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error {
 	if isNil {
 		log.ZWarn(ctx, "this user not online,maybe trigger order not right",

View File

@ -17,7 +17,6 @@ package cache
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/user"
)
type UserCache interface {
@ -28,6 +27,6 @@ type UserCache interface {
DelUsersInfo(userIDs ...string) UserCache
GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)
DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache
GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
//GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
//SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
}

View File

@ -70,10 +70,6 @@ type UserDatabase interface {
GetAllSubscribeList(ctx context.Context, userID string) ([]string, error)
// GetSubscribedList Get all subscribed lists
GetSubscribedList(ctx context.Context, userID string) ([]string, error)
// GetUserStatus Get the online status of the user
GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
// SetUserStatus Set the user status and store the user status in redis
SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
// CRUD user command
AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error
@ -246,17 +242,6 @@ func (u *userDatabase) GetSubscribedList(ctx context.Context, userID string) ([]
return list, nil
}
// GetUserStatus get user status.
func (u *userDatabase) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
onlineStatusList, err := u.cache.GetUserStatus(ctx, userIDs)
return onlineStatusList, err
}
// SetUserStatus Set the user status and save it in redis.
func (u *userDatabase) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
return u.cache.SetUserStatus(ctx, userID, status, platformID)
}
func (u *userDatabase) AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error {
return u.userDB.AddUserCommand(ctx, userID, Type, UUID, value, ex)
}

View File

@ -31,6 +31,12 @@ type Cache[V any] interface {
Stop()
}
func LRUStringHash(key string) uint64 {
h := fnv.New64a()
h.Write(*(*[]byte)(unsafe.Pointer(&key)))
return h.Sum64()
}
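The new LRUStringHash hoists the FNV-1a closure out of New so other packages (see pkg/rpccache/online.go below) can reuse it. The unsafe cast reinterprets the string header as a byte slice to hash without copying; a safe, copy-based equivalent that produces the same hash, for illustration:

package main

import (
	"fmt"
	"hash/fnv"
)

// hashKey is a copying equivalent of LRUStringHash: FNV-1a over the key's
// bytes. The repo version skips the []byte allocation via unsafe.Pointer.
func hashKey(key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64()
}

func main() {
	const slots = 1024 // matches the slot count used by NewOnlineCache below
	fmt.Println(hashKey("user-123") % slots) // slot index this key maps to
}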
func New[V any](opts ...Option) Cache[V] {
opt := defaultOption()
for _, o := range opts {
@ -49,11 +55,7 @@ func New[V any](opts ...Option) Cache[V] {
if opt.localSlotNum == 1 {
c.local = createSimpleLRU()
} else {
c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, func(key string) uint64 {
h := fnv.New64a()
h.Write(*(*[]byte)(unsafe.Pointer(&key)))
return h.Sum64()
}, createSimpleLRU)
c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, LRUStringHash, createSimpleLRU)
}
if opt.linkSlotNum > 0 {
c.link = link.New(opt.linkSlotNum)

View File

@ -20,6 +20,7 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]
type LRU[K comparable, V any] interface {
Get(key K, fetch func() (V, error)) (V, error)
SetHas(key K, value V) bool
Del(key K) bool
Stop()
}

View File

@ -89,5 +89,15 @@ func (x *ExpirationLRU[K, V]) Del(key K) bool {
return ok
}
func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock()
defer x.lock.Unlock()
if x.core.Contains(key) {
x.core.Add(key, &expirationLruItem[V]{value: value})
return true
}
return false
}
func (x *ExpirationLRU[K, V]) Stop() {
}

View File

@ -88,6 +88,28 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return v.value, v.err
}
//func (x *LayLRU[K, V]) Set(key K, value V) {
// x.lock.Lock()
// x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
// x.lock.Unlock()
//}
//
//func (x *LayLRU[K, V]) Has(key K) bool {
// x.lock.Lock()
// defer x.lock.Unlock()
// return x.core.Contains(key)
//}
func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock()
defer x.lock.Unlock()
if x.core.Contains(key) {
x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
return true
}
return false
}
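SetHas is the key new primitive: it updates an entry only if the key is already cached, so pushed status changes refresh entries someone has actually read without letting the pub/sub feed fill the cache with users nobody asked about. A toy model of the contract:

package main

import "fmt"

// toyCache models the SetHas contract: update only keys that are already
// cached; unsolicited pushes never grow the cache.
type toyCache struct{ m map[string][]int32 }

func (c *toyCache) SetHas(key string, value []int32) bool {
	if _, ok := c.m[key]; ok {
		c.m[key] = value
		return true
	}
	return false
}

func main() {
	c := &toyCache{m: map[string][]int32{"user-123": {1}}}
	fmt.Println(c.SetHas("user-123", []int32{1, 9})) // true: entry refreshed
	fmt.Println(c.SetHas("user-456", []int32{2}))    // false: not cached, ignored
}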
func (x *LayLRU[K, V]) Del(key K) bool {
x.lock.Lock()
ok := x.core.Remove(key)

View File

@ -40,6 +40,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return x.slots[x.getIndex(key)].Get(key, fetch)
}
func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
return x.slots[x.getIndex(key)].SetHas(key, value)
}
func (x *slotLRU[K, V]) Del(key K) bool {
return x.slots[x.getIndex(key)].Del(key)
}

View File

@ -30,7 +30,7 @@ func defaultOption() *option {
localSuccessTTL: time.Minute,
localFailedTTL: time.Second * 5,
delFn: make([]func(ctx context.Context, key ...string), 0, 2),
target: emptyTarget{},
target: EmptyTarget{},
}
}
@ -123,14 +123,14 @@ func WithDeleteKeyBefore(fn func(ctx context.Context, key ...string)) Option {
}
}
type emptyTarget struct{}
func (e emptyTarget) IncrGetHit() {}
func (e emptyTarget) IncrGetSuccess() {}
func (e emptyTarget) IncrGetFailed() {}
func (e emptyTarget) IncrDelHit() {}
func (e emptyTarget) IncrDelNotFound() {}
type EmptyTarget struct{}
func (e EmptyTarget) IncrGetHit() {}
func (e EmptyTarget) IncrGetSuccess() {}
func (e EmptyTarget) IncrGetFailed() {}
func (e EmptyTarget) IncrDelHit() {}
func (e EmptyTarget) IncrDelNotFound() {}
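Renaming emptyTarget to EmptyTarget exports the no-op metrics collector, which is what lets code outside localcache construct an LRU directly. A fragment mirroring the call pkg/rpccache/online.go (below) now makes; the TTL values here are illustrative only:

l := lru.NewLayLRU[string, []int32](2048, time.Minute, time.Second*3,
	localcache.EmptyTarget{}, func(key string, value []int32) {})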

100
pkg/rpccache/online.go Normal file
View File

@ -0,0 +1,100 @@
package rpccache
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/redis/go-redis/v9"
"math/rand"
"strconv"
"time"
)
func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache {
x := &OnlineCache{
user: user,
group: group,
local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
}),
}
go func() {
ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10))
for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() {
userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
if err != nil {
log.ZError(ctx, "OnlineCache redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
continue
}
storageCache := x.setUserOnline(userID, platformIDs)
log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
if fn != nil {
fn(ctx, userID, platformIDs)
}
}
}()
return x
}
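NewOnlineCache keeps itself fresh by subscribing to the Redis channel on which the user service publishes every status change. A stripped-down sketch of the subscriber side, assuming a local Redis and an "online_change" channel name (the real name comes from cachekey.OnlineChannel):

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{Addrs: []string{"localhost:6379"}})
	ctx := context.Background()
	// Each message carries one user's new platform set; see the payload
	// format handled by useronline.ParseUserOnlineStatus below.
	for msg := range rdb.Subscribe(ctx, "online_change").Channel() {
		fmt.Println("payload:", msg.Payload) // e.g. "1:9:user-123"
	}
}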
type OnlineCache struct {
user rpcclient.UserRpcClient
group *GroupLocalCache
local lru.LRU[string, []int32]
}
func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
return o.local.Get(userID, func() ([]int32, error) {
return o.user.GetUserOnlinePlatform(ctx, userID)
})
}
func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
platformIDs, err := o.GetUserOnlinePlatform(ctx, userID)
if err != nil {
return false, err
}
return len(platformIDs) > 0, nil
}
func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
onlineUserIDs := make([]string, 0, len(userIDs))
for _, userID := range userIDs {
online, err := o.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
}
}
log.ZDebug(ctx, "OnlineCache GetUsersOnline", "userIDs", userIDs, "onlineUserIDs", onlineUserIDs)
return onlineUserIDs, nil
}
func (o *OnlineCache) GetGroupOnline(ctx context.Context, groupID string) ([]string, error) {
userIDs, err := o.group.GetGroupMemberIDs(ctx, groupID)
if err != nil {
return nil, err
}
var onlineUserIDs []string
for _, userID := range userIDs {
online, err := o.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
}
}
log.ZDebug(ctx, "OnlineCache GetGroupOnline", "groupID", groupID, "onlineUserIDs", onlineUserIDs, "allUserID", userIDs)
return onlineUserIDs, nil
}
func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
return o.local.SetHas(userID, platformIDs)
}
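Reads go the other way: GetUserOnlinePlatform is a read-through cache, serving hits from the local LRU and falling back to the user RPC only on a miss (the LayLRU also caches a failed fetch briefly via its failed TTL). A toy version of the pattern:

package main

import "fmt"

// getThrough models the read-through path: cache hit wins, a miss triggers
// fetch and the result is stored for next time.
func getThrough(cache map[string][]int32, userID string, fetch func(string) ([]int32, error)) ([]int32, error) {
	if v, ok := cache[userID]; ok {
		return v, nil
	}
	v, err := fetch(userID)
	if err != nil {
		return nil, err
	}
	cache[userID] = v
	return v, nil
}

func main() {
	cache := map[string][]int32{}
	fetch := func(userID string) ([]int32, error) { return []int32{1}, nil } // stand-in for the user RPC
	v, _ := getThrough(cache, "user-123", fetch)
	fmt.Println(v, cache) // [1] map[user-123:[1]]
}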

View File

@ -110,3 +110,18 @@ func (u *UserLocalCache) GetUsersInfoMap(ctx context.Context, userIDs []string)
}
return users, nil
}
//func (u *UserLocalCache) GetUserOnlinePlatform(ctx context.Context, userID string) (val []int32, err error) {
// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform req", "userID", userID)
// defer func() {
// if err == nil {
// log.ZDebug(ctx, "UserLocalCache GetUserOnlinePlatform return", "value", val)
// } else {
// log.ZError(ctx, "UserLocalCache GetUserOnlinePlatform return", err)
// }
// }()
// return localcache.AnyValue[[]int32](u.local.Get(ctx, cachekey.GetOnlineKey(userID), func(ctx context.Context) (any, error) {
// log.ZDebug(ctx, "UserLocalCache GetUserGlobalMsgRecvOpt rpc", "userID", userID)
// return u.client.GetUserGlobalMsgRecvOpt(ctx, userID)
// }))
//}

View File

@ -193,3 +193,25 @@ func (u *UserRpcClient) GetNotificationByID(ctx context.Context, userID string)
})
return err
}
func (u *UserRpcClient) GetUsersOnlinePlatform(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
if len(userIDs) == 0 {
return nil, nil
}
resp, err := u.Client.GetUserStatus(ctx, &user.GetUserStatusReq{UserIDs: userIDs, UserID: u.imAdminUserID[0]})
if err != nil {
return nil, err
}
return resp.StatusList, nil
}
func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
resp, err := u.GetUsersOnlinePlatform(ctx, []string{userID})
if err != nil {
return nil, err
}
if len(resp) == 0 {
return nil, nil
}
return resp[0].PlatformIDs, nil
}

View File

@ -0,0 +1,27 @@
package useronline
import (
"errors"
"strconv"
"strings"
)
func ParseUserOnlineStatus(payload string) (string, []int32, error) {
arr := strings.Split(payload, ":")
if len(arr) == 0 {
return "", nil, errors.New("invalid data")
}
userID := arr[len(arr)-1]
if userID == "" {
return "", nil, errors.New("userID is empty")
}
platformIDs := make([]int32, len(arr)-1)
for i := range platformIDs {
platformID, err := strconv.Atoi(arr[i])
if err != nil {
return "", nil, err
}
platformIDs[i] = int32(platformID)
}
return userID, platformIDs, nil
}
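The published payload is colon-separated: the platform IDs come first and the userID is always the final segment, so a bare userID means the user is offline on every platform. A worked example against the parser above:

package main

import (
	"fmt"

	"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
)

func main() {
	// "1:9:user-123" means platforms 1 and 9, userID last.
	userID, platformIDs, err := useronline.ParseUserOnlineStatus("1:9:user-123")
	fmt.Println(userID, platformIDs, err) // user-123 [1 9] <nil>

	// A payload that is only a userID means the user went fully offline.
	userID, platformIDs, _ = useronline.ParseUserOnlineStatus("user-456")
	fmt.Println(userID, platformIDs) // user-456 []
}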