Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-04-06 04:15:46 +08:00)
feat: update group notification when set to null. (#2590)

Includes the following merged changes:
* feat: add long time push msg in prometheus (#2584)
* feat: supports getting messages based on session ID and seq (#2582)
* feat: implement request batch count limit. (#2591)
* fix: getting messages based on session ID and seq (#2595)
* feat: avoid pulling messages from sessions with a large number of max seq values of 0 (#2602)
* refactor: improve db structure in `storage/controller` (#2604)
* feat: implement offline push using kafka (#2600)
* feat: API supports gzip (#2609)
* Fix err (#2608)
* feature: add GetConversationsHasReadAndMaxSeq interface to the WebSocket API. (#2611)
* fix: lru lock (#2613)
* fix: nil pointer error on close (#2618)
* feat: create group can push notification (#2617)
* fix: blockage caused by listen error (#2620)
* fix: go.mod (#2621)
* feat: improve searchMsg implement. (#2614)
* Fix lock (#2622)
* fix: update setGroupInfoEX field name. (#2625)
* fix: update setGroupInfoEX field name (#2626)
* feat: msg gateway add log (#2631)
* fix: update setGroupInfoEx func name and field. (#2634)
* fix: fix setConversations req fill. (#2645)
* fix: GetMsgBySeqs boundary issues (#2647)
* fix: the attribute version is obsolete, remove it (#2644)
* refactor: update Userregister request field. (#2650)

Co-authored-by: Monet Lee <monet_lee@163.com>
Co-authored-by: icey-yu <119291641+icey-yu@users.noreply.github.com>
Co-authored-by: chao <48119764+withchao@users.noreply.github.com>
Co-authored-by: withchao <withchao@users.noreply.github.com>
Co-authored-by: 蔡相跃 <caixiangyue007@gmail.com>
331 lines
9.4 KiB
Go
package rpccache

import (
	"context"
	"fmt"
	"math/rand"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/user"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache"
	"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
	"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
	"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
	"github.com/openimsdk/tools/db/cacheutil"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/redis/go-redis/v9"
)

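// NewOnlineCache builds an online-status cache in one of two modes. When
// fullUserCache is true, it loads every online user into an in-memory map
// (mapCache) in a background goroutine; otherwise it lazily tracks a subset of
// users in a slotted LRU (lruCache). In both modes it then subscribes to the
// Redis online-status channel to keep the cache current. fn, if non-nil, is
// invoked for each status change received in LRU mode.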
func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) {
	l := &sync.Mutex{}
	x := &OnlineCache{
		user:          user,
		group:         group,
		fullUserCache: fullUserCache,
		Lock:          l,
		Cond:          sync.NewCond(l),
	}

	ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))

	switch x.fullUserCache {
	case true:
		log.ZDebug(ctx, "fullUserCache is true")
		x.mapCache = cacheutil.NewCache[string, []int32]()
		go func() {
			if err := x.initUsersOnlineStatus(ctx); err != nil {
				log.ZError(ctx, "initUsersOnlineStatus failed", err)
			}
		}()
	case false:
		log.ZDebug(ctx, "fullUserCache is false")
		x.lruCache = lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
			return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
		})
		x.CurrentPhase.Store(DoSubscribeOver)
		x.Cond.Broadcast()
	}

	go func() {
		x.doSubscribe(ctx, rdb, fn)
	}()
	return x, nil
}
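// A minimal caller-side sketch (hypothetical wiring; the userClient, groupCache,
// and rdb values below are assumed to be constructed elsewhere in the service):
//
//	onlineCache, err := NewOnlineCache(userClient, groupCache, rdb, false, nil)
//	if err != nil {
//		return err
//	}
//	online, err := onlineCache.GetUserOnline(ctx, "userID-1")
//	if err != nil {
//		return err
//	}
//	log.ZDebug(ctx, "user online", "online", online)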

const (
	Begin uint32 = iota
	DoOnlineStatusOver
	DoSubscribeOver
)
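// These phases track the startup handshake between initUsersOnlineStatus and
// doSubscribe. In LRU mode NewOnlineCache jumps straight to DoSubscribeOver;
// in full-cache mode initUsersOnlineStatus marks DoOnlineStatusOver when the
// initial load completes, and doSubscribe marks DoSubscribeOver after draining
// the events buffered during that load.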

type OnlineCache struct {
	user  rpcclient.UserRpcClient
	group *GroupLocalCache

	// fullUserCache, if enabled, caches the online status of all users in mapCache;
	// otherwise only a subset of users' online statuses (whether online or not) is cached in lruCache.
	fullUserCache bool

	lruCache lru.LRU[string, []int32]
	mapCache *cacheutil.Cache[string, []int32]

	Lock         *sync.Mutex
	Cond         *sync.Cond
	CurrentPhase atomic.Uint32
}
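// initUsersOnlineStatus pages through GetAllOnlineUsers with a cursor and
// records every online user's platform IDs in the cache. Each page is retried
// up to maxTries times, sleeping retryInterval between attempts. Whether it
// succeeds or not, the deferred block advances CurrentPhase to
// DoOnlineStatusOver and wakes doSubscribe.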
func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) {
	log.ZDebug(ctx, "init users online status begin")

	var (
		totalSet      atomic.Int64
		maxTries      = 5
		retryInterval = time.Second * 5

		resp *user.GetAllOnlineUsersResp
	)

	defer func(t time.Time) {
		log.ZInfo(ctx, "init users online status end", "cost", time.Since(t), "totalSet", totalSet.Load())
		o.CurrentPhase.Store(DoOnlineStatusOver)
		o.Cond.Broadcast()
	}(time.Now())

	retryOperation := func(operation func() error, operationName string) error {
		for i := 0; i < maxTries; i++ {
			if err = operation(); err != nil {
				log.ZWarn(ctx, fmt.Sprintf("initUsersOnlineStatus: %s failed", operationName), err)
				time.Sleep(retryInterval)
			} else {
				return nil
			}
		}
		return err
	}

	cursor := uint64(0)
	for resp == nil || resp.NextCursor != 0 {
		if err = retryOperation(func() error {
			resp, err = o.user.GetAllOnlineUsers(ctx, cursor)
			if err != nil {
				return err
			}

			for _, u := range resp.StatusList {
				if u.Status == constant.Online {
					o.setUserOnline(u.UserID, u.PlatformIDs)
				}
				totalSet.Add(1)
			}
			cursor = resp.NextCursor
			return nil
		}, "getAllOnlineUsers"); err != nil {
			return err
		}
	}

	return nil
}
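// doSubscribe subscribes to the Redis online-status channel and blocks until
// CurrentPhase reaches at least DoOnlineStatusOver, so the initial full load
// (when enabled) is applied before live updates. Each message is parsed and,
// in full-cache mode, stored into or deleted from mapCache; in LRU mode it is
// applied via setHasUserOnline and fn is invoked. After draining the backlog
// buffered during the initial load, it advances CurrentPhase to
// DoSubscribeOver and keeps consuming messages until the channel closes.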
func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) {
	o.Lock.Lock()
	ch := rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel()
	for o.CurrentPhase.Load() < DoOnlineStatusOver {
		o.Cond.Wait()
	}
	o.Lock.Unlock()
	log.ZInfo(ctx, "begin doSubscribe")

	doMessage := func(message *redis.Message) {
		userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
		if err != nil {
			log.ZError(ctx, "OnlineCache setHasUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
			return
		}
		log.ZDebug(ctx, fmt.Sprintf("get subscribe %s message", cachekey.OnlineChannel), "userID", userID, "platformIDs", platformIDs)
		switch o.fullUserCache {
		case true:
			if len(platformIDs) == 0 {
				// offline
				o.mapCache.Delete(userID)
			} else {
				o.mapCache.Store(userID, platformIDs)
			}
		case false:
			storageCache := o.setHasUserOnline(userID, platformIDs)
			log.ZDebug(ctx, "OnlineCache setHasUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
			if fn != nil {
				fn(ctx, userID, platformIDs)
			}
		}
	}

	if o.CurrentPhase.Load() == DoOnlineStatusOver {
		for done := false; !done; {
			select {
			case message := <-ch:
				doMessage(message)
			default:
				o.CurrentPhase.Store(DoSubscribeOver)
				o.Cond.Broadcast()
				done = true
			}
		}
	}

	for message := range ch {
		doMessage(message)
	}
}
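// getUserOnlinePlatform returns the platform IDs a user is online on, reading
// from the LRU and falling back to the user RPC (GetUserOnlinePlatform) on a
// cache miss.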
func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
	platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) {
		return o.user.GetUserOnlinePlatform(ctx, userID)
	})
	if err != nil {
		log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID)
		return nil, err
	}
	//log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs)
	return platformIDs, nil
}

func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
	platformIDs, err := o.getUserOnlinePlatform(ctx, userID)
	if err != nil {
		return nil, err
	}
	// Return a copy so callers cannot mutate the slice held in the cache.
	tmp := make([]int32, len(platformIDs))
	copy(tmp, platformIDs)
	return tmp, nil
}

// func (o *OnlineCache) GetUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string]int32, error) {
// 	platformIDs, err := o.getUserOnlinePlatform(ctx, userIDs)
// 	if err != nil {
// 		return nil, err
// 	}
// 	tmp := make([]int32, len(platformIDs))
// 	copy(tmp, platformIDs)
// 	return platformIDs, nil
// }

func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
	platformIDs, err := o.getUserOnlinePlatform(ctx, userID)
	if err != nil {
		return false, err
	}
	return len(platformIDs) > 0, nil
}
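// A minimal sketch of checking a single user before sending, assuming the
// caller already holds an *OnlineCache (the onlineCache and msg names here are
// hypothetical):
//
//	online, err := onlineCache.GetUserOnline(ctx, msg.RecvID)
//	if err != nil {
//		return err
//	}
//	if !online {
//		// e.g. fall back to offline push for this user
//	}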

func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) {
	platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) {
		platformIDsMap := make(map[string][]int32)

		usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers)
		if err != nil {
			return nil, err
		}

		for _, u := range usersStatus {
			platformIDsMap[u.UserID] = u.PlatformIDs
		}

		return platformIDsMap, nil
	})
	if err != nil {
		log.ZError(ctx, "OnlineCache getUserOnlinePlatformBatch", err, "userIDs", userIDs)
		return nil, err
	}
	return platformIDsMap, nil
}
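// GetUsersOnline splits userIDs into online and offline slices. In full-cache
// mode it only checks membership in mapCache; in LRU mode it resolves statuses
// through getUserOnlinePlatformBatch, which fetches the missing users from the
// user RPC in one batch call.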
func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, []string, error) {
	t := time.Now()

	var (
		onlineUserIDs  = make([]string, 0, len(userIDs))
		offlineUserIDs = make([]string, 0, len(userIDs))
	)

	switch o.fullUserCache {
	case true:
		for _, userID := range userIDs {
			if _, ok := o.mapCache.Load(userID); ok {
				onlineUserIDs = append(onlineUserIDs, userID)
			} else {
				offlineUserIDs = append(offlineUserIDs, userID)
			}
		}
	case false:
		userOnlineMap, err := o.getUserOnlinePlatformBatch(ctx, userIDs)
		if err != nil {
			return nil, nil, err
		}

		for key, value := range userOnlineMap {
			if len(value) > 0 {
				onlineUserIDs = append(onlineUserIDs, key)
			} else {
				offlineUserIDs = append(offlineUserIDs, key)
			}
		}
	}

	log.ZInfo(ctx, "get users online", "online users length", len(onlineUserIDs), "offline users length", len(offlineUserIDs), "cost", time.Since(t))
	return onlineUserIDs, offlineUserIDs, nil
}
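// A minimal sketch of a push-side caller splitting recipients, assuming
// hypothetical pushOnline and pushOffline helpers:
//
//	onlineIDs, offlineIDs, err := onlineCache.GetUsersOnline(ctx, memberIDs)
//	if err != nil {
//		return err
//	}
//	pushOnline(ctx, onlineIDs)   // deliver via live gateway connections
//	pushOffline(ctx, offlineIDs) // hand off to the offline push pipeline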

//func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
//	onlineUserIDs := make([]string, 0, len(userIDs))
//	for _, userID := range userIDs {
//		online, err := o.GetUserOnline(ctx, userID)
//		if err != nil {
//			return nil, err
//		}
//		if online {
//			onlineUserIDs = append(onlineUserIDs, userID)
//		}
//	}
//	log.ZDebug(ctx, "OnlineCache GetUsersOnline", "userIDs", userIDs, "onlineUserIDs", onlineUserIDs)
//	return onlineUserIDs, nil
//}
//
//func (o *OnlineCache) GetGroupOnline(ctx context.Context, groupID string) ([]string, error) {
//	userIDs, err := o.group.GetGroupMemberIDs(ctx, groupID)
//	if err != nil {
//		return nil, err
//	}
//	var onlineUserIDs []string
//	for _, userID := range userIDs {
//		online, err := o.GetUserOnline(ctx, userID)
//		if err != nil {
//			return nil, err
//		}
//		if online {
//			onlineUserIDs = append(onlineUserIDs, userID)
//		}
//	}
//	log.ZDebug(ctx, "OnlineCache GetGroupOnline", "groupID", groupID, "onlineUserIDs", onlineUserIDs, "allUserID", userIDs)
//	return onlineUserIDs, nil
//}
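// setUserOnline records a user's online platforms in whichever cache the
// current mode uses; the initial full load calls it for every online user.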
func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) {
	switch o.fullUserCache {
	case true:
		o.mapCache.Store(userID, platformIDs)
	case false:
		o.lruCache.Set(userID, platformIDs)
	}
}
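// setHasUserOnline applies a status update to the LRU and reports whether the
// value was stored; with SetHas semantics this presumably refreshes only users
// the LRU is already tracking, so subscription traffic does not populate
// entries nobody has queried (an assumption about the lru package, not
// verified here).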
func (o *OnlineCache) setHasUserOnline(userID string, platformIDs []int32) bool {
	return o.lruCache.SetHas(userID, platformIDs)
}