icey-yu 0276c7df60
Fix err (#2608)
* refactor: refactor workflows contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove cn comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect outdated msg get.

* update get docIDs logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.

* remove unused content.

* update log type.

* feat: implement request batch count limit.

* update

* update

* feat: add rocksTimeout

* feat: wrap logs

* feat: add logs

* feat: listen config

* feat: enable listen TIME_WAIT port

* feat: add logs

* feat: cache batch

* chore: enable fullUserCache

* feat: push rpc num

* feat: push err

* feat: with operationID

* feat: sleep

* feat: change 1s

* feat: change log

* feat: implement Getbatch in rpcCache.

* feat: print getOnline cost

* feat: change log

* feat: change kafka and push config

* feat: del interface

* feat: fix err

* feat: change config

* feat: go mod

* feat: change config

* feat: change config

* feat: add sleep in push

* feat: warn logs

* feat: logs

* feat: logs

* feat: change port

* feat: start config

* feat: remove port reuse

* feat: prometheus config

* feat: prometheus config

* feat: prometheus config

* feat: add long time send msg to grafana

* feat: init

* feat: init

* feat: implement offline push.

* feat: batch get user online

* feat: implement batch Push split

* update go mod

* Revert "feat: change port"

This reverts commit 06d5e944

* feat: change port

* feat: change config

* feat: implement kafka producer and consumer.

* update format.

* add PushMQ log.

* feat: get all online users and init push

* feat: lock in online cache

* feat: config

* fix: init online status

* fix: add logs

* fix: userIDs

* fix: add logs

* feat: update Handler logic.

* update MQ logic.

* update

* update

* fix: method name

* fix: update OfflinePushConsumerHandler.

* fix: prommetrics

* fix: add logs

* fix: ctx

* fix: log

* fix: config

* feat: change port

* fix: atomic online cache status

---------

Co-authored-by: Monet Lee <monet_lee@163.com>
2024-09-12 02:38:17 +00:00


package redis

import (
	"context"
	"encoding/json"
	"time"
	"unsafe"

	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
	"golang.org/x/sync/singleflight"
)

// getRocksCacheRedisClient extracts the underlying redis.UniversalClient from a
// rockscache.Client. rockscache does not export its redis client, so a struct with
// the same field layout is declared locally and the first field is read through
// unsafe.Pointer.
func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient {
	type Client struct {
		rdb redis.UniversalClient
		_   rockscache.Options
		_   singleflight.Group
	}
	return (*Client)(unsafe.Pointer(cli)).rdb
}

// batchGetCache2 fetches the values for ids through the rockscache read-through cache.
// It deduplicates the ids, builds one cache key per id with idKey, groups the keys by
// Redis cluster slot, and for each slot calls FetchBatch2. Keys missing from the cache
// are loaded from the database via fn, matched back to their key position with vId, and
// stored as JSON. Values that implement BatchCacheCallback are handed the id they were
// loaded under.
func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	// Build the distinct cache keys and remember which id produced each key.
	findKeys := make([]string, 0, len(ids))
	keyId := make(map[string]K)
	for _, id := range ids {
		key := idKey(id)
		if _, ok := keyId[key]; ok {
			continue
		}
		keyId[key] = id
		findKeys = append(findKeys, key)
	}
	// Group the keys by Redis cluster slot so each FetchBatch2 call stays on one node.
	slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys)
	if err != nil {
		return nil, err
	}
	result := make([]*V, 0, len(findKeys))
	for _, keys := range slotKeys {
		indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
			// idx holds the positions of the keys that missed the cache; load those ids from the database.
			queryIds := make([]K, 0, len(idx))
			idIndex := make(map[K]int)
			for _, index := range idx {
				id := keyId[keys[index]]
				idIndex[id] = index
				queryIds = append(queryIds, id)
			}
			values, err := fn(ctx, queryIds)
			if err != nil {
				log.ZError(ctx, "batchGetCache query database failed", err, "keys", keys, "queryIds", queryIds)
				return nil, err
			}
			if len(values) == 0 {
				return map[int]string{}, nil
			}
			// Marshal each loaded value and place it at the index of its cache key.
			cacheIndex := make(map[int]string)
			for _, value := range values {
				id := vId(value)
				index, ok := idIndex[id]
				if !ok {
					continue
				}
				bs, err := json.Marshal(value)
				if err != nil {
					log.ZError(ctx, "marshal failed", err)
					return nil, err
				}
				cacheIndex[index] = string(bs)
			}
			return cacheIndex, nil
		})
		if err != nil {
			return nil, errs.WrapMsg(err, "FetchBatch2 failed")
		}
		// Decode the cached JSON back into values; an empty string means no value was stored for that key.
		for index, data := range indexCache {
			if data == "" {
				continue
			}
			var value V
			if err := json.Unmarshal([]byte(data), &value); err != nil {
				return nil, errs.WrapMsg(err, "Unmarshal failed")
			}
			if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
				cb.BatchCache(keyId[keys[index]])
			}
			result = append(result, &value)
		}
	}
	return result, nil
}

// BatchCacheCallback is implemented by cached values that want to be told which id
// they were loaded under after a batch cache read.
type BatchCacheCallback[K comparable] interface {
	BatchCache(id K)
}
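
A minimal usage sketch of batchGetCache2, assuming a hypothetical userCache wrapper in the same package; the userModel type, the "USER_INFO:" key format, and the userDB.Find loader are illustrative stand-ins, not part of this file:

// Hypothetical caller. Everything below except batchGetCache2 itself is an
// assumption made for illustration.
type userModel struct {
	UserID   string `json:"userID"`
	Nickname string `json:"nickname"`
}

type userCache struct {
	rcClient *rockscache.Client
	userDB   interface {
		Find(ctx context.Context, userIDs []string) ([]*userModel, error)
	}
}

func (c *userCache) GetUsers(ctx context.Context, userIDs []string) ([]*userModel, error) {
	return batchGetCache2(ctx, c.rcClient, 12*time.Hour, userIDs,
		func(userID string) string { return "USER_INFO:" + userID }, // idKey: cache key per id (key format assumed)
		func(u *userModel) string { return u.UserID },               // vId: map a loaded value back to its id
		c.userDB.Find, // fn: database fallback for keys that miss the cache
	)
}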