chao 4aaf496086
feat: new features merged (#2409)
* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated Change

* fix: component

* fix: getConversationInfo

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* feat: cron task

* fix: minio config url recognition error

* new mongo

* new mongo

* new mongo

* new mongo

* new mongo

* new mongo

* new mongo

* new mongo

* friend incr sync

* friend incr sync

* friend incr sync

* friend incr sync

* friend incr sync

* mage

* optimization version log

* optimization version log

* sync

* sync

* sync

* group sync

* sync option

* sync option

* refactor: replace `friend` package with `relation`.

* refactor: update latest commit to relation.

* sync option

* sync option

* sync option

* sync

* sync

* go.mod

* seq

* update: go mod

* refactor: change incremental to full

* feat: get full friend user ids

* feat: api and config

* seq

* group version

* merge

* seq

* seq

* seq

* fix: sort by id to avoid unstable friend sorting.

* group

* group

* group

* fix: sort by id to avoid unstable friend sorting.

* fix: sort by id to avoid unstable friend sorting.

* fix: sort by id to avoid unstable friend sorting.

* user version

* seq

* seq

* seq user

* user online

* implement minio expire delete.

* user online

* config

* fix

* fix

* implement minio expire delete logic.

* online cache

* online cache

* online cache

* online cache

* online cache

* online cache

* online cache

* online cache

* online cache

* online cache

* online cache

* online cache

* feat: implement scheduled deletion of outdated objects in minio.

* update gomake version

* update gomake version

* implement FindExpires pagination.

* remove unnecessary incr.

* fix incorrect args call.

* online push

* online push

* online push

* resolving conflicts

* resolving conflicts

* test

* api prommetrics

* api prommetrics

* api prommetrics

* api prommetrics

* api prommetrics

* rpc prommetrics

* rpc prommetrics

* online status

* online status

* online status

* online status

* sub

* conversation version incremental

* merge seq

* merge online

* merge online

* merge online

* merge seq

* GetOwnerConversation

* fix: change incremental syncer router name.

* rockscache batch get

* rockscache seq batch get

* fix: GetMsgDocModelByIndex bug

* update go.mod

* update go.mod

* merge

* feat: prometheus

* feat: prometheus

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>
Co-authored-by: Monet Lee <monet_lee@163.com>
Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com>
Co-authored-by: icey-yu <1186114839@qq.com>
2024-07-16 02:46:21 +00:00

package redis

import (
    "context"
    "encoding/json"
    "time"
    "unsafe"

    "github.com/dtm-labs/rockscache"
    "github.com/redis/go-redis/v9"
    "golang.org/x/sync/singleflight"
)

// getRocksCacheRedisClient extracts the underlying redis.UniversalClient from a
// rockscache.Client. The rdb field is unexported, so this mirrors the field
// layout of rockscache.Client and reads it through an unsafe pointer cast; it
// relies on that layout staying unchanged across rockscache versions.
func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient {
    type Client struct {
        rdb redis.UniversalClient
        _   rockscache.Options
        _   singleflight.Group
    }
    return (*Client)(unsafe.Pointer(cli)).rdb
}

// batchGetCache2 reads a batch of values through rockscache. Keys are grouped by
// Redis cluster slot so each FetchBatch2 call stays on a single slot, and cache
// misses are loaded with fn and written back as JSON.
//   - idKey maps an id to its cache key.
//   - vId maps a loaded value back to its id, so results can be matched to keys.
//   - fn loads the values for the ids that were not found in the cache.
func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
    if len(ids) == 0 {
        return nil, nil
    }
    // Deduplicate ids and remember which id each cache key belongs to.
    findKeys := make([]string, 0, len(ids))
    keyId := make(map[string]K)
    for _, id := range ids {
        key := idKey(id)
        if _, ok := keyId[key]; ok {
            continue
        }
        keyId[key] = id
        findKeys = append(findKeys, key)
    }
    // Group keys by cluster slot so each batched read targets one node.
    slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys)
    if err != nil {
        return nil, err
    }
    result := make([]*V, 0, len(findKeys))
    for _, keys := range slotKeys {
        indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
            // idx holds the positions of the keys that missed the cache;
            // load the corresponding ids from the underlying storage.
            queryIds := make([]K, 0, len(idx))
            idIndex := make(map[K]int)
            for _, index := range idx {
                id := keyId[keys[index]]
                idIndex[id] = index
                queryIds = append(queryIds, id)
            }
            values, err := fn(ctx, queryIds)
            if err != nil {
                return nil, err
            }
            if len(values) == 0 {
                return map[int]string{}, nil
            }
            // Marshal each loaded value and map it back to the index of its key.
            cacheIndex := make(map[int]string)
            for _, value := range values {
                id := vId(value)
                index, ok := idIndex[id]
                if !ok {
                    continue
                }
                bs, err := json.Marshal(value)
                if err != nil {
                    return nil, err
                }
                cacheIndex[index] = string(bs)
            }
            return cacheIndex, nil
        })
        if err != nil {
            return nil, err
        }
        // Unmarshal the cached JSON; an empty string marks an id with no value.
        for index, data := range indexCache {
            if data == "" {
                continue
            }
            var value V
            if err := json.Unmarshal([]byte(data), &value); err != nil {
                return nil, err
            }
            if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
                cb.BatchCache(keyId[keys[index]])
            }
            result = append(result, &value)
        }
    }
    return result, nil
}

// BatchCacheCallback lets a cached value learn which id it was loaded under
// after a batch read.
type BatchCacheCallback[K comparable] interface {
    BatchCache(id K)
}
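
A minimal usage sketch, assuming it lives in the same package as batchGetCache2 so the existing imports are in scope. The User type, userKey helper, and findUsersFromDB loader are hypothetical names introduced here for illustration; they are not part of this file.

// Hypothetical usage sketch: wiring batchGetCache2 to a user lookup.
// User, userKey, and findUsersFromDB are illustrative, not part of this package.
type User struct {
    UserID   string `json:"userID"`
    Nickname string `json:"nickname"`
}

// BatchCache implements BatchCacheCallback[string], so each value learns the
// id it was cached under after a batch read.
func (u *User) BatchCache(id string) {
    u.UserID = id
}

func userKey(userID string) string { return "USER_INFO:" + userID }

func getUsersFromCache(ctx context.Context, rc *rockscache.Client, userIDs []string,
    findUsersFromDB func(ctx context.Context, ids []string) ([]*User, error)) ([]*User, error) {
    return batchGetCache2(ctx, rc, 12*time.Hour, userIDs,
        userKey,                                  // id -> cache key
        func(u *User) string { return u.UserID }, // value -> id, to match results back to keys
        findUsersFromDB,                          // loads cache misses from the database
    )
}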