This commit is contained in:
withchao 2023-04-18 18:19:25 +08:00
parent 5da12839fb
commit 36da39e5ab
2 changed files with 1 addition and 42 deletions

View File

@@ -4,9 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
-	"fmt"
-	"runtime/debug"
-	"strings"
 	"time"
 
 	"github.com/OpenIMSDK/Open-IM-Server/pkg/common/log"
@@ -69,105 +66,67 @@ func GetDefaultOpt() rockscache.Options {
 func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
 	var t T
-	var arr []string
-	arr = append(arr, "-------------------getCache---------------------")
-	arr = append(arr, time.Now().String())
-	arr = append(arr, fmt.Sprintf("key: <%s> expire: <%s>", key, expire.String()))
-	defer func() {
-		arr = append(arr, fmt.Sprintf("%#v", t))
-		arr = append(arr, string(debug.Stack()))
-		arr = append(arr, "----------------------------------------")
-		fmt.Println(strings.Join(arr, "\n"))
-	}()
 	var write bool
 	v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
-		arr = append(arr, "find db")
 		t, err = fn(ctx)
 		if err != nil {
-			arr = append(arr, fmt.Sprintf("fn error %s", err))
 			return "", err
 		}
-		arr = append(arr, fmt.Sprintf("fn value %#v", t))
 		bs, err := json.Marshal(t)
 		if err != nil {
-			arr = append(arr, fmt.Sprintf("json marshal %s", err))
 			return "", utils.Wrap(err, "")
 		}
 		write = true
-		arr = append(arr, fmt.Sprintf("json value %d", len(bs)))
-		arr = append(arr, string(bs))
-		arr = append(arr, "****************")
 		return string(bs), nil
 	})
 	if err != nil {
-		arr = append(arr, "fetch error "+err.Error())
 		return t, err
 	}
	if write {
-		arr = append(arr, "first return")
 		return t, nil
 	}
 	err = json.Unmarshal([]byte(v), &t)
 	if err != nil {
-		arr = append(arr, "json.Unmarshal error "+err.Error())
 		log.ZError(ctx, "cache json.Unmarshal failed", err, "key", key, "value", v, "expire", expire)
 		return t, utils.Wrap(err, "")
 	}
-	arr = append(arr, "success")
 	return t, nil
 }
 
 func batchGetCache[T any](ctx context.Context, rcClient *rockscache.Client, keys []string, expire time.Duration, keyIndexFn func(t T, keys []string) (int, error), fn func(ctx context.Context) ([]T, error)) ([]T, error) {
-	var arr []string
-	arr = append(arr, "-------------------batchGetCache---------------------")
-	arr = append(arr, time.Now().String())
-	arr = append(arr, fmt.Sprintf("keys: <%#v> expire: <%s>", keys, expire.String()))
-	defer func() {
-		arr = append(arr, string(debug.Stack()))
-		arr = append(arr, "----------------------------------------")
-		fmt.Println(strings.Join(arr, "\n"))
-	}()
 	batchMap, err := rcClient.FetchBatch2(ctx, keys, expire, func(idxs []int) (m map[int]string, err error) {
 		values := make(map[int]string)
 		tArrays, err := fn(ctx)
 		if err != nil {
-			arr = append(arr, "fn error "+err.Error())
 			return nil, err
 		}
 		for _, v := range tArrays {
 			index, err := keyIndexFn(v, keys)
 			if err != nil {
-				arr = append(arr, "keyIndexFn continue "+err.Error())
 				continue
 			}
 			bs, err := json.Marshal(v)
 			if err != nil {
-				arr = append(arr, "json.Marshal "+err.Error())
 				return nil, utils.Wrap(err, "marshal failed")
 			}
 			values[index] = string(bs)
 		}
-		arr = append(arr, fmt.Sprintf("rcClient.FetchBatch2 %#v", values))
 		return values, nil
 	})
 	if err != nil {
-		arr = append(arr, "rcClient.FetchBatch2 error "+err.Error())
 		return nil, err
 	}
-	arr = append(arr, fmt.Sprintf("rcClient.FetchBatch2 %#v", batchMap))
 	var tArrays []T
 	for _, v := range batchMap {
 		if v != "" {
 			var t T
 			err = json.Unmarshal([]byte(v), &t)
 			if err != nil {
-				arr = append(arr, "json.Unmarshal error "+err.Error())
 				return nil, utils.Wrap(err, "unmarshal failed")
 			}
 			tArrays = append(tArrays, t)
 		}
 	}
-	arr = append(arr, fmt.Sprintf("tArrays %#v", tArrays))
 	return tArrays, nil
 }
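
The hunks above strip the ad-hoc stdout debug logging (the arr slice, debug.Stack, and fmt.Println) out of the two generic cache helpers; the rockscache fetch paths themselves are unchanged. For orientation, here is a minimal sketch of how these helpers are typically called from the same package. The userInfo type, the getUserInfo/getUsersInfo wrappers, the EXAMPLE_USER_INFO key prefix, and the 12-hour TTL are illustrative assumptions, not code from this repository:

package cache

import (
	"context"
	"errors"
	"time"

	"github.com/dtm-labs/rockscache"
)

// userInfo is a hypothetical cached value type, used only for illustration.
type userInfo struct {
	UserID   string `json:"userID"`
	Nickname string `json:"nickname"`
}

// getUserInfo sketches a typical getCache call site: one key, a TTL, and a
// loader closure that runs only on a cache miss; getCache JSON-encodes the
// loaded value and writes it back through rockscache.
func getUserInfo(ctx context.Context, rc *rockscache.Client, userID string) (*userInfo, error) {
	return getCache(ctx, rc, "EXAMPLE_USER_INFO:"+userID, time.Hour*12,
		func(ctx context.Context) (*userInfo, error) {
			// replace with a real database query
			return &userInfo{UserID: userID, Nickname: "example"}, nil
		})
}

// getUsersInfo sketches a batchGetCache call site: keyIndexFn maps each loaded
// value back to the index of its key in keys, so every result is cached under
// the right slot.
func getUsersInfo(ctx context.Context, rc *rockscache.Client, userIDs []string) ([]*userInfo, error) {
	keys := make([]string, 0, len(userIDs))
	for _, id := range userIDs {
		keys = append(keys, "EXAMPLE_USER_INFO:"+id)
	}
	return batchGetCache(ctx, rc, keys, time.Hour*12,
		func(u *userInfo, keys []string) (int, error) {
			for i, k := range keys {
				if k == "EXAMPLE_USER_INFO:"+u.UserID {
					return i, nil
				}
			}
			return 0, errors.New("key not found for user " + u.UserID)
		},
		func(ctx context.Context) ([]*userInfo, error) {
			// replace with a single batched database query
			us := make([]*userInfo, 0, len(userIDs))
			for _, id := range userIDs {
				us = append(us, &userInfo{UserID: id, Nickname: "example"})
			}
			return us, nil
		})
}

On a miss the loader runs once, the value is JSON-encoded into Redis, and the write flag lets getCache return the freshly loaded value directly instead of unmarshalling it again.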

View File

@@ -128,7 +128,7 @@ func (g *groupDatabase) CreateGroup(ctx context.Context, groups []*relationTb.Gr
 				cache = cache.DelGroupMemberIDs(groupMember.GroupID).DelGroupMembersHash(groupMember.GroupID).DelJoinedGroupID(groupMember.UserID).DelGroupsMemberNum(groupMember.GroupID)
 			}
 		}
-		return g.cache.ExecDel(ctx)
+		return cache.ExecDel(ctx)
 	})
 }
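
The one-line change in the second file is the functional fix of this commit: CreateGroup queues keys to invalidate on the chained cache value returned by the DelGroupMemberIDs/DelGroupMembersHash/DelJoinedGroupID/DelGroupsMemberNum calls, but ExecDel was then invoked on the original g.cache, so the queued keys were never deleted. A rough sketch of that builder-style invalidation pattern follows; metaCache, DelKey, and the key names are assumed simplifications, not the repository's actual cache interface:

package cache

import "context"

// metaCache is an assumed, simplified stand-in for the chained cache type.
type metaCache struct {
	keys []string
}

// DelKey returns a copy of the cache with one more key queued for deletion;
// the receiver itself is left unchanged.
func (m metaCache) DelKey(key string) metaCache {
	m.keys = append(m.keys, key)
	return m
}

// ExecDel deletes all queued keys (the Redis call is elided in this sketch).
// Called on a value that never went through DelKey, keys is empty and nothing
// is invalidated, which is the bug the commit fixes.
func (m metaCache) ExecDel(ctx context.Context) error {
	_ = m.keys // delete these keys from Redis here
	return nil
}

// invalidateExample shows the correct call shape: ExecDel runs on the chained
// value c, which carries both keys; base.ExecDel(ctx) would delete nothing.
func invalidateExample(ctx context.Context, base metaCache, groupID, userID string) error {
	c := base.DelKey("GROUP_MEMBER_IDS:" + groupID).DelKey("JOIN_GROUP:" + userID)
	return c.ExecDel(ctx)
}

Because each Del method returns a new value rather than mutating its receiver, calling ExecDel on the original field silently skips the invalidation, which is exactly what the replaced line did.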