Mirror of https://github.com/openimsdk/open-im-server.git
optimize scheduled deletion

commit ce9883cf26
parent 3229cfe328
@@ -82,7 +82,7 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
         apiURL:        apiURL,
         thirdDatabase: controller.NewThirdDatabase(cache.NewMsgCacheModel(rdb), db),
         userRpcClient: rpcclient.NewUserRpcClient(client),
-        s3dataBase:    controller.NewS3Database(o, relation.NewObjectInfo(db)),
+        s3dataBase:    controller.NewS3Database(rdb, o, relation.NewObjectInfo(db)),
         defaultExpire: time.Hour * 24 * 7,
     })
     return nil
pkg/common/db/cache/s3.go (new file, 166 lines)
@@ -0,0 +1,166 @@
+package cache
+
+import (
+    "context"
+    "github.com/dtm-labs/rockscache"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
+    relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
+    "github.com/redis/go-redis/v9"
+    "time"
+)
+
+type ObjectCache interface {
+    metaCache
+    GetName(ctx context.Context, name string) (*relationtb.ObjectModel, error)
+    DelObjectName(names ...string) ObjectCache
+}
+
+func NewObjectCacheRedis(rdb redis.UniversalClient, objDB relationtb.ObjectInfoModelInterface) ObjectCache {
+    rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
+    return &objectCacheRedis{
+        rcClient:   rcClient,
+        expireTime: time.Hour * 12,
+        objDB:      objDB,
+        metaCache:  NewMetaCacheRedis(rcClient),
+    }
+}
+
+type objectCacheRedis struct {
+    metaCache
+    objDB      relationtb.ObjectInfoModelInterface
+    rcClient   *rockscache.Client
+    expireTime time.Duration
+}
+
+func (g *objectCacheRedis) NewCache() ObjectCache {
+    return &objectCacheRedis{
+        rcClient:   g.rcClient,
+        expireTime: g.expireTime,
+        objDB:      g.objDB,
+        metaCache:  NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
+    }
+}
+
+func (g *objectCacheRedis) DelObjectName(names ...string) ObjectCache {
+    objectCache := g.NewCache()
+    keys := make([]string, 0, len(names))
+    for _, name := range names {
+        keys = append(keys, g.getObjectKey(name))
+    }
+    objectCache.AddKeys(keys...)
+    return objectCache
+}
+
+func (g *objectCacheRedis) getObjectKey(name string) string {
+    return "OBJECT_INFO:" + name
+}
+
+func (g *objectCacheRedis) GetName(ctx context.Context, name string) (*relationtb.ObjectModel, error) {
+    return getCache(ctx, g.rcClient, g.getObjectKey(name), g.expireTime, func(ctx context.Context) (*relationtb.ObjectModel, error) {
+        return g.objDB.Take(ctx, name)
+    })
+}
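GetName above (and GetKey further down) delegates to a getCache helper that is not part of this diff; it lives elsewhere in the cache package. In broad strokes it is a rockscache-backed read-through: return the decoded value stored under the key if Redis has it, otherwise run the supplied fetch function against the backing store and cache the encoded result for expireTime. A minimal sketch of that pattern, assuming rockscache's Fetch2 API; fetchJSON and its layout are illustrative, not the project's actual helper:

package cache

import (
    "context"
    "encoding/json"
    "time"

    "github.com/dtm-labs/rockscache"
)

// fetchJSON is an illustrative stand-in for getCache: serve the key from Redis
// when possible, otherwise call fetchDB once and store the JSON-encoded result.
// The real helper also has to handle "record not found" sentinels, which this
// sketch omits.
func fetchJSON[T any](ctx context.Context, rc *rockscache.Client, key string, expire time.Duration,
    fetchDB func(ctx context.Context) (T, error)) (T, error) {
    var zero T
    payload, err := rc.Fetch2(ctx, key, expire, func() (string, error) {
        val, err := fetchDB(ctx) // cache miss: hit the backing store
        if err != nil {
            return "", err
        }
        data, err := json.Marshal(val)
        return string(data), err
    })
    if err != nil {
        return zero, err
    }
    var out T
    if err := json.Unmarshal([]byte(payload), &out); err != nil {
        return zero, err
    }
    return out, nil
}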
+
+type S3Cache interface {
+    metaCache
+    GetKey(ctx context.Context, engine string, key string) (*s3.ObjectInfo, error)
+    DelS3Key(engine string, keys ...string) S3Cache
+}
+
+func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) S3Cache {
+    rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
+    return &s3CacheRedis{
+        rcClient:   rcClient,
+        expireTime: time.Hour * 12,
+        s3:         s3,
+        metaCache:  NewMetaCacheRedis(rcClient),
+    }
+}
+
+type s3CacheRedis struct {
+    metaCache
+    s3         s3.Interface
+    rcClient   *rockscache.Client
+    expireTime time.Duration
+}
+
+func (g *s3CacheRedis) NewCache() S3Cache {
+    return &s3CacheRedis{
+        rcClient:   g.rcClient,
+        expireTime: g.expireTime,
+        s3:         g.s3,
+        metaCache:  NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
+    }
+}
+
+func (g *s3CacheRedis) DelS3Key(engine string, keys ...string) S3Cache {
+    s3cache := g.NewCache()
+    ks := make([]string, 0, len(keys))
+    for _, key := range keys {
+        ks = append(ks, g.getS3Key(engine, key))
+    }
+    s3cache.AddKeys(ks...)
+    return s3cache
+}
+
+func (g *s3CacheRedis) getS3Key(engine string, name string) string {
+    return "S3:" + engine + ":" + name
+}
+
+func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (*s3.ObjectInfo, error) {
+    return getCache(ctx, g.rcClient, g.getS3Key(engine, name), g.expireTime, func(ctx context.Context) (*s3.ObjectInfo, error) {
+        return g.s3.StatObject(ctx, name)
+    })
+}
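Note that DelObjectName and DelS3Key do not touch Redis themselves: they clone the cache with NewCache, record the fully qualified keys ("OBJECT_INFO:<name>" / "S3:<engine>:<key>") via AddKeys, and return the clone so the caller chooses when to perform the deletion; the actual invalidation only happens when ExecDel runs, as SetObject and CompleteUpload do later in this commit. A small usage sketch against the exported S3Cache interface above; the function name is illustrative:

package example

import (
    "context"

    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
)

// invalidateStat drops the cached StatObject results for the given keys.
// DelS3Key only records the keys on a fresh cache copy; the Redis writes
// happen when ExecDel is called.
func invalidateStat(ctx context.Context, c cache.S3Cache, engine string, keys ...string) error {
    return c.DelS3Key(engine, keys...).ExecDel(ctx)
}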
+
+//type MinioCache interface {
+//    metaCache
+//    GetThumbnailKey(ctx context.Context, key string) (string, error)
+//    //DelS3Key(engine string, keys ...string) S3Cache
+//}
+//
+//func NewMinioCache(rdb redis.UniversalClient, s3 s3.Interface) MinioCache {
+//    rcClient := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
+//    return &minioCacheRedis{
+//        rcClient:   rcClient,
+//        expireTime: time.Hour * 12,
+//        s3:         s3,
+//        metaCache:  NewMetaCacheRedis(rcClient),
+//    }
+//}
+//
+//type minioCacheRedis struct {
+//    metaCache
+//    s3         s3.Interface
+//    rcClient   *rockscache.Client
+//    expireTime time.Duration
+//}
+//
+//func (g *minioCacheRedis) NewCache() MinioCache {
+//    return &minioCacheRedis{
+//        rcClient:   g.rcClient,
+//        expireTime: g.expireTime,
+//        s3:         g.s3,
+//        metaCache:  NewMetaCacheRedis(g.rcClient, g.metaCache.GetPreDelKeys()...),
+//    }
+//}
+//
+//func (g *minioCacheRedis) DelS3Key(engine string, keys ...string) S3Cache {
+//    s3cache := g.NewCache()
+//    ks := make([]string, 0, len(keys))
+//    for _, key := range keys {
+//        ks = append(ks, g.getS3Key(engine, key))
+//    }
+//    s3cache.AddKeys(ks...)
+//    return s3cache
+//}
+//
+//func (g *minioCacheRedis) getMinioImageInfoKey(name string) string {
+//    return "MINIO:" + ":" + name
+//}
+//
+//func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, name string) (string, error) {
+//    return getCache(ctx, g.rcClient, g.getS3Key(engine, name), g.expireTime, func(ctx context.Context) (*s3.ObjectInfo, error) {
+//        return g.s3.StatObject(ctx, name)
+//    })
+//}
@@ -16,12 +16,13 @@ package controller
 
 import (
     "context"
-    "path/filepath"
-    "time"
-
+    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3/cont"
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
+    "github.com/redis/go-redis/v9"
+    "path/filepath"
+    "time"
 )
 
 type S3Database interface {
@@ -34,16 +35,18 @@ type S3Database interface {
     SetObject(ctx context.Context, info *relation.ObjectModel) error
 }
 
-func NewS3Database(s3 s3.Interface, obj relation.ObjectInfoModelInterface) S3Database {
+func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj relation.ObjectInfoModelInterface) S3Database {
     return &s3Database{
-        s3:  cont.New(s3),
-        obj: obj,
+        s3:    cont.New(cache.NewS3Cache(rdb, s3), s3),
+        cache: cache.NewObjectCacheRedis(rdb, obj),
+        db:    obj,
     }
 }
 
 type s3Database struct {
-    s3  *cont.Controller
-    obj relation.ObjectInfoModelInterface
+    s3    *cont.Controller
+    cache cache.ObjectCache
+    db    relation.ObjectInfoModelInterface
 }
 
 func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
@@ -67,11 +70,14 @@ func (s *s3Database) CompleteMultipartUpload(ctx context.Context, uploadID strin
 }
 
 func (s *s3Database) SetObject(ctx context.Context, info *relation.ObjectModel) error {
-    return s.obj.SetObject(ctx, info)
+    if err := s.db.SetObject(ctx, info); err != nil {
+        return err
+    }
+    return s.cache.DelObjectName(info.Name).ExecDel(ctx)
 }
 
 func (s *s3Database) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (time.Time, string, error) {
-    obj, err := s.obj.Take(ctx, name)
+    obj, err := s.cache.GetName(ctx, name)
     if err != nil {
         return time.Time{}, "", err
     }
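Two behavioral changes land in this hunk: SetObject now persists the row first and only afterwards invalidates the OBJECT_INFO cache key, and AccessURL reads the object row through the cache instead of querying the table on every URL request. Persist-then-invalidate matters because a concurrent read-through that repopulates the key right after the delete will already see the updated row. A generic sketch of that write-side ordering; objectStore and its methods are illustrative placeholders, not project types:

package example

import "context"

// objectStore stands in for the two dependencies involved in SetObject:
// the relational table and the Redis-backed object cache.
type objectStore interface {
    SetObjectRow(ctx context.Context, name string) error // durable write
    DelObjectKey(ctx context.Context, name string) error // cache invalidation
}

// setObject mirrors the ordering used in the diff above: write to the database
// first, then delete the cache entry, so any racing read-through repopulates
// the key from fresh data rather than re-caching the stale row.
func setObject(ctx context.Context, s objectStore, name string) error {
    if err := s.SetObjectRow(ctx, name); err != nil {
        return err
    }
    return s.DelObjectKey(ctx, name)
}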
@@ -20,6 +20,7 @@ import (
     "encoding/hex"
     "errors"
     "fmt"
+    "github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
     "path"
     "strings"
     "time"
@@ -32,12 +33,16 @@ import (
     "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
 )
 
-func New(impl s3.Interface) *Controller {
-    return &Controller{impl: impl}
+func New(cache cache.S3Cache, impl s3.Interface) *Controller {
+    return &Controller{
+        cache: cache,
+        impl:  impl,
+    }
 }
 
 type Controller struct {
-    impl s3.Interface
+    cache cache.S3Cache
+    impl  s3.Interface
 }
 
 func (c *Controller) HashPath(md5 string) string {
@@ -69,8 +74,12 @@ func (c *Controller) PartLimit() *s3.PartLimit {
     return c.impl.PartLimit()
 }
 
+func (c *Controller) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
+    return c.cache.GetKey(ctx, c.impl.Engine(), name)
+}
+
 func (c *Controller) GetHashObject(ctx context.Context, hash string) (*s3.ObjectInfo, error) {
-    return c.impl.StatObject(ctx, c.HashPath(hash))
+    return c.StatObject(ctx, c.HashPath(hash))
 }
 
 func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*InitiateUploadResult, error) {
@@ -94,7 +103,7 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
     if maxParts > 0 && partNumber > 0 && partNumber < maxParts {
         return nil, errors.New(fmt.Sprintf("too many parts: %d", partNumber))
     }
-    if info, err := c.impl.StatObject(ctx, c.HashPath(hash)); err == nil {
+    if info, err := c.StatObject(ctx, c.HashPath(hash)); err == nil {
         return nil, &HashAlreadyExistsError{Object: info}
     } else if !c.impl.IsNotFound(err) {
         return nil, err
@@ -168,13 +177,13 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
         fmt.Println("CompleteUpload sum:", hex.EncodeToString(md5Sum[:]), "upload hash:", upload.Hash)
         return nil, errors.New("md5 mismatching")
     }
-    if info, err := c.impl.StatObject(ctx, c.HashPath(upload.Hash)); err == nil {
+    if info, err := c.StatObject(ctx, c.HashPath(upload.Hash)); err == nil {
         return &UploadResult{
             Key:  info.Key,
             Size: info.Size,
             Hash: info.ETag,
         }, nil
-    } else if !c.impl.IsNotFound(err) {
+    } else if !c.IsNotFound(err) {
         return nil, err
     }
     cleanObject := make(map[string]struct{})
@@ -200,7 +209,7 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
         }
         targetKey = result.Key
     case UploadTypePresigned:
-        uploadInfo, err := c.impl.StatObject(ctx, upload.Key)
+        uploadInfo, err := c.StatObject(ctx, upload.Key)
         if err != nil {
             return nil, err
         }
@@ -230,6 +239,9 @@ func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHa
     default:
         return nil, errors.New("invalid upload id type")
     }
+    if err := c.cache.DelS3Key(c.impl.Engine(), targetKey).ExecDel(ctx); err != nil {
+        return nil, err
+    }
     return &UploadResult{
         Key:  targetKey,
         Size: upload.Size,
@@ -253,7 +265,7 @@ func (c *Controller) AuthSign(ctx context.Context, uploadID string, partNumbers
 }
 
 func (c *Controller) IsNotFound(err error) bool {
-    return c.impl.IsNotFound(err)
+    return c.impl.IsNotFound(err) || errs.ErrRecordNotFound.Is(err)
 }
 
 func (c *Controller) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
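With StatObject now routed through the Redis cache, two follow-up adjustments appear above: CompleteUpload drops the cached stat for the finished target key (otherwise a previously cached result, including a cached "not found", could linger for the full 12-hour TTL), and IsNotFound is widened to also accept errs.ErrRecordNotFound, since a cache-backed lookup can surface that error instead of the storage backend's own not-found value. A small sketch of how a caller would lean on the widened check; statOrMissing and its arguments are illustrative:

package example

import (
    "context"

    "github.com/openimsdk/open-im-server/v3/pkg/common/db/s3/cont"
)

// statOrMissing separates "the object is not there yet" from a real failure.
// It relies on cont.Controller.IsNotFound matching both the backend's
// not-found error and errs.ErrRecordNotFound coming from the cache path.
func statOrMissing(ctx context.Context, ctrl *cont.Controller, key string) (exists bool, err error) {
    if _, err := ctrl.StatObject(ctx, key); err == nil {
        return true, nil
    } else if ctrl.IsNotFound(err) {
        return false, nil
    } else {
        return false, err
    }
}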
@@ -20,6 +20,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "github.com/dtm-labs/rockscache"
     "image"
     "image/gif"
     "image/jpeg"
@@ -124,6 +125,7 @@ type Minio struct {
     lock     sync.Locker
     init     bool
     prefix   string
+    rcClient *rockscache.Client
 }
 
 func (m *Minio) initMinio(ctx context.Context) error {
@@ -543,3 +545,8 @@ func (m *Minio) getObjectData(ctx context.Context, name string, limit int64) ([]
     }
     return io.ReadAll(io.LimitReader(object, 1024))
 }
+
+func (m *Minio) GetThumbnailKey(ctx context.Context, name string) (string, error) {
+
+    return "", nil
+}
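The Minio driver gains the rockscache import and an rcClient field, but nothing in this commit wires the field up, and GetThumbnailKey is still a stub that returns an empty key; together with the commented-out MinioCache in pkg/common/db/cache/s3.go it reads as groundwork for caching generated thumbnail keys in a follow-up. If it ends up following the same pattern as the S3 cache above, the shape would presumably be something like this (entirely hypothetical, including the key prefix and TTL):

package example

import (
    "context"
    "time"

    "github.com/dtm-labs/rockscache"
)

// cacheThumbnailKey sketches how a populated rcClient could back GetThumbnailKey:
// read the generated thumbnail object key through Redis and only regenerate it
// on a cache miss. Names, key prefix, and TTL are assumptions, not project code.
func cacheThumbnailKey(ctx context.Context, rc *rockscache.Client, name string,
    generate func(ctx context.Context) (string, error)) (string, error) {
    return rc.Fetch2(ctx, "MINIO:THUMBNAIL:"+name, time.Hour*12, func() (string, error) {
        return generate(ctx)
    })
}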