Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-12-02 18:34:29 +08:00)
Merge branch 'code-conventions' of github.com:FGadvancer/Open-IM-Server into code-conventions
Commit: 88415111fc
@@ -23,18 +23,13 @@ import (
"strconv"
"time"

"github.com/google/uuid"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"

"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"

"github.com/OpenIMSDK/protocol/third"
"github.com/OpenIMSDK/tools/errs"
"github.com/OpenIMSDK/tools/log"
"github.com/OpenIMSDK/tools/mcontext"
"github.com/OpenIMSDK/tools/utils"

"github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3/cont"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
)

@@ -57,7 +52,8 @@ func (t *thirdServer) PartSize(ctx context.Context, req *third.PartSizeReq) (*th
}

func (t *thirdServer) InitiateMultipartUpload(ctx context.Context, req *third.InitiateMultipartUploadReq) (*third.InitiateMultipartUploadResp, error) {
if err := checkUploadName(ctx, req.Name, t.config); err != nil {
defer log.ZDebug(ctx, "return")
if err := t.checkUploadName(ctx, req.Name); err != nil {
return nil, err
}
expireTime := time.Now().Add(t.defaultExpire)
@@ -136,7 +132,7 @@ func (t *thirdServer) AuthSign(ctx context.Context, req *third.AuthSignReq) (*th

func (t *thirdServer) CompleteMultipartUpload(ctx context.Context, req *third.CompleteMultipartUploadReq) (*third.CompleteMultipartUploadResp, error) {
defer log.ZDebug(ctx, "return")
if err := checkUploadName(ctx, req.Name, t.config); err != nil {
if err := t.checkUploadName(ctx, req.Name); err != nil {
return nil, err
}
result, err := t.s3dataBase.CompleteMultipartUpload(ctx, req.UploadID, req.Parts)
@@ -193,13 +189,13 @@ func (t *thirdServer) InitiateFormData(ctx context.Context, req *third.InitiateF
if req.Size <= 0 {
return nil, errs.ErrArgs.Wrap("size must be greater than 0")
}
if err := checkUploadName(ctx, req.Name, t.config); err != nil {
if err := t.checkUploadName(ctx, req.Name); err != nil {
return nil, err
}
var duration time.Duration
opUserID := mcontext.GetOpUserID(ctx)
var key string
if authverify.IsManagerUserID(opUserID, t.config) {
if t.IsManagerUserID(opUserID) {
if req.Millisecond <= 0 {
duration = time.Minute * 10
} else {
@@ -259,7 +255,7 @@ func (t *thirdServer) CompleteFormData(ctx context.Context, req *third.CompleteF
if err := json.Unmarshal(data, &mate); err != nil {
return nil, errs.ErrArgs.Wrap("invalid id " + err.Error())
}
if err := checkUploadName(ctx, mate.Name, t.config); err != nil {
if err := t.checkUploadName(ctx, mate.Name); err != nil {
return nil, err
}
info, err := t.s3dataBase.StatObject(ctx, mate.Key)
@@ -279,7 +275,7 @@ func (t *thirdServer) CompleteFormData(ctx context.Context, req *third.CompleteF
Group: mate.Group,
CreateTime: time.Now(),
}
if err = t.s3dataBase.SetObject(ctx, obj); err != nil {
if err := t.s3dataBase.SetObject(ctx, obj); err != nil {
return nil, err
}
return &third.CompleteFormDataResp{Url: t.apiAddress(mate.Name)}, nil

@@ -17,6 +17,7 @@ package third
import (
"context"
"fmt"
"github.com/OpenIMSDK/tools/log"
"net/url"
"time"

@@ -40,6 +41,7 @@ import (
)

func Start(config *config.GlobalConfig, client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error {
log.ZDebug(context.Background(), "config19999999999999999999999999999999999", config, "javadfdas")

mongo, err := unrelation.NewMongo(config)
if err != nil {
@@ -57,7 +59,7 @@ func Start(config *config.GlobalConfig, client discoveryregistry.SvcDiscoveryReg
if apiURL == "" {
return fmt.Errorf("api url is empty")
}
if _, err := url.Parse(config.Object.ApiURL); err != nil {
if _, parseErr := url.Parse(config.Object.ApiURL); parseErr != nil {
return err
}
if apiURL[len(apiURL)-1] != '/' {
@@ -68,16 +70,16 @@ func Start(config *config.GlobalConfig, client discoveryregistry.SvcDiscoveryReg
if err != nil {
return err
}
// Select based on the configuration file strategy
// Select the oss provider according to the configuration file strategy
enable := config.Object.Enable
var o s3.Interface
switch config.Object.Enable {
case "minio":
o, err = minio.NewMinio(cache.NewMinioCache(rdb), config)
o, err = minio.NewMinio(cache.NewMinioCache(rdb), minio.Config(config.Object.Minio))
case "cos":
o, err = cos.NewCos(config)
o, err = cos.NewCos(cos.Config(config.Object.Cos))
case "oss":
o, err = oss.NewOSS(config)
o, err = oss.NewOSS(oss.Config(config.Object.Oss))
default:
err = fmt.Errorf("invalid object enable: %s", enable)
}

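The switch above now hands each storage backend its own small Config value instead of the whole *config.GlobalConfig, via plain struct conversions such as minio.Config(config.Object.Minio). Such a conversion only compiles when the two struct types carry identical fields in the same order (struct tags aside), which is presumably why each new Config mirrors the corresponding config.Object section. A minimal, self-contained sketch of the pattern; the type names here are illustrative, not the project's:

    package main

    import "fmt"

    // Two struct types with identical field names and types can be converted
    // directly; this is the mechanism behind minio.Config(config.Object.Minio)
    // in the hunk above.
    type globalMinio struct {
        Endpoint string
        Bucket   string
    }

    type providerConfig struct {
        Endpoint string
        Bucket   string
    }

    func main() {
        g := globalMinio{Endpoint: "http://127.0.0.1:9000", Bucket: "openim"}
        p := providerConfig(g) // plain conversion, no field-by-field copy
        fmt.Println(p.Bucket)
    }

If the field sets ever drift apart, the conversion stops compiling, which keeps the global config and the per-provider Config from silently diverging.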
@@ -18,14 +18,14 @@ import (
"context"
"errors"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"strings"
"unicode/utf8"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"

"github.com/OpenIMSDK/protocol/third"
"github.com/OpenIMSDK/tools/errs"
"github.com/OpenIMSDK/tools/mcontext"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
)

func toPbMapArray(m map[string][]string) []*third.KeyValues {
@@ -42,7 +42,7 @@ func toPbMapArray(m map[string][]string) []*third.KeyValues {
return res
}

func checkUploadName(ctx context.Context, name string, config *config.GlobalConfig) error {
func (t *thirdServer) checkUploadName(ctx context.Context, name string) error {
if name == "" {
return errs.ErrArgs.Wrap("name is empty")
}
@@ -56,7 +56,7 @@ func checkUploadName(ctx context.Context, name string, config *config.GlobalConf
if opUserID == "" {
return errs.ErrNoPermission.Wrap("opUserID is empty")
}
if !authverify.IsManagerUserID(opUserID, config) {
if !authverify.IsManagerUserID(opUserID, t.config) {
if !strings.HasPrefix(name, opUserID+"/") {
return errs.ErrNoPermission.Wrap(fmt.Sprintf("name must start with `%s/`", opUserID))
}
@@ -80,3 +80,7 @@ func checkValidObjectName(objectName string) error {
}
return checkValidObjectNamePrefix(objectName)
}

func (t *thirdServer) IsManagerUserID(opUserID string) bool {
return authverify.IsManagerUserID(opUserID, t.config)
}

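In the hunk above, checkUploadName stops threading a *config.GlobalConfig parameter through every call and becomes a method on thirdServer, and IsManagerUserID becomes a thin wrapper around authverify.IsManagerUserID with the receiver's config. A rough sketch of the same shape under simplified, assumed types; server, isManager and the explicit opUserID parameter are stand-ins, since the real code reads the operator ID from mcontext and wraps errors with errs:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "strings"
    )

    // Illustrative stand-in for thirdServer: the config-derived data lives on
    // the receiver, so call sites no longer pass it around.
    type server struct{ managers []string }

    func (s *server) isManager(userID string) bool {
        for _, m := range s.managers {
            if m == userID {
                return true
            }
        }
        return false
    }

    // Non-manager users may only upload under a "<opUserID>/" prefix.
    func (s *server) checkUploadName(ctx context.Context, opUserID, name string) error {
        if name == "" {
            return errors.New("name is empty")
        }
        if !s.isManager(opUserID) && !strings.HasPrefix(name, opUserID+"/") {
            return fmt.Errorf("name must start with `%s/`", opUserID)
        }
        return nil
    }

    func main() {
        s := &server{managers: []string{"imAdmin"}}
        fmt.Println(s.checkUploadName(context.Background(), "u100", "u100/avatar.png")) // <nil>
        fmt.Println(s.checkUploadName(context.Background(), "u100", "u200/avatar.png")) // permission error
    }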
@@ -1,282 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// docURL: https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html

package aws

import (
"context"
"errors"
"fmt"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
sdk "github.com/aws/aws-sdk-go/service/s3"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
)

const (
minPartSize int64 = 1024 * 1024 * 1 // 1MB
maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB
maxNumSize int64 = 10000
)

// const (
// imagePng = "png"
// imageJpg = "jpg"
// imageJpeg = "jpeg"
// imageGif = "gif"
// imageWebp = "webp"
// )

// const successCode = http.StatusOK

// const (
// videoSnapshotImagePng = "png"
// videoSnapshotImageJpg = "jpg"
// )

func NewAWS() (s3.Interface, error) {
configGlobal := config.NewGlobalConfig()

err := config.InitConfig(configGlobal, "../../config")
if err != nil {
return nil, err
}

conf := configGlobal.Object.Aws
credential := credentials.NewStaticCredentials(
conf.AccessKeyID, // accessKey
conf.AccessKeySecret, // secretKey
"") // stoken

sess, err := session.NewSession(&aws.Config{
Region: aws.String(conf.Region), // The area where the bucket is located
Credentials: credential,
})

if err != nil {
return nil, err
}
return &Aws{
bucket: conf.Bucket,
client: sdk.New(sess),
credential: credential,
}, nil
}

type Aws struct {
bucket string
client *sdk.S3
credential *credentials.Credentials
}

func (a *Aws) Engine() string {
return "aws"
}

func (a *Aws) InitiateMultipartUpload(ctx context.Context, name string) (*s3.InitiateMultipartUploadResult, error) {
input := &sdk.CreateMultipartUploadInput{
Bucket: aws.String(a.bucket), // TODO: To be verified whether it is required
Key: aws.String(name),
}
result, err := a.client.CreateMultipartUploadWithContext(ctx, input)
if err != nil {
return nil, err
}
return &s3.InitiateMultipartUploadResult{
Bucket: *result.Bucket,
Key: *result.Key,
UploadID: *result.UploadId,
}, nil
}

func (a *Aws) CompleteMultipartUpload(ctx context.Context, uploadID string, name string, parts []s3.Part) (*s3.CompleteMultipartUploadResult, error) {
sdkParts := make([]*sdk.CompletedPart, len(parts))
for i, part := range parts {
sdkParts[i] = &sdk.CompletedPart{
ETag: aws.String(part.ETag),
PartNumber: aws.Int64(int64(part.PartNumber)),
}
}
input := &sdk.CompleteMultipartUploadInput{
Bucket: aws.String(a.bucket), // TODO: To be verified whether it is required
Key: aws.String(name),
UploadId: aws.String(uploadID),
MultipartUpload: &sdk.CompletedMultipartUpload{
Parts: sdkParts,
},
}
result, err := a.client.CompleteMultipartUploadWithContext(ctx, input)
if err != nil {
return nil, err
}
return &s3.CompleteMultipartUploadResult{
Location: *result.Location,
Bucket: *result.Bucket,
Key: *result.Key,
ETag: *result.ETag,
}, nil
}

func (a *Aws) PartSize(ctx context.Context, size int64) (int64, error) {
if size <= 0 {
return 0, errors.New("size must be greater than 0")
}
if size > maxPartSize*maxNumSize {
return 0, fmt.Errorf("AWS size must be less than the maximum allowed limit")
}
if size <= minPartSize*maxNumSize {
return minPartSize, nil
}
partSize := size / maxNumSize
if size%maxNumSize != 0 {
partSize++
}
return partSize, nil
}

func (a *Aws) DeleteObject(ctx context.Context, name string) error {
_, err := a.client.DeleteObjectWithContext(ctx, &sdk.DeleteObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
})
return err
}

func (a *Aws) CopyObject(ctx context.Context, src string, dst string) (*s3.CopyObjectInfo, error) {
result, err := a.client.CopyObjectWithContext(ctx, &sdk.CopyObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(dst),
CopySource: aws.String(src),
})
if err != nil {
return nil, err
}
return &s3.CopyObjectInfo{
ETag: *result.CopyObjectResult.ETag,
Key: dst,
}, nil
}

func (a *Aws) IsNotFound(err error) bool {
if err == nil {
return false
}
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case sdk.ErrCodeNoSuchKey:
return true
default:
return false
}
}
return false
}

func (a *Aws) AbortMultipartUpload(ctx context.Context, uploadID string, name string) error {
_, err := a.client.AbortMultipartUploadWithContext(ctx, &sdk.AbortMultipartUploadInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
UploadId: aws.String(uploadID),
})
return err
}

func (a *Aws) ListUploadedParts(ctx context.Context, uploadID string, name string, partNumberMarker int, maxParts int) (*s3.ListUploadedPartsResult, error) {
result, err := a.client.ListPartsWithContext(ctx, &sdk.ListPartsInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
UploadId: aws.String(uploadID),
MaxParts: aws.Int64(int64(maxParts)),
PartNumberMarker: aws.Int64(int64(partNumberMarker)),
})
if err != nil {
return nil, err
}
parts := make([]s3.UploadedPart, len(result.Parts))
for i, part := range result.Parts {
parts[i] = s3.UploadedPart{
PartNumber: int(*part.PartNumber),
LastModified: *part.LastModified,
Size: *part.Size,
ETag: *part.ETag,
}
}
return &s3.ListUploadedPartsResult{
Key: *result.Key,
UploadID: *result.UploadId,
NextPartNumberMarker: int(*result.NextPartNumberMarker),
MaxParts: int(*result.MaxParts),
UploadedParts: parts,
}, nil
}

func (a *Aws) PartLimit() *s3.PartLimit {
return &s3.PartLimit{
MinPartSize: minPartSize,
MaxPartSize: maxPartSize,
MaxNumSize: maxNumSize,
}
}

func (a *Aws) PresignedPutObject(ctx context.Context, name string, expire time.Duration) (string, error) {
req, _ := a.client.PutObjectRequest(&sdk.PutObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
})
url, err := req.Presign(expire)
if err != nil {
return "", err
}
return url, nil
}

func (a *Aws) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
result, err := a.client.GetObjectWithContext(ctx, &sdk.GetObjectInput{
Bucket: aws.String(a.bucket),
Key: aws.String(name),
})
if err != nil {
return nil, err
}
res := &s3.ObjectInfo{
Key: name,
ETag: *result.ETag,
Size: *result.ContentLength,
LastModified: *result.LastModified,
}
return res, nil
}

// AccessURL todo.
func (a *Aws) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
// todo
return "", nil
}

func (a *Aws) FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error) {
// todo
return nil, nil
}

func (a *Aws) AuthSign(ctx context.Context, uploadID string, name string, expire time.Duration, partNumbers []int) (*s3.AuthSignResult, error) {
// todo
return nil, nil
}
@@ -86,7 +86,6 @@ func (c *Controller) GetHashObject(ctx context.Context, hash string) (*s3.Object

func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*InitiateUploadResult, error) {
defer log.ZDebug(ctx, "return")
log.ZInfo(ctx, "InitiateUpload", "hash", hash, "size", expire)
if size < 0 {
return nil, errors.New("invalid size")
}
@@ -95,7 +94,6 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
} else if len(hashBytes) != md5.Size {
return nil, errors.New("invalid md5")
}
log.ZDebug(ctx, "InitiateUpload 33333333333333333333333")
partSize, err := c.impl.PartSize(ctx, size)
if err != nil {
return nil, err
@@ -107,15 +105,11 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
if maxParts > 0 && partNumber > 0 && partNumber < maxParts {
return nil, fmt.Errorf("too many parts: %d", partNumber)
}
log.ZDebug(ctx, "InitiateUpload 4444444444444444444444444444444")
if info, err := c.StatObject(ctx, c.HashPath(hash)); err == nil {
log.ZDebug(ctx, "InitiateUpload 55555555555555555555555555555555")
return nil, &HashAlreadyExistsError{Object: info}
} else if !c.impl.IsNotFound(err) {
log.ZDebug(ctx, "InitiateUpload 66666666666666666666666666666666", err)
return nil, err
}

if size <= partSize {
// Pre-signed upload
key := path.Join(tempPath, c.NowPath(), fmt.Sprintf("%s_%d_%s.presigned", hash, size, c.UUID()))

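InitiateUpload, shown above, asks the backend for a part size and then branches: an object no larger than a single part is handed a presigned PUT under a temporary key, anything larger goes through multipart upload. A self-contained sketch of that decision, using assumed constants rather than any backend's real limits:

    package main

    import "fmt"

    // Assumed limits for the sketch only; each backend declares its own.
    const (
        minPartSize int64 = 5 * 1024 * 1024 // 5MB
        maxParts    int64 = 10000
    )

    // partSizeFor mirrors the usual "divide by the maximum part count and
    // round up" rule used by the PartSize implementations in this repo.
    func partSizeFor(size int64) int64 {
        if size <= minPartSize*maxParts {
            return minPartSize
        }
        p := size / maxParts
        if size%maxParts != 0 {
            p++
        }
        return p
    }

    // uploadPlan is the branch InitiateUpload takes after computing partSize.
    func uploadPlan(size int64) string {
        partSize := partSizeFor(size)
        if size <= partSize {
            return "single presigned PUT"
        }
        parts := size / partSize
        if size%partSize != 0 {
            parts++
        }
        return fmt.Sprintf("multipart: %d parts of up to %d bytes", parts, partSize)
    }

    func main() {
        fmt.Println(uploadPlan(3 * 1024 * 1024))          // single presigned PUT
        fmt.Println(uploadPlan(300 * 1024 * 1024))        // multipart, 5MB parts
        fmt.Println(uploadPlan(200 * 1024 * 1024 * 1024)) // multipart, larger parts
    }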
@@ -29,7 +29,6 @@ import (
"strings"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
"github.com/tencentyun/cos-go-sdk-v5"
)
@@ -50,13 +49,15 @@ const (

const successCode = http.StatusOK

const (
// videoSnapshotImagePng = "png"
// videoSnapshotImageJpg = "jpg"
)
type Config struct {
BucketURL string
SecretID string
SecretKey string
SessionToken string
PublicRead bool
}

func NewCos(config *config.GlobalConfig) (s3.Interface, error) {
conf := config.Object.Cos
func NewCos(conf Config) (s3.Interface, error) {
u, err := url.Parse(conf.BucketURL)
if err != nil {
panic(err)
@@ -69,18 +70,18 @@ func NewCos(config *config.GlobalConfig) (s3.Interface, error) {
},
})
return &Cos{
publicRead: conf.PublicRead,
copyURL: u.Host + "/",
client: client,
credential: client.GetCredential(),
config: config,
}, nil
}

type Cos struct {
publicRead bool
copyURL string
client *cos.Client
credential *cos.Credential
config *config.GlobalConfig
}

func (c *Cos) Engine() string {
@@ -329,7 +330,7 @@ func (c *Cos) AccessURL(ctx context.Context, name string, expire time.Duration,
}

func (c *Cos) getPresignedURL(ctx context.Context, name string, expire time.Duration, opt *cos.PresignedURLOptions) (*url.URL, error) {
if !c.config.Object.Cos.PublicRead {
if !c.publicRead {
return c.client.Object.GetPresignedURL(ctx, http.MethodGet, name, c.credential.SecretID, c.credential.SecretKey, expire, opt)
}
return c.client.Object.GetObjectURL(name), nil

@@ -42,51 +42,79 @@ func ImageWidthHeight(img image.Image) (int, int) {
return bounds.X, bounds.Y
}

// resizeImage resizes an image to a specified maximum width and height, maintaining the aspect ratio.
// If both maxWidth and maxHeight are set to 0, the original image is returned.
// If both are non-zero, the image is scaled to fit within the constraints while maintaining aspect ratio.
// If only one of maxWidth or maxHeight is non-zero, the image is scaled accordingly.
func resizeImage(img image.Image, maxWidth, maxHeight int) image.Image {
bounds := img.Bounds()
imgWidth, imgHeight := bounds.Dx(), bounds.Dy()
imgWidth := bounds.Max.X
imgHeight := bounds.Max.Y

// Return original image if no resizing is needed.
// Calculate the scaling ratios
scaleWidth := float64(maxWidth) / float64(imgWidth)
scaleHeight := float64(maxHeight) / float64(imgHeight)

// If both are 0, do not scale and return the original image
if maxWidth == 0 && maxHeight == 0 {
return img
}

var scale float64 = 1
// If both width and height are greater than 0, choose the smaller scaling ratio to keep the aspect ratio
if maxWidth > 0 && maxHeight > 0 {
scaleWidth := float64(maxWidth) / float64(imgWidth)
scaleHeight := float64(maxHeight) / float64(imgHeight)
// Choose the smaller scale to fit both constraints.
scale = min(scaleWidth, scaleHeight)
} else if maxWidth > 0 {
scale = float64(maxWidth) / float64(imgWidth)
} else if maxHeight > 0 {
scale = float64(maxHeight) / float64(imgHeight)
}

newWidth := int(float64(imgWidth) * scale)
newHeight := int(float64(imgHeight) * scale)

// Resize the image by creating a new image and manually copying pixels.
thumbnail := image.NewRGBA(image.Rect(0, 0, newWidth, newHeight))
for y := 0; y < newHeight; y++ {
for x := 0; x < newWidth; x++ {
srcX := int(float64(x) / scale)
srcY := int(float64(y) / scale)
thumbnail.Set(x, y, img.At(srcX, srcY))
scale := scaleWidth
if scaleHeight < scaleWidth {
scale = scaleHeight
}

// Calculate the thumbnail dimensions
thumbnailWidth := int(float64(imgWidth) * scale)
thumbnailHeight := int(float64(imgHeight) * scale)

// Generate the thumbnail using the "image" package's resample approach
thumbnail := image.NewRGBA(image.Rect(0, 0, thumbnailWidth, thumbnailHeight))
for y := 0; y < thumbnailHeight; y++ {
for x := 0; x < thumbnailWidth; x++ {
srcX := int(float64(x) / scale)
srcY := int(float64(y) / scale)
thumbnail.Set(x, y, img.At(srcX, srcY))
}
}

return thumbnail
}

return thumbnail
}
// If only the width or only the height is specified, generate the thumbnail so that it does not exceed that maximum
if maxWidth > 0 {
thumbnailWidth := maxWidth
thumbnailHeight := int(float64(imgHeight) * scaleWidth)

// min returns the smaller of x or y.
func min(x, y float64) float64 {
if x < y {
return x
// Generate the thumbnail using the "image" package's resample approach
thumbnail := image.NewRGBA(image.Rect(0, 0, thumbnailWidth, thumbnailHeight))
for y := 0; y < thumbnailHeight; y++ {
for x := 0; x < thumbnailWidth; x++ {
srcX := int(float64(x) / scaleWidth)
srcY := int(float64(y) / scaleWidth)
thumbnail.Set(x, y, img.At(srcX, srcY))
}
}

return thumbnail
}
return y

if maxHeight > 0 {
thumbnailWidth := int(float64(imgWidth) * scaleHeight)
thumbnailHeight := maxHeight

// Generate the thumbnail using the "image" package's resample approach
thumbnail := image.NewRGBA(image.Rect(0, 0, thumbnailWidth, thumbnailHeight))
for y := 0; y < thumbnailHeight; y++ {
for x := 0; x < thumbnailWidth; x++ {
srcX := int(float64(x) / scaleHeight)
srcY := int(float64(y) / scaleHeight)
thumbnail.Set(x, y, img.At(srcX, srcY))
}
}

return thumbnail
}

// By default, return the original image
return img
}

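Both versions of resizeImage shown in this hunk pick the target dimensions the same way: when both limits are set, the smaller of maxWidth/width and maxHeight/height wins so the aspect ratio is preserved; with only one limit, that single ratio is used; with neither, the image is returned untouched. A small sketch of just that dimension arithmetic, with a worked example; targetSize is an illustrative helper, not part of the package:

    package main

    import "fmt"

    // targetSize reproduces the scale selection described above.
    func targetSize(w, h, maxW, maxH int) (int, int) {
        if maxW == 0 && maxH == 0 {
            return w, h
        }
        scale := 1.0
        switch {
        case maxW > 0 && maxH > 0:
            sw := float64(maxW) / float64(w)
            sh := float64(maxH) / float64(h)
            if sw < sh {
                scale = sw // width is the tighter constraint
            } else {
                scale = sh // height is the tighter constraint
            }
        case maxW > 0:
            scale = float64(maxW) / float64(w)
        case maxH > 0:
            scale = float64(maxH) / float64(h)
        }
        return int(float64(w) * scale), int(float64(h) * scale)
    }

    func main() {
        // 4000x3000 source in a 200x200 box: width ratio 0.05 beats height
        // ratio 0.066..., so the thumbnail is 200x150 and the ratio holds.
        fmt.Println(targetSize(4000, 3000, 200, 200)) // 200 150
        fmt.Println(targetSize(4000, 3000, 0, 300))   // 400 300
        fmt.Println(targetSize(4000, 3000, 0, 0))     // 4000 3000
    }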
@@ -33,7 +33,6 @@ import (
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
)
@@ -43,7 +42,7 @@ const (
)

const (
minPartSize int64 = 1024 * 1024 * 5 // 1MB
minPartSize int64 = 1024 * 1024 * 5 // 5MB
maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB
maxNumSize int64 = 10000
)
@@ -57,13 +56,23 @@ const (

const successCode = http.StatusOK

func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface, error) {
u, err := url.Parse(config.Object.Minio.Endpoint)
type Config struct {
Bucket string
Endpoint string
AccessKeyID string
SecretAccessKey string
SessionToken string
SignEndpoint string
PublicRead bool
}

func NewMinio(cache cache.MinioCache, conf Config) (s3.Interface, error) {
u, err := url.Parse(conf.Endpoint)
if err != nil {
return nil, err
}
opts := &minio.Options{
Creds: credentials.NewStaticV4(config.Object.Minio.AccessKeyID, config.Object.Minio.SecretAccessKey, config.Object.Minio.SessionToken),
Creds: credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
Secure: u.Scheme == "https",
}
client, err := minio.New(u.Host, opts)
@@ -71,27 +80,26 @@ func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface
return nil, err
}
m := &Minio{
bucket: config.Object.Minio.Bucket,
bucket: conf.Bucket,
core: &minio.Core{Client: client},
lock: &sync.Mutex{},
init: false,
cache: cache,
config: config,
}
if config.Object.Minio.SignEndpoint == "" || config.Object.Minio.SignEndpoint == config.Object.Minio.Endpoint {
if conf.SignEndpoint == "" || conf.SignEndpoint == conf.Endpoint {
m.opts = opts
m.sign = m.core.Client
m.prefix = u.Path
u.Path = ""
config.Object.Minio.Endpoint = u.String()
m.signEndpoint = config.Object.Minio.Endpoint
conf.Endpoint = u.String()
m.signEndpoint = conf.Endpoint
} else {
su, err := url.Parse(config.Object.Minio.SignEndpoint)
su, err := url.Parse(conf.SignEndpoint)
if err != nil {
return nil, err
}
m.opts = &minio.Options{
Creds: credentials.NewStaticV4(config.Object.Minio.AccessKeyID, config.Object.Minio.SecretAccessKey, config.Object.Minio.SessionToken),
Creds: credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
Secure: su.Scheme == "https",
}
m.sign, err = minio.New(su.Host, m.opts)
@@ -100,8 +108,8 @@ func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface
}
m.prefix = su.Path
su.Path = ""
config.Object.Minio.SignEndpoint = su.String()
m.signEndpoint = config.Object.Minio.SignEndpoint
conf.SignEndpoint = su.String()
m.signEndpoint = conf.SignEndpoint
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
@@ -112,6 +120,7 @@ func NewMinio(cache cache.MinioCache, config *config.GlobalConfig) (s3.Interface
}

type Minio struct {
conf Config
bucket string
signEndpoint string
location string
@@ -122,7 +131,6 @@ type Minio struct {
init bool
prefix string
cache cache.MinioCache
config *config.GlobalConfig
}

func (m *Minio) initMinio(ctx context.Context) error {
@@ -134,35 +142,30 @@ func (m *Minio) initMinio(ctx context.Context) error {
if m.init {
return nil
}
conf := m.config.Object.Minio
log.ZDebug(ctx, "conf!11111111111111111111111111111111111111111111111111111111111111111111111111111", "conf", conf.Bucket, "openopen")
if conf.Bucket != "openim" {
log.ZError(ctx, "ppppppppppppppppppppppppppppppp", fmt.Errorf("aaa"))
}
exists, err := m.core.Client.BucketExists(ctx, conf.Bucket)
exists, err := m.core.Client.BucketExists(ctx, m.conf.Bucket)
if err != nil {
return fmt.Errorf("check bucket exists error: %w", err)
}
if !exists {
if err = m.core.Client.MakeBucket(ctx, conf.Bucket, minio.MakeBucketOptions{}); err != nil {
if err = m.core.Client.MakeBucket(ctx, m.conf.Bucket, minio.MakeBucketOptions{}); err != nil {
return fmt.Errorf("make bucket error: %w", err)
}
}
if conf.PublicRead {
if m.conf.PublicRead {
policy := fmt.Sprintf(
`{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject","s3:PutObject"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::%s/*"],"Sid": ""}]}`,
conf.Bucket,
m.conf.Bucket,
)
if err = m.core.Client.SetBucketPolicy(ctx, conf.Bucket, policy); err != nil {
if err = m.core.Client.SetBucketPolicy(ctx, m.conf.Bucket, policy); err != nil {
return err
}
}
m.location, err = m.core.Client.GetBucketLocation(ctx, conf.Bucket)
m.location, err = m.core.Client.GetBucketLocation(ctx, m.conf.Bucket)
if err != nil {
return err
}
func() {
if conf.SignEndpoint == "" || conf.SignEndpoint == conf.Endpoint {
if m.conf.SignEndpoint == "" || m.conf.SignEndpoint == m.conf.Endpoint {
return
}
defer func() {
@@ -182,7 +185,7 @@ func (m *Minio) initMinio(ctx context.Context) error {
blc := reflect.ValueOf(m.sign).Elem().FieldByName("bucketLocCache")
vblc := reflect.New(reflect.PtrTo(blc.Type()))
*(*unsafe.Pointer)(vblc.UnsafePointer()) = unsafe.Pointer(blc.UnsafeAddr())
vblc.Elem().Elem().Interface().(interface{ Set(string, string) }).Set(conf.Bucket, m.location)
vblc.Elem().Elem().Interface().(interface{ Set(string, string) }).Set(m.conf.Bucket, m.location)
}()
m.init = true
return nil
@@ -314,14 +317,10 @@ func (m *Minio) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, er
if err := m.initMinio(ctx); err != nil {
return nil, err
}
log.ZDebug(ctx, "StatObject !!!!!!1111111111111111111111111111111111")
log.ZInfo(ctx, "StatObject", "bucket", m.bucket, "name", name)
info, err := m.core.Client.StatObject(ctx, m.bucket, name, minio.StatObjectOptions{})
if err != nil {
log.ZDebug(ctx, "StatObject !!!!!!555555555555", err)
return nil, err
}
log.ZDebug(ctx, "StatObject !!!!!!22222222222222222222222222222222222")
return &s3.ObjectInfo{
ETag: strings.ToLower(info.ETag),
Key: info.Key,
@@ -407,7 +406,7 @@ func (m *Minio) PresignedGetObject(ctx context.Context, name string, expire time
rawURL *url.URL
err error
)
if m.config.Object.Minio.PublicRead {
if m.conf.PublicRead {
rawURL, err = makeTargetURL(m.sign, m.bucket, name, m.location, false, query)
} else {
rawURL, err = m.sign.PresignedGetObject(ctx, m.bucket, name, expire, query)

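NewMinio now takes its own Config (Bucket, Endpoint, AccessKeyID, SecretAccessKey, SessionToken, SignEndpoint, PublicRead) and keeps it on the struct as m.conf instead of reaching into config.Object.Minio everywhere. The least obvious part is the endpoint handling: a separate SignEndpoint is honoured for presigning only when it differs from Endpoint, and either way the URL's path is split off and kept as a prefix. A self-contained sketch of that normalization, with illustrative names (conf, signTarget) rather than the package's:

    package main

    import (
        "fmt"
        "net/url"
    )

    type conf struct {
        Endpoint     string
        SignEndpoint string
    }

    // signTarget picks the endpoint used for presigned URLs and splits its
    // path into a prefix, mirroring the branch in NewMinio above.
    func signTarget(c conf) (string, string, error) {
        endpoint := c.Endpoint
        if c.SignEndpoint != "" && c.SignEndpoint != c.Endpoint {
            endpoint = c.SignEndpoint
        }
        u, err := url.Parse(endpoint)
        if err != nil {
            return "", "", err
        }
        prefix := u.Path // kept separately by the real code
        u.Path = ""
        return prefix, u.String(), nil
    }

    func main() {
        fmt.Println(signTarget(conf{Endpoint: "http://127.0.0.1:9000/openim"}))
        fmt.Println(signTarget(conf{
            Endpoint:     "http://minio:9000",
            SignEndpoint: "https://files.example.com/s3",
        }))
    }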
@@ -32,7 +32,6 @@ import (

"github.com/OpenIMSDK/tools/errs"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/db/s3"
)

@@ -52,13 +51,17 @@ const (

const successCode = http.StatusOK

/* const (
videoSnapshotImagePng = "png"
videoSnapshotImageJpg = "jpg"
) */
type Config struct {
Endpoint string
Bucket string
BucketURL string
AccessKeyID string
AccessKeySecret string
SessionToken string
PublicRead bool
}

func NewOSS(config *config.GlobalConfig) (s3.Interface, error) {
conf := config.Object.Oss
func NewOSS(conf Config) (s3.Interface, error) {
if conf.BucketURL == "" {
return nil, errs.Wrap(errors.New("bucket url is empty"))
}
@@ -78,7 +81,7 @@ func NewOSS(config *config.GlobalConfig) (s3.Interface, error) {
bucket: bucket,
credentials: client.Config.GetCredentials(),
um: *(*urlMaker)(reflect.ValueOf(bucket.Client.Conn).Elem().FieldByName("url").UnsafePointer()),
PublicRead: conf.PublicRead,
publicRead: conf.PublicRead,
}, nil
}

@@ -87,7 +90,7 @@ type OSS struct {
bucket *oss.Bucket
credentials oss.Credentials
um urlMaker
PublicRead bool
publicRead bool
}

func (o *OSS) Engine() string {
@@ -284,7 +287,6 @@ func (o *OSS) ListUploadedParts(ctx context.Context, uploadID string, name strin
}

func (o *OSS) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
publicRead := o.PublicRead
var opts []oss.Option
if opt != nil {
if opt.Image != nil {
@@ -312,7 +314,7 @@ func (o *OSS) AccessURL(ctx context.Context, name string, expire time.Duration,
process += ",format," + format
opts = append(opts, oss.Process(process))
}
if !publicRead {
if !o.publicRead {
if opt.ContentType != "" {
opts = append(opts, oss.ResponseContentType(opt.ContentType))
}
@@ -326,7 +328,7 @@ func (o *OSS) AccessURL(ctx context.Context, name string, expire time.Duration,
} else if expire < time.Second {
expire = time.Second
}
if !publicRead {
if !o.publicRead {
return o.bucket.SignURL(name, http.MethodGet, int64(expire/time.Second), opts...)
}
rawParams, err := oss.GetRawParams(opts)