diff --git a/.github/workflows/go-build-test.yml b/.github/workflows/go-build-test.yml
index 4033603e6..9e2aa3f1c 100644
--- a/.github/workflows/go-build-test.yml
+++ b/.github/workflows/go-build-test.yml
@@ -12,6 +12,10 @@ jobs:
   go-build:
     name: Test with go ${{ matrix.go_version }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
+
+    env:
+      SHARE_CONFIG_PATH: config/share.yml
+
     permissions:
       contents: write
       pull-requests: write
@@ -40,6 +44,10 @@ jobs:
         with:
           compose-file: "./docker-compose.yml"
 
+      - name: Modify Server Configuration
+        run: |
+          yq e '.secret = 123456' -i ${{ env.SHARE_CONFIG_PATH }}
+
       # - name: Get Internal IP Address
       #   id: get-ip
       #   run: |
@@ -71,6 +79,11 @@ jobs:
           go mod download
           go install github.com/magefile/mage@latest
 
+      - name: Modify Chat Configuration
+        run: |
+          cd ${{ github.workspace }}/chat-repo
+          yq e '.openIM.secret = 123456' -i ${{ env.SHARE_CONFIG_PATH }}
+
       - name: Build and test Chat Services
        run: |
          cd ${{ github.workspace }}/chat-repo
@@ -132,7 +145,7 @@ jobs:
           # Test get admin token
           get_admin_token_response=$(curl -X POST -H "Content-Type: application/json" -H "operationID: imAdmin" -d '{
-            "secret": "openIM123",
+            "secret": "123456",
             "platformID": 2,
             "userID": "imAdmin"
           }' http://127.0.0.1:10002/auth/get_admin_token)
@@ -169,7 +182,8 @@ jobs:
       contents: write
     env:
       SDK_DIR: openim-sdk-core
-      CONFIG_PATH: config/notification.yml
+      NOTIFICATION_CONFIG_PATH: config/notification.yml
+      SHARE_CONFIG_PATH: config/share.yml
 
     strategy:
       matrix:
@@ -184,7 +198,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           repository: "openimsdk/openim-sdk-core"
-          ref: "release-v3.8"
+          ref: "main"
           path: ${{ env.SDK_DIR }}
 
       - name: Set up Go ${{ matrix.go_version }}
@@ -199,8 +213,9 @@ jobs:
 
       - name: Modify Server Configuration
         run: |
-          yq e '.groupCreated.isSendMsg = true' -i ${{ env.CONFIG_PATH }}
-          yq e '.friendApplicationApproved.isSendMsg = true' -i ${{ env.CONFIG_PATH }}
+          yq e '.groupCreated.isSendMsg = true' -i ${{ env.NOTIFICATION_CONFIG_PATH }}
+          yq e '.friendApplicationApproved.isSendMsg = true' -i ${{ env.NOTIFICATION_CONFIG_PATH }}
+          yq e '.secret = 123456' -i ${{ env.SHARE_CONFIG_PATH }}
 
       - name: Start Server Services
         run: |
diff --git a/.golangci.yml b/.golangci.yml
index a95e980f8..7d6c6b596 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -28,6 +28,8 @@ run:
   #  - util
   #  - .*~
   #  - api/swagger/docs
+
+  #  - server/docs
   #  - components/mnt/config/certs
   #  - logs
 
diff --git a/README.md b/README.md
index 9b6606f88..4745b9a37 100644
--- a/README.md
+++ b/README.md
@@ -131,7 +131,15 @@ Thank you for contributing to building a powerful instant messaging solution!
 
 ## :closed_book: License
 
-This software is licensed under the Apache License 2.0
+This software is licensed under a dual-license model:
+
+- The GNU Affero General Public License (AGPL), Version 3 or later; **OR**
+- Commercial license terms from OpenIMSDK.
+
+If you wish to use this software under commercial terms, please contact us at: contact@openim.io
+
+For more information, see: https://www.openim.io/en/licensing
+
 
diff --git a/README_zh_CN.md b/README_zh_CN.md
index 1dcf21bda..2340ad09a 100644
--- a/README_zh_CN.md
+++ b/README_zh_CN.md
@@ -131,9 +131,17 @@
 
 感谢您的贡献,一起来打造强大的即时通讯解决方案!
 
-## :closed_book: 许可证
+## :closed_book: 开源许可证 License
+
+本软件采用双重授权模型:
+
+GNU Affero 通用公共许可证(AGPL)第 3 版或更高版本;或
+
+来自 OpenIMSDK 的商业授权条款。
+
+如需商用,请联系:contact@openim.io
+详见:https://www.openim.io/en/licensing
 
-This software is licensed under the Apache License 2.0
 
 ## 🔮 Thanks to our contributors!
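For reference, the admin-token probe that the workflow performs with curl can also be exercised from Go. The sketch below is illustrative only and is not part of the patch; it assumes the docker-compose API gateway is reachable on 127.0.0.1:10002 and that config/share.yml has already been rewritten to secret 123456 by the yq step above.

    // admin_token_probe.go — minimal sketch of the workflow's get_admin_token check.
    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	// Same payload the workflow's curl sends after the secret override.
    	body := []byte(`{"secret": "123456", "platformID": 2, "userID": "imAdmin"}`)
    	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:10002/auth/get_admin_token", bytes.NewReader(body))
    	if err != nil {
    		panic(err)
    	}
    	req.Header.Set("Content-Type", "application/json")
    	req.Header.Set("operationID", "imAdmin") // same operationID header used in the workflow
    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	out, _ := io.ReadAll(resp.Body)
    	fmt.Println(resp.Status, string(out)) // an errCode of 0 in the JSON body indicates success
    }
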
diff --git a/config/share.yml b/config/share.yml index a5fbeac75..2e9821436 100644 --- a/config/share.yml +++ b/config/share.yml @@ -1,9 +1,13 @@ secret: openIM123 -imAdminUserID: [ imAdmin ] +imAdminUserID: [imAdmin] # 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time multiLogin: policy: 1 # max num of tokens in one end - maxNumOneEnd: 30 \ No newline at end of file + maxNumOneEnd: 30 + +rpcMaxBodySize: + requestMaxBodySize: 8388608 + responseMaxBodySize: 8388608 diff --git a/go.mod b/go.mod index 70240ed05..0e3a13904 100644 --- a/go.mod +++ b/go.mod @@ -219,3 +219,5 @@ require ( golang.org/x/crypto v0.27.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) + +//replace github.com/openimsdk/protocol => /Users/chao/Desktop/code/protocol diff --git a/internal/api/init.go b/internal/api/init.go index 4bd29c9e0..1e0f1075f 100644 --- a/internal/api/init.go +++ b/internal/api/init.go @@ -144,24 +144,23 @@ func Start(ctx context.Context, index int, config *Config) error { } }() - if config.Discovery.Enable == conf.ETCD { - cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames()) - cm.Watch(ctx) - } - - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGTERM) - - shutdown := func() error { - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - err := server.Shutdown(ctx) - if err != nil { - return errs.WrapMsg(err, "shutdown err") - } - return nil - } - disetcd.RegisterShutDown(shutdown) + //if config.Discovery.Enable == conf.ETCD { + // cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames()) + // cm.Watch(ctx) + //} + //sigs := make(chan os.Signal, 1) + //signal.Notify(sigs, syscall.SIGTERM) + //select { + //case val := <-sigs: + // log.ZDebug(ctx, "recv exit", "signal", val.String()) + // cancel(fmt.Errorf("signal %s", val.String())) + //case <-ctx.Done(): + //} + <-apiCtx.Done() + exitCause := context.Cause(apiCtx) + log.ZWarn(ctx, "api server exit", exitCause) + timer := time.NewTimer(time.Second * 15) + defer timer.Stop() select { case <-sigs: program.SIGTERMExit() diff --git a/internal/api/router.go b/internal/api/router.go index b53f7c2f0..e9e5f6d5f 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -129,6 +129,11 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf userRouterGroup.POST("/add_notification_account", u.AddNotificationAccount) userRouterGroup.POST("/update_notification_account", u.UpdateNotificationAccountInfo) userRouterGroup.POST("/search_notification_account", u.SearchNotificationAccount) + + userRouterGroup.POST("/get_user_client_config", u.GetUserClientConfig) + userRouterGroup.POST("/set_user_client_config", u.SetUserClientConfig) + userRouterGroup.POST("/del_user_client_config", u.DelUserClientConfig) + userRouterGroup.POST("/page_user_client_config", u.PageUserClientConfig) } // friend routing group { diff --git a/internal/api/user.go b/internal/api/user.go index a88f8f65a..7f256f5dd 100644 --- a/internal/api/user.go +++ b/internal/api/user.go @@ -242,3 +242,19 @@ func (u *UserApi) UpdateNotificationAccountInfo(c *gin.Context) { func (u *UserApi) SearchNotificationAccount(c *gin.Context) { a2r.Call(c, user.UserClient.SearchNotificationAccount, u.Client) } + +func (u *UserApi) GetUserClientConfig(c *gin.Context) { + a2r.Call(c, user.UserClient.GetUserClientConfig, u.Client) +} + +func (u *UserApi) SetUserClientConfig(c 
*gin.Context) { + a2r.Call(c, user.UserClient.SetUserClientConfig, u.Client) +} + +func (u *UserApi) DelUserClientConfig(c *gin.Context) { + a2r.Call(c, user.UserClient.DelUserClientConfig, u.Client) +} + +func (u *UserApi) PageUserClientConfig(c *gin.Context) { + a2r.Call(c, user.UserClient.PageUserClientConfig, u.Client) +} diff --git a/internal/rpc/auth/auth.go b/internal/rpc/auth/auth.go index 5b4378266..2c2691d1d 100644 --- a/internal/rpc/auth/auth.go +++ b/internal/rpc/auth/auth.go @@ -18,11 +18,14 @@ import ( "context" "errors" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" + "github.com/openimsdk/open-im-server/v3/pkg/dbbuild" "github.com/openimsdk/open-im-server/v3/pkg/rpcli" "github.com/openimsdk/open-im-server/v3/pkg/common/config" redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" - "github.com/openimsdk/tools/db/redisutil" "github.com/openimsdk/tools/utils/datautil" "github.com/redis/go-redis/v9" @@ -43,7 +46,7 @@ import ( type authServer struct { pbauth.UnimplementedAuthServer authDatabase controller.AuthDatabase - RegisterCenter discovery.SvcDiscoveryRegistry + RegisterCenter discovery.Conn config *Config userClient *rpcli.UserClient } @@ -51,15 +54,31 @@ type authServer struct { type Config struct { RpcConfig config.Auth RedisConfig config.Redis + MongoConfig config.Mongo Share config.Share Discovery config.Discovery } -func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error { - rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) +func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error { + dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig) + rdb, err := dbb.Redis(ctx) if err != nil { return err } + var token cache.TokenModel + if rdb == nil { + mdb, err := dbb.Mongo(ctx) + if err != nil { + return err + } + mc, err := mgo.NewCacheMgo(mdb.GetDB()) + if err != nil { + return err + } + token = mcache.NewTokenCacheModel(mc, config.RpcConfig.TokenPolicy.Expire) + } else { + token = redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire) + } userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User) if err != nil { return err @@ -67,7 +86,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg pbauth.RegisterAuthServer(server, &authServer{ RegisterCenter: client, authDatabase: controller.NewAuthDatabase( - redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire), + token, config.Share.Secret, config.RpcConfig.TokenPolicy.Expire, config.Share.MultiLogin, @@ -140,10 +159,6 @@ func (s *authServer) parseToken(ctx context.Context, tokensString string) (claim if err != nil { return nil, err } - isAdmin := authverify.IsManagerUserID(claims.UserID, s.config.Share.IMAdminUserID) - if isAdmin { - return claims, nil - } m, err := s.authDatabase.GetTokensWithoutError(ctx, claims.UserID, claims.PlatformID) if err != nil { return nil, err diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index 9f7a8e63c..1ed3ce799 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -451,12 +451,25 @@ func (g *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite return nil, err } - if err := g.db.CreateGroup(ctx, nil, groupMembers); err != nil { 
- return nil, err - } + const singleQuantity = 50 + for start := 0; start < len(groupMembers); start += singleQuantity { + end := start + singleQuantity + if end > len(groupMembers) { + end = len(groupMembers) + } + currentMembers := groupMembers[start:end] - if err = g.notification.GroupApplicationAgreeMemberEnterNotification(ctx, req.GroupID, opUserID, req.InvitedUserIDs...); err != nil { - return nil, err + if err := g.db.CreateGroup(ctx, nil, currentMembers); err != nil { + return nil, err + } + + userIDs := datautil.Slice(currentMembers, func(e *model.GroupMember) string { + return e.UserID + }) + + if err = g.notification.GroupApplicationAgreeMemberEnterNotification(ctx, req.GroupID, req.SendMessage, opUserID, userIDs...); err != nil { + return nil, err + } } return &pbgroup.InviteUserToGroupResp{}, nil } @@ -1353,6 +1366,7 @@ func (g *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGrou if err != nil { return nil, err } + group.Status = constant.GroupStatusDismissed tips := &sdkws.GroupDismissedTips{ Group: g.groupDB2PB(group, owner.UserID, num), OpUser: &sdkws.GroupMemberFullInfo{}, diff --git a/internal/rpc/group/notification.go b/internal/rpc/group/notification.go index 4b7a56a6d..a6c715735 100644 --- a/internal/rpc/group/notification.go +++ b/internal/rpc/group/notification.go @@ -283,7 +283,8 @@ func (g *NotificationSender) fillOpUserByUserID(ctx context.Context, userID stri func (g *NotificationSender) setVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string) { versions := versionctx.GetVersionLog(ctx).Get() - for _, coll := range versions { + for i := len(versions) - 1; i >= 0; i-- { + coll := versions[i] if coll.Name == collName && coll.Doc.DID == id { *version = uint64(coll.Doc.Version) *versionID = coll.Doc.ID.Hex() @@ -519,7 +520,11 @@ func (g *NotificationSender) MemberKickedNotification(ctx context.Context, tips g.Notification(ctx, mcontext.GetOpUserID(ctx), tips.Group.GroupID, constant.MemberKickedNotification, tips) } -func (g *NotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error { +func (g *NotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, SendMessage *bool, invitedOpUserID string, entrantUserID ...string) error { + return g.groupApplicationAgreeMemberEnterNotification(ctx, groupID, SendMessage, invitedOpUserID, entrantUserID...) 
+} + +func (g *NotificationSender) groupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, SendMessage *bool, invitedOpUserID string, entrantUserID ...string) error { var err error defer func() { if err != nil { diff --git a/internal/rpc/msg/as_read.go b/internal/rpc/msg/as_read.go index de1879438..b25eae6b1 100644 --- a/internal/rpc/msg/as_read.go +++ b/internal/rpc/msg/as_read.go @@ -61,6 +61,13 @@ func (m *msgServer) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *m return nil, err } resp := &msg.GetConversationsHasReadAndMaxSeqResp{Seqs: make(map[string]*msg.Seqs)} + if req.ReturnPinned { + pinnedConversationIDs, err := m.ConversationLocalCache.GetPinnedConversationIDs(ctx, req.UserID) + if err != nil { + return nil, err + } + resp.PinnedConversationIDs = pinnedConversationIDs + } for conversationID, maxSeq := range maxSeqs { resp.Seqs[conversationID] = &msg.Seqs{ HasReadSeq: hasReadSeqs[conversationID], diff --git a/internal/rpc/third/s3.go b/internal/rpc/third/s3.go index 96af0dba1..bfdd9f1b8 100644 --- a/internal/rpc/third/s3.go +++ b/internal/rpc/third/s3.go @@ -62,7 +62,7 @@ func (t *thirdServer) InitiateMultipartUpload(ctx context.Context, req *third.In return nil, err } expireTime := time.Now().Add(t.defaultExpire) - result, err := t.s3dataBase.InitiateMultipartUpload(ctx, req.Hash, req.Size, t.defaultExpire, int(req.MaxParts)) + result, err := t.s3dataBase.InitiateMultipartUpload(ctx, req.Hash, req.Size, t.defaultExpire, int(req.MaxParts), req.ContentType) if err != nil { if haErr, ok := errs.Unwrap(err).(*cont.HashAlreadyExistsError); ok { obj := &model.Object{ diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index 8b9b47b72..7f082f784 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -23,45 +23,48 @@ import ( "time" "github.com/openimsdk/open-im-server/v3/internal/rpc/relation" + "github.com/openimsdk/open-im-server/v3/pkg/authverify" "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/open-im-server/v3/pkg/common/convert" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" + "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo" tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" "github.com/openimsdk/open-im-server/v3/pkg/common/webhook" + "github.com/openimsdk/open-im-server/v3/pkg/dbbuild" "github.com/openimsdk/open-im-server/v3/pkg/localcache" "github.com/openimsdk/open-im-server/v3/pkg/rpcli" + "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/group" friendpb "github.com/openimsdk/protocol/relation" - "github.com/openimsdk/tools/db/redisutil" - - "github.com/openimsdk/open-im-server/v3/pkg/authverify" - "github.com/openimsdk/open-im-server/v3/pkg/common/convert" - "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs" - "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller" - "github.com/openimsdk/protocol/constant" "github.com/openimsdk/protocol/sdkws" pbuser "github.com/openimsdk/protocol/user" - "github.com/openimsdk/tools/db/mongoutil" "github.com/openimsdk/tools/db/pagination" - registry "github.com/openimsdk/tools/discovery" + "github.com/openimsdk/tools/discovery" 
"github.com/openimsdk/tools/errs" "github.com/openimsdk/tools/utils/datautil" "google.golang.org/grpc" ) +const ( + defaultSecret = "openIM123" +) + type userServer struct { pbuser.UnimplementedUserServer online cache.OnlineCache db controller.UserDatabase friendNotificationSender *relation.FriendNotificationSender userNotificationSender *UserNotificationSender - RegisterCenter registry.SvcDiscoveryRegistry + RegisterCenter discovery.Conn config *Config webhookClient *webhook.Client groupClient *rpcli.GroupClient relationClient *rpcli.RelationClient + clientConfig controller.ClientConfigDatabase } type Config struct { @@ -76,24 +79,30 @@ type Config struct { Discovery config.Discovery } -func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegistry, server *grpc.Server) error { - mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build()) +func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error { + dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig) + mgocli, err := dbb.Mongo(ctx) if err != nil { return err } - rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build()) + rdb, err := dbb.Redis(ctx) if err != nil { return err } + users := make([]*tablerelation.User, 0) for _, v := range config.Share.IMAdminUserID { - users = append(users, &tablerelation.User{UserID: v, Nickname: v, AppMangerLevel: constant.AppNotificationAdmin}) + users = append(users, &tablerelation.User{UserID: v, Nickname: v, AppMangerLevel: constant.AppAdmin}) } userDB, err := mgo.NewUserMongo(mgocli.GetDB()) if err != nil { return err } + clientConfigDB, err := mgo.NewClientConfig(mgocli.GetDB()) + if err != nil { + return err + } msgConn, err := client.GetConn(ctx, config.Discovery.RpcService.Msg) if err != nil { return err @@ -118,9 +127,9 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi userNotificationSender: NewUserNotificationSender(config, msgClient, WithUserFunc(database.FindWithError)), config: config, webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), - - groupClient: rpcli.NewGroupClient(groupConn), - relationClient: rpcli.NewRelationClient(friendConn), + clientConfig: controller.NewClientConfigDatabase(clientConfigDB, redis.NewClientConfigCache(rdb, clientConfigDB), mgocli.GetTx()), + groupClient: rpcli.NewGroupClient(groupConn), + relationClient: rpcli.NewRelationClient(friendConn), } pbuser.RegisterUserServer(server, u) return u.db.InitOnce(context.Background(), users) @@ -606,7 +615,7 @@ func (s *userServer) GetNotificationAccount(ctx context.Context, req *pbuser.Get if err != nil { return nil, servererrs.ErrUserIDNotFound.Wrap() } - if user.AppMangerLevel == constant.AppAdmin || user.AppMangerLevel >= constant.AppNotificationAdmin { + if user.AppMangerLevel >= constant.AppAdmin { return &pbuser.GetNotificationAccountResp{Account: &pbuser.NotificationAccountInfo{ UserID: user.UserID, FaceURL: user.FaceURL, diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index ca448083c..6b3bff30f 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -378,9 +378,15 @@ type AfterConfig struct { } type Share struct { - Secret string `mapstructure:"secret"` - IMAdminUserID []string `mapstructure:"imAdminUserID"` - MultiLogin MultiLogin `mapstructure:"multiLogin"` + Secret string `yaml:"secret"` + IMAdminUserID []string `yaml:"imAdminUserID"` + MultiLogin MultiLogin `yaml:"multiLogin"` + RPCMaxBodySize MaxRequestBody 
`yaml:"rpcMaxBodySize"` +} + +type MaxRequestBody struct { + RequestMaxBodySize int `yaml:"requestMaxBodySize"` + ResponseMaxBodySize int `yaml:"responseMaxBodySize"` } type MultiLogin struct { diff --git a/pkg/common/servererrs/code.go b/pkg/common/servererrs/code.go index 3d0aa4a71..906f890a5 100644 --- a/pkg/common/servererrs/code.go +++ b/pkg/common/servererrs/code.go @@ -37,7 +37,8 @@ const ( // General error codes. const ( - NoError = 0 // No error + NoError = 0 // No error + DatabaseError = 90002 // Database error (redis/mysql, etc.) NetworkError = 90004 // Network error DataError = 90007 // Data error @@ -45,11 +46,12 @@ const ( CallbackError = 80000 // General error codes. - ServerInternalError = 500 // Server internal error - ArgsError = 1001 // Input parameter error - NoPermissionError = 1002 // Insufficient permission - DuplicateKeyError = 1003 - RecordNotFoundError = 1004 // Record does not exist + ServerInternalError = 500 // Server internal error + ArgsError = 1001 // Input parameter error + NoPermissionError = 1002 // Insufficient permission + DuplicateKeyError = 1003 + RecordNotFoundError = 1004 // Record does not exist + SecretNotChangedError = 1050 // secret not changed // Account error codes. UserIDNotFoundError = 1101 // UserID does not exist or is not registered diff --git a/pkg/common/servererrs/predefine.go b/pkg/common/servererrs/predefine.go index ab09aa512..b1d6b06a9 100644 --- a/pkg/common/servererrs/predefine.go +++ b/pkg/common/servererrs/predefine.go @@ -17,6 +17,8 @@ package servererrs import "github.com/openimsdk/tools/errs" var ( + ErrSecretNotChanged = errs.NewCodeError(SecretNotChangedError, "secret not changed, please change secret in config/share.yml for security reasons") + ErrDatabase = errs.NewCodeError(DatabaseError, "DatabaseError") ErrNetwork = errs.NewCodeError(NetworkError, "NetworkError") ErrCallback = errs.NewCodeError(CallbackError, "CallbackError") diff --git a/pkg/common/startrpc/start.go b/pkg/common/startrpc/start.go index dfec77913..03621343b 100644 --- a/pkg/common/startrpc/start.go +++ b/pkg/common/startrpc/start.go @@ -19,22 +19,19 @@ import ( "errors" "fmt" "net" - "net/http" "os" "os/signal" + "reflect" "strconv" "syscall" "time" conf "github.com/openimsdk/open-im-server/v3/pkg/common/config" - disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd" - "github.com/openimsdk/tools/discovery/etcd" "github.com/openimsdk/tools/utils/datautil" "github.com/openimsdk/tools/utils/jsonutil" + "github.com/openimsdk/tools/utils/network" "google.golang.org/grpc/status" - "github.com/openimsdk/tools/utils/runtimeenv" - kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery" "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics" "github.com/openimsdk/tools/discovery" @@ -111,17 +108,9 @@ func getConfigShare(value reflect.Value) *conf.Share { func Start[T any](ctx context.Context, disc *conf.Discovery, prometheusConfig *conf.Prometheus, listenIP, registerIP string, autoSetPorts bool, rpcPorts []int, index int, rpcRegisterName string, notification *conf.Notification, config T, watchConfigNames []string, watchServiceNames []string, - rpcFn func(ctx context.Context, config T, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error, + rpcFn func(ctx context.Context, config T, client discovery.Conn, server grpc.ServiceRegistrar) error, options ...grpc.ServerOption) error { - watchConfigNames = append(watchConfigNames, conf.LogConfigFileName) - var ( - rpcTcpAddr string - netDone = make(chan struct{}, 2) 
- netErr error - prometheusPort int - ) - if notification != nil { conf.InitNotification(notification) } @@ -157,33 +146,20 @@ func Start[T any](ctx context.Context, disc *conf.Discovery, prometheusConfig *c if err != nil { return err } - - runTimeEnv := runtimeenv.RuntimeEnvironment() - - if !autoSetPorts { - rpcPort, err := datautil.GetElemByIndex(rpcPorts, index) + var prometheusListenAddr string + if autoSetPorts { + prometheusListenAddr = net.JoinHostPort(listenIP, "0") + } else { + prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index) if err != nil { return err } - rpcTcpAddr = net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort)) - } else { - rpcTcpAddr = net.JoinHostPort(network.GetListenIP(listenIP), "0") + prometheusListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(prometheusPort)) } - getAutoPort := func() (net.Listener, int, error) { - listener, err := net.Listen("tcp", rpcTcpAddr) - if err != nil { - return nil, 0, errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr) - } - _, portStr, _ := net.SplitHostPort(listener.Addr().String()) - port, _ := strconv.Atoi(portStr) - return listener, port, nil - } + watchConfigNames = append(watchConfigNames, conf.LogConfigFileName) - if autoSetPorts && discovery.Enable != conf.ETCD { - return errs.New("only etcd support autoSetPorts", "rpcRegisterName", rpcRegisterName).Wrap() - } - client, err := kdisc.NewDiscoveryRegister(discovery, runTimeEnv, watchServiceNames) + client, err := kdisc.NewDiscoveryRegister(disc, watchServiceNames) if err != nil { return err } @@ -217,122 +193,111 @@ func Start[T any](ctx context.Context, disc *conf.Discovery, prometheusConfig *c if prometheusListenAddr != "" { options = append( - options, mw.GrpcServer(), + options, prommetricsUnaryInterceptor(rpcRegisterName), prommetricsStreamInterceptor(rpcRegisterName), ) - - var ( - listener net.Listener - ) - - if autoSetPorts { - listener, prometheusPort, err = getAutoPort() - if err != nil { - return err - } - - etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient() - - _, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(rpcRegisterName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort))) - if err != nil { - return errs.WrapMsg(err, "etcd put err") - } - } else { - prometheusPort, err = datautil.GetElemByIndex(prometheusConfig.Ports, index) - if err != nil { - return err - } - listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort)) - if err != nil { - return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr) - } - } - cs := prommetrics.GetGrpcCusMetrics(rpcRegisterName, discovery) - go func() { - if err := prommetrics.RpcInit(cs, listener); err != nil && !errors.Is(err, http.ErrServerClosed) { - netErr = errs.WrapMsg(err, fmt.Sprintf("rpc %s prometheus start err: %d", rpcRegisterName, prometheusPort)) - netDone <- struct{}{} - } - //metric.InitializeMetrics(srv) - // Create a HTTP server for prometheus. 
- // httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)} - // if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - // netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr) - // netDone <- struct{}{} - // } - }() - } else { - options = append(options, mw.GrpcServer()) - } - - listener, port, err := getAutoPort() - if err != nil { - return err - } - - log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", port, - "prometheusPort", prometheusPort) - - defer listener.Close() - srv := grpc.NewServer(options...) - - err = rpcFn(ctx, config, client, srv) - if err != nil { - return err - } - - err = client.Register( - ctx, - rpcRegisterName, - registerIP, - port, - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - return err - } - - go func() { - err := srv.Serve(listener) - if err != nil && !errors.Is(err, http.ErrServerClosed) { - netErr = errs.WrapMsg(err, "rpc start err: ", rpcTcpAddr) - netDone <- struct{}{} - } - }() - - if discovery.Enable == conf.ETCD { - cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), watchConfigNames) - cm.Watch(ctx) - } - - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGTERM) - select { - case <-sigs: - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := gracefulStopWithCtx(ctx, srv.GracefulStop); err != nil { + prometheusListener, prometheusPort, err := listenTCP(prometheusListenAddr) + if err != nil { return err } - return nil - case <-netDone: - return netErr + log.ZDebug(ctx, "prometheus start", "addr", prometheusListener.Addr(), "rpcRegisterName", rpcRegisterName) + target, err := jsonutil.JsonMarshal(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)) + if err != nil { + return err + } + if err := client.SetKey(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), target); err != nil { + if !errors.Is(err, discovery.ErrNotSupportedKeyValue) { + return err + } + } + go func() { + err := prommetrics.Start(prometheusListener) + if err == nil { + err = fmt.Errorf("listener done") + } + cancel(fmt.Errorf("prommetrics %s %w", rpcRegisterName, err)) + }() } + + var ( + rpcServer *grpc.Server + rpcGracefulStop chan struct{} + ) + + onGrpcServiceRegistrar := func(desc *grpc.ServiceDesc, impl any) { + if rpcServer != nil { + rpcServer.RegisterService(desc, impl) + return + } + var rpcListenAddr string + if autoSetPorts { + rpcListenAddr = net.JoinHostPort(listenIP, "0") + } else { + rpcPort, err := datautil.GetElemByIndex(rpcPorts, index) + if err != nil { + cancel(fmt.Errorf("rpcPorts index out of range %s %w", rpcRegisterName, err)) + return + } + rpcListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(rpcPort)) + } + rpcListener, err := net.Listen("tcp", rpcListenAddr) + if err != nil { + cancel(fmt.Errorf("listen rpc %s %s %w", rpcRegisterName, rpcListenAddr, err)) + return + } + + rpcServer = grpc.NewServer(options...) 
+		rpcServer.RegisterService(desc, impl)
+		rpcGracefulStop = make(chan struct{})
+		rpcPort := rpcListener.Addr().(*net.TCPAddr).Port
+		log.ZDebug(ctx, "rpc start register", "rpcRegisterName", rpcRegisterName, "registerIP", registerIP, "rpcPort", rpcPort)
+		grpcOpt := grpc.WithTransportCredentials(insecure.NewCredentials())
+		rpcGracefulStop = make(chan struct{})
+		go func() {
+			<-ctx.Done()
+			rpcServer.GracefulStop()
+			close(rpcGracefulStop)
+		}()
+		if err := client.Register(ctx, rpcRegisterName, registerIP, rpcListener.Addr().(*net.TCPAddr).Port, grpcOpt); err != nil {
+			cancel(fmt.Errorf("rpc register %s %w", rpcRegisterName, err))
+			return
+		}
+
+		go func() {
+			err := rpcServer.Serve(rpcListener)
+			if err == nil {
+				err = fmt.Errorf("serve end")
+			}
+			cancel(fmt.Errorf("rpc %s %w", rpcRegisterName, err))
+		}()
+	}
+
+	err = rpcFn(ctx, config, client, &grpcServiceRegistrar{onRegisterService: onGrpcServiceRegistrar})
+	if err != nil {
+		return err
+	}
+	<-ctx.Done()
+	log.ZDebug(ctx, "cmd wait done", "err", context.Cause(ctx))
+	if rpcGracefulStop != nil {
+		timeout := time.NewTimer(time.Second * 15)
+		defer timeout.Stop()
+		select {
+		case <-timeout.C:
+			log.ZWarn(ctx, "rpc graceful stop timeout", nil)
+		case <-rpcGracefulStop:
+			log.ZDebug(ctx, "rpc graceful stop done")
+		}
+	}
+	return context.Cause(ctx)
 }
 
-func gracefulStopWithCtx(ctx context.Context, f func()) error {
-	done := make(chan struct{}, 1)
-	go func() {
-		f()
-		close(done)
-	}()
-	select {
-	case <-ctx.Done():
-		return errs.New("timeout, ctx graceful stop")
-	case <-done:
-		return nil
+func listenTCP(addr string) (net.Listener, int, error) {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		return nil, 0, errs.WrapMsg(err, "listen err", "addr", addr)
 	}
+	return listener, listener.Addr().(*net.TCPAddr).Port, nil
 }
 
 func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
@@ -356,3 +321,11 @@ func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption {
 	return grpc.ChainStreamInterceptor()
 }
+
+type grpcServiceRegistrar struct {
+	onRegisterService func(desc *grpc.ServiceDesc, impl any)
+}
+
+func (x *grpcServiceRegistrar) RegisterService(desc *grpc.ServiceDesc, impl any) {
+	x.onRegisterService(desc, impl)
+}
diff --git a/pkg/common/storage/cache/cachekey/client_config.go b/pkg/common/storage/cache/cachekey/client_config.go
new file mode 100644
index 000000000..16770adef
--- /dev/null
+++ b/pkg/common/storage/cache/cachekey/client_config.go
@@ -0,0 +1,10 @@
+package cachekey
+
+const ClientConfig = "CLIENT_CONFIG"
+
+func GetClientConfigKey(userID string) string {
+	if userID == "" {
+		return ClientConfig
+	}
+	return ClientConfig + ":" + userID
+}
diff --git a/pkg/common/storage/cache/cachekey/token.go b/pkg/common/storage/cache/cachekey/token.go
index 83ba2f211..6fe1bdfef 100644
--- a/pkg/common/storage/cache/cachekey/token.go
+++ b/pkg/common/storage/cache/cachekey/token.go
@@ -1,8 +1,9 @@
 package cachekey
 
 import (
-	"github.com/openimsdk/protocol/constant"
 	"strings"
+
+	"github.com/openimsdk/protocol/constant"
 )
 
 const (
@@ -13,6 +14,10 @@ func GetTokenKey(userID string, platformID int) string {
 	return UidPidToken + userID + ":" + constant.PlatformIDToName(platformID)
 }
 
+func GetTemporaryTokenKey(userID string, platformID int, token string) string {
+	return UidPidToken + ":TEMPORARY:" + userID + ":" + constant.PlatformIDToName(platformID) + ":" + token
+}
+
 func GetAllPlatformTokenKey(userID
string) []string { res := make([]string, len(constant.PlatformID2Name)) for k := range constant.PlatformID2Name { diff --git a/pkg/common/storage/cache/client_config.go b/pkg/common/storage/cache/client_config.go new file mode 100644 index 000000000..329f25c59 --- /dev/null +++ b/pkg/common/storage/cache/client_config.go @@ -0,0 +1,8 @@ +package cache + +import "context" + +type ClientConfigCache interface { + DeleteUserCache(ctx context.Context, userIDs []string) error + GetUserConfig(ctx context.Context, userID string) (map[string]string, error) +} diff --git a/pkg/common/storage/cache/mcache/token.go b/pkg/common/storage/cache/mcache/token.go new file mode 100644 index 000000000..98b9cc066 --- /dev/null +++ b/pkg/common/storage/cache/mcache/token.go @@ -0,0 +1,166 @@ +package mcache + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/log" +) + +func NewTokenCacheModel(cache database.Cache, accessExpire int64) cache.TokenModel { + c := &tokenCache{cache: cache} + c.accessExpire = c.getExpireTime(accessExpire) + return c +} + +type tokenCache struct { + cache database.Cache + accessExpire time.Duration +} + +func (x *tokenCache) getTokenKey(userID string, platformID int, token string) string { + return cachekey.GetTokenKey(userID, platformID) + ":" + token +} + +func (x *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error { + return x.cache.Set(ctx, x.getTokenKey(userID, platformID, token), strconv.Itoa(flag), x.accessExpire) +} + +// SetTokenFlagEx set token and flag with expire time +func (x *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error { + return x.SetTokenFlag(ctx, userID, platformID, token, flag) +} + +func (x *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) { + prefix := x.getTokenKey(userID, platformID, "") + m, err := x.cache.Prefix(ctx, prefix) + if err != nil { + return nil, errs.Wrap(err) + } + mm := make(map[string]int) + for k, v := range m { + state, err := strconv.Atoi(v) + if err != nil { + log.ZError(ctx, "token value is not int", err, "value", v, "userID", userID, "platformID", platformID) + continue + } + mm[strings.TrimPrefix(k, prefix)] = state + } + return mm, nil +} + +func (x *tokenCache) HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error { + key := cachekey.GetTemporaryTokenKey(userID, platformID, token) + if _, err := x.cache.Get(ctx, []string{key}); err != nil { + return err + } + return nil +} + +func (x *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) { + prefix := cachekey.UidPidToken + userID + ":" + tokens, err := x.cache.Prefix(ctx, prefix) + if err != nil { + return nil, err + } + res := make(map[int]map[string]int) + for key, flagStr := range tokens { + flag, err := strconv.Atoi(flagStr) + if err != nil { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID) + continue + } + arr := strings.SplitN(strings.TrimPrefix(key, prefix), ":", 2) + if len(arr) != 2 { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", 
userID) + continue + } + platformID, err := strconv.Atoi(arr[0]) + if err != nil { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID) + continue + } + token := arr[1] + if token == "" { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID) + continue + } + tk, ok := res[platformID] + if !ok { + tk = make(map[string]int) + res[platformID] = tk + } + tk[token] = flag + } + return res, nil +} + +func (x *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error { + for token, flag := range m { + err := x.SetTokenFlag(ctx, userID, platformID, token, flag) + if err != nil { + return err + } + } + return nil +} + +func (x *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error { + for prefix, tokenFlag := range tokens { + for token, flag := range tokenFlag { + flagStr := fmt.Sprintf("%v", flag) + if err := x.cache.Set(ctx, prefix+":"+token, flagStr, x.accessExpire); err != nil { + return err + } + } + } + return nil +} + +func (x *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error { + keys := make([]string, 0, len(fields)) + for _, token := range fields { + keys = append(keys, x.getTokenKey(userID, platformID, token)) + } + return x.cache.Del(ctx, keys) +} + +func (x *tokenCache) getExpireTime(t int64) time.Duration { + return time.Hour * 24 * time.Duration(t) +} + +func (x *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error { + keys := make([]string, 0, len(tokens)) + for platformID, ts := range tokens { + for _, t := range ts { + keys = append(keys, x.getTokenKey(userID, platformID, t)) + } + } + return x.cache.Del(ctx, keys) +} + +func (x *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error { + keys := make([]string, 0, len(fields)) + for _, f := range fields { + keys = append(keys, x.getTokenKey(userID, platformID, f)) + } + if err := x.cache.Del(ctx, keys); err != nil { + return err + } + + for _, f := range fields { + k := cachekey.GetTemporaryTokenKey(userID, platformID, f) + if err := x.cache.Set(ctx, k, "", time.Minute*5); err != nil { + return errs.Wrap(err) + } + } + + return nil +} diff --git a/pkg/common/storage/cache/redis/client_config.go b/pkg/common/storage/cache/redis/client_config.go new file mode 100644 index 000000000..c5a455146 --- /dev/null +++ b/pkg/common/storage/cache/redis/client_config.go @@ -0,0 +1,69 @@ +package redis + +import ( + "context" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/redis/go-redis/v9" +) + +func NewClientConfigCache(rdb redis.UniversalClient, mgo database.ClientConfig) cache.ClientConfigCache { + rc := newRocksCacheClient(rdb) + return &ClientConfigCache{ + mgo: mgo, + rcClient: rc, + delete: rc.GetBatchDeleter(), + } +} + +type ClientConfigCache struct { + mgo database.ClientConfig + rcClient *rocksCacheClient + delete cache.BatchDeleter +} + +func (x *ClientConfigCache) getExpireTime(userID string) time.Duration { + if userID == "" { + return time.Hour * 24 + } else { + return time.Hour + } +} + +func (x *ClientConfigCache) getClientConfigKey(userID string) string { + return cachekey.GetClientConfigKey(userID) +} 
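The client-config cache above keeps a global entry under the CLIENT_CONFIG key (cached for 24 hours) and a per-user entry under CLIENT_CONFIG:<userID> (cached for one hour). The standalone sketch below, which is not part of the patch, illustrates the precedence the cache's GetUserConfig applies when both entries exist: per-user values override the global defaults. The config keys shown are hypothetical placeholders.

    // Illustration of the merge order used when resolving a user's client config.
    package main

    import "fmt"

    // mergeClientConfig mirrors the precedence in ClientConfigCache.GetUserConfig:
    // start from the global map, then let the per-user map override matching keys.
    func mergeClientConfig(global, user map[string]string) map[string]string {
    	merged := make(map[string]string, len(global)+len(user))
    	for k, v := range global {
    		merged[k] = v
    	}
    	for k, v := range user {
    		merged[k] = v // a per-user value wins over the global default
    	}
    	return merged
    }

    func main() {
    	global := map[string]string{"theme": "light", "logLevel": "info"} // hypothetical keys
    	user := map[string]string{"theme": "dark"}
    	fmt.Println(mergeClientConfig(global, user)) // map[logLevel:info theme:dark]
    }
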
+ +func (x *ClientConfigCache) GetConfig(ctx context.Context, userID string) (map[string]string, error) { + return getCache(ctx, x.rcClient, x.getClientConfigKey(userID), x.getExpireTime(userID), func(ctx context.Context) (map[string]string, error) { + return x.mgo.Get(ctx, userID) + }) +} + +func (x *ClientConfigCache) DeleteUserCache(ctx context.Context, userIDs []string) error { + keys := make([]string, 0, len(userIDs)) + for _, userID := range userIDs { + keys = append(keys, x.getClientConfigKey(userID)) + } + return x.delete.ExecDelWithKeys(ctx, keys) +} + +func (x *ClientConfigCache) GetUserConfig(ctx context.Context, userID string) (map[string]string, error) { + config, err := x.GetConfig(ctx, "") + if err != nil { + return nil, err + } + if userID != "" { + userConfig, err := x.GetConfig(ctx, userID) + if err != nil { + return nil, err + } + for k, v := range userConfig { + config[k] = v + } + } + return config, nil +} diff --git a/pkg/common/storage/cache/redis/token.go b/pkg/common/storage/cache/redis/token.go index 510da43e3..b3870daee 100644 --- a/pkg/common/storage/cache/redis/token.go +++ b/pkg/common/storage/cache/redis/token.go @@ -9,6 +9,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/utils/datautil" "github.com/redis/go-redis/v9" ) @@ -55,6 +56,14 @@ func (c *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, p return mm, nil } +func (c *tokenCache) HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error { + err := c.rdb.Get(ctx, cachekey.GetTemporaryTokenKey(userID, platformID, token)).Err() + if err != nil { + return errs.Wrap(err) + } + return nil +} + func (c *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) { var ( res = make(map[int]map[string]int) @@ -101,13 +110,19 @@ func (c *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, pla } func (c *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error { - pipe := c.rdb.Pipeline() - for k, v := range tokens { - pipe.HSet(ctx, k, v) - } - _, err := pipe.Exec(ctx) - if err != nil { - return errs.Wrap(err) + keys := datautil.Keys(tokens) + if err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error { + pipe := c.rdb.Pipeline() + for k, v := range tokens { + pipe.HSet(ctx, k, v) + } + _, err := pipe.Exec(ctx) + if err != nil { + return errs.Wrap(err) + } + return nil + }); err != nil { + return err } return nil } @@ -119,3 +134,47 @@ func (c *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, pla func (c *tokenCache) getExpireTime(t int64) time.Duration { return time.Hour * 24 * time.Duration(t) } + +// DeleteTokenByTokenMap tokens key is platformID, value is token slice +func (c *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error { + var ( + keys = make([]string, 0, len(tokens)) + keyMap = make(map[string][]string) + ) + for k, v := range tokens { + k1 := cachekey.GetTokenKey(userID, k) + keys = append(keys, k1) + keyMap[k1] = v + } + + if err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error { + pipe := c.rdb.Pipeline() + for k, v := range tokens { + pipe.HDel(ctx, cachekey.GetTokenKey(userID, k), v...) 
+ } + _, err := pipe.Exec(ctx) + if err != nil { + return errs.Wrap(err) + } + return nil + }); err != nil { + return err + } + + return nil +} + +func (c *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error { + key := cachekey.GetTokenKey(userID, platformID) + if err := c.rdb.HDel(ctx, key, fields...).Err(); err != nil { + return errs.Wrap(err) + } + for _, f := range fields { + k := cachekey.GetTemporaryTokenKey(userID, platformID, f) + if err := c.rdb.Set(ctx, k, "", time.Minute*5).Err(); err != nil { + return errs.Wrap(err) + } + } + + return nil +} diff --git a/pkg/common/storage/cache/token.go b/pkg/common/storage/cache/token.go index e5e0a9383..441c08939 100644 --- a/pkg/common/storage/cache/token.go +++ b/pkg/common/storage/cache/token.go @@ -9,8 +9,11 @@ type TokenModel interface { // SetTokenFlagEx set token and flag with expire time SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) + HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error + DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error + DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error } diff --git a/pkg/common/storage/controller/auth.go b/pkg/common/storage/controller/auth.go index f9061a73b..496a434bf 100644 --- a/pkg/common/storage/controller/auth.go +++ b/pkg/common/storage/controller/auth.go @@ -17,6 +17,8 @@ import ( type AuthDatabase interface { // If the result is empty, no error is returned. 
GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) + + GetTemporaryTokensWithoutError(ctx context.Context, userID string, platformID int, token string) error // Create token CreateToken(ctx context.Context, userID string, platformID int) (string, error) @@ -51,6 +53,10 @@ func (a *authDatabase) GetTokensWithoutError(ctx context.Context, userID string, return a.cache.GetTokensWithoutError(ctx, userID, platformID) } +func (a *authDatabase) GetTemporaryTokensWithoutError(ctx context.Context, userID string, platformID int, token string) error { + return a.cache.HasTemporaryToken(ctx, userID, platformID, token) +} + func (a *authDatabase) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error { return a.cache.SetTokenMapByUidPid(ctx, userID, platformID, m) } @@ -86,19 +92,20 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI return "", err } - deleteTokenKey, kickedTokenKey, err := a.checkToken(ctx, tokens, platformID) + deleteTokenKey, kickedTokenKey, adminTokens, err := a.checkToken(ctx, tokens, platformID) + if err != nil { + return "", err + } + if len(deleteTokenKey) != 0 { + err = a.cache.DeleteTokenByTokenMap(ctx, userID, deleteTokenKey) if err != nil { return "", err } - if len(deleteTokenKey) != 0 { - err = a.cache.DeleteTokenByUidPid(ctx, userID, platformID, deleteTokenKey) - if err != nil { - return "", err - } - } - if len(kickedTokenKey) != 0 { - for _, k := range kickedTokenKey { - err := a.cache.SetTokenFlagEx(ctx, userID, platformID, k, constant.KickedToken) + } + if len(kickedTokenKey) != 0 { + for plt, ks := range kickedTokenKey { + for _, k := range ks { + err := a.cache.SetTokenFlagEx(ctx, userID, plt, k, constant.KickedToken) if err != nil { return "", err } @@ -106,6 +113,11 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI } } } + if len(adminTokens) != 0 { + if err = a.cache.DeleteAndSetTemporary(ctx, userID, constant.AdminPlatformID, adminTokens); err != nil { + return "", err + } + } claims := tokenverify.BuildClaims(userID, platformID, a.accessExpire) token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) @@ -123,12 +135,13 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI return tokenString, nil } -func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string]int, platformID int) ([]string, []string, error) { - // todo: Move the logic for handling old data to another location. +// checkToken will check token by tokenPolicy and return deleteToken,kickToken,deleteAdminToken +func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string]int, platformID int) (map[int][]string, map[int][]string, []string, error) { + // todo: Asynchronous deletion of old data. 
var ( loginTokenMap = make(map[int][]string) // The length of the value of the map must be greater than 0 - deleteToken = make([]string, 0) - kickToken = make([]string, 0) + deleteToken = make(map[int][]string) + kickToken = make(map[int][]string) adminToken = make([]string, 0) unkickTerminal = "" ) @@ -137,7 +150,7 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string for k, v := range tks { _, err := tokenverify.GetClaimFromToken(k, authverify.Secret(a.accessSecret)) if err != nil || v != constant.NormalToken { - deleteToken = append(deleteToken, k) + deleteToken[plfID] = append(deleteToken[plfID], k) } else { if plfID != constant.AdminPlatformID { loginTokenMap[plfID] = append(loginTokenMap[plfID], k) @@ -157,14 +170,15 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string } limit := a.multiLogin.MaxNumOneEnd if l > limit { - kickToken = append(kickToken, ts[:l-limit]...) + kickToken[plt] = ts[:l-limit] } } case constant.AllLoginButSameTermKick: for plt, ts := range loginTokenMap { - kickToken = append(kickToken, ts[:len(ts)-1]...) + kickToken[plt] = ts[:len(ts)-1] + if plt == platformID { - kickToken = append(kickToken, ts[len(ts)-1]) + kickToken[plt] = append(kickToken[plt], ts[len(ts)-1]) } } case constant.PCAndOther: @@ -172,29 +186,33 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string if constant.PlatformIDToClass(platformID) != unkickTerminal { for plt, ts := range loginTokenMap { if constant.PlatformIDToClass(plt) != unkickTerminal { - kickToken = append(kickToken, ts...) + kickToken[plt] = ts } } } else { var ( - preKick []string - isReserve = true + preKickToken string + preKickPlt int + reserveToken = false ) for plt, ts := range loginTokenMap { if constant.PlatformIDToClass(plt) != unkickTerminal { // Keep a token from another end - if isReserve { - isReserve = false - kickToken = append(kickToken, ts[:len(ts)-1]...) - preKick = append(preKick, ts[len(ts)-1]) + if !reserveToken { + reserveToken = true + kickToken[plt] = ts[:len(ts)-1] + preKickToken = ts[len(ts)-1] + preKickPlt = plt continue } else { // Prioritize keeping Android if plt == constant.AndroidPlatformID { - kickToken = append(kickToken, preKick...) - kickToken = append(kickToken, ts[:len(ts)-1]...) + if preKickToken != "" { + kickToken[preKickPlt] = append(kickToken[preKickPlt], preKickToken) + } + kickToken[plt] = ts[:len(ts)-1] } else { - kickToken = append(kickToken, ts...) + kickToken[plt] = ts } } } @@ -207,19 +225,19 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string for plt, ts := range loginTokenMap { if constant.PlatformIDToClass(plt) == constant.PlatformIDToClass(platformID) { - kickToken = append(kickToken, ts...) + kickToken[plt] = ts } else { if _, ok := reserved[constant.PlatformIDToClass(plt)]; !ok { reserved[constant.PlatformIDToClass(plt)] = struct{}{} - kickToken = append(kickToken, ts[:len(ts)-1]...) + kickToken[plt] = ts[:len(ts)-1] continue } else { - kickToken = append(kickToken, ts...) + kickToken[plt] = ts } } } default: - return nil, nil, errs.New("unknown multiLogin policy").Wrap() + return nil, nil, nil, errs.New("unknown multiLogin policy").Wrap() } //var adminTokenMaxNum = a.multiLogin.MaxNumOneEnd @@ -233,5 +251,9 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string //if l > adminTokenMaxNum { // kickToken = append(kickToken, adminToken[:l-adminTokenMaxNum]...) 
//} - return deleteToken, kickToken, nil + var deleteAdminToken []string + if platformID == constant.AdminPlatformID { + deleteAdminToken = adminToken + } + return deleteToken, kickToken, deleteAdminToken, nil } diff --git a/pkg/common/storage/controller/client_config.go b/pkg/common/storage/controller/client_config.go new file mode 100644 index 000000000..1c3787634 --- /dev/null +++ b/pkg/common/storage/controller/client_config.go @@ -0,0 +1,58 @@ +package controller + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/pagination" + "github.com/openimsdk/tools/db/tx" +) + +type ClientConfigDatabase interface { + SetUserConfig(ctx context.Context, userID string, config map[string]string) error + GetUserConfig(ctx context.Context, userID string) (map[string]string, error) + DelUserConfig(ctx context.Context, userID string, keys []string) error + GetUserConfigPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) +} + +func NewClientConfigDatabase(db database.ClientConfig, cache cache.ClientConfigCache, tx tx.Tx) ClientConfigDatabase { + return &clientConfigDatabase{ + tx: tx, + db: db, + cache: cache, + } +} + +type clientConfigDatabase struct { + tx tx.Tx + db database.ClientConfig + cache cache.ClientConfigCache +} + +func (x *clientConfigDatabase) SetUserConfig(ctx context.Context, userID string, config map[string]string) error { + return x.tx.Transaction(ctx, func(ctx context.Context) error { + if err := x.db.Set(ctx, userID, config); err != nil { + return err + } + return x.cache.DeleteUserCache(ctx, []string{userID}) + }) +} + +func (x *clientConfigDatabase) GetUserConfig(ctx context.Context, userID string) (map[string]string, error) { + return x.cache.GetUserConfig(ctx, userID) +} + +func (x *clientConfigDatabase) DelUserConfig(ctx context.Context, userID string, keys []string) error { + return x.tx.Transaction(ctx, func(ctx context.Context) error { + if err := x.db.Del(ctx, userID, keys); err != nil { + return err + } + return x.cache.DeleteUserCache(ctx, []string{userID}) + }) +} + +func (x *clientConfigDatabase) GetUserConfigPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) { + return x.db.GetPage(ctx, userID, key, pagination) +} diff --git a/pkg/common/storage/controller/s3.go b/pkg/common/storage/controller/s3.go index 30d8d20ec..9ab31c5a6 100644 --- a/pkg/common/storage/controller/s3.go +++ b/pkg/common/storage/controller/s3.go @@ -33,7 +33,7 @@ type S3Database interface { PartLimit() (*s3.PartLimit, error) PartSize(ctx context.Context, size int64) (int64, error) AuthSign(ctx context.Context, uploadID string, partNumbers []int) (*s3.AuthSignResult, error) - InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error) + InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int, contentType string) (*cont.InitiateUploadResult, error) CompleteMultipartUpload(ctx context.Context, uploadID string, parts []string) (*cont.UploadResult, error) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (time.Time, string, error) SetObject(ctx context.Context, info *model.Object) 
error @@ -73,8 +73,8 @@ func (s *s3Database) AuthSign(ctx context.Context, uploadID string, partNumbers return s.s3.AuthSign(ctx, uploadID, partNumbers) } -func (s *s3Database) InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error) { - return s.s3.InitiateUpload(ctx, hash, size, expire, maxParts) +func (s *s3Database) InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int, contentType string) (*cont.InitiateUploadResult, error) { + return s.s3.InitiateUploadContentType(ctx, hash, size, expire, maxParts, contentType) } func (s *s3Database) CompleteMultipartUpload(ctx context.Context, uploadID string, parts []string) (*cont.UploadResult, error) { diff --git a/pkg/common/storage/database/client_config.go b/pkg/common/storage/database/client_config.go new file mode 100644 index 000000000..7fa888d24 --- /dev/null +++ b/pkg/common/storage/database/client_config.go @@ -0,0 +1,15 @@ +package database + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/pagination" +) + +type ClientConfig interface { + Set(ctx context.Context, userID string, config map[string]string) error + Get(ctx context.Context, userID string) (map[string]string, error) + Del(ctx context.Context, userID string, keys []string) error + GetPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) +} diff --git a/pkg/common/storage/database/mgo/cache.go b/pkg/common/storage/database/mgo/cache.go new file mode 100644 index 000000000..991dfa874 --- /dev/null +++ b/pkg/common/storage/database/mgo/cache.go @@ -0,0 +1,183 @@ +package mgo + +import ( + "context" + "strconv" + "time" + + "github.com/google/uuid" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/mongoutil" + "github.com/openimsdk/tools/errs" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func NewCacheMgo(db *mongo.Database) (*CacheMgo, error) { + coll := db.Collection(database.CacheName) + _, err := coll.Indexes().CreateMany(context.Background(), []mongo.IndexModel{ + { + Keys: bson.D{ + {Key: "key", Value: 1}, + }, + Options: options.Index().SetUnique(true), + }, + { + Keys: bson.D{ + {Key: "expire_at", Value: 1}, + }, + Options: options.Index().SetExpireAfterSeconds(0), + }, + }) + if err != nil { + return nil, errs.Wrap(err) + } + return &CacheMgo{coll: coll}, nil +} + +type CacheMgo struct { + coll *mongo.Collection +} + +func (x *CacheMgo) findToMap(res []model.Cache, now time.Time) map[string]string { + kv := make(map[string]string) + for _, re := range res { + if re.ExpireAt != nil && re.ExpireAt.Before(now) { + continue + } + kv[re.Key] = re.Value + } + return kv + +} + +func (x *CacheMgo) Get(ctx context.Context, key []string) (map[string]string, error) { + if len(key) == 0 { + return nil, nil + } + now := time.Now() + res, err := mongoutil.Find[model.Cache](ctx, x.coll, bson.M{ + "key": bson.M{"$in": key}, + "$or": []bson.M{ + {"expire_at": bson.M{"$gt": now}}, + {"expire_at": nil}, + }, + }) + if err != nil { + return nil, err + } + return x.findToMap(res, now), nil +} + +func (x *CacheMgo) Prefix(ctx context.Context, prefix string) (map[string]string, error) { + now := time.Now() + res, err 
:= mongoutil.Find[model.Cache](ctx, x.coll, bson.M{ + "key": bson.M{"$regex": "^" + prefix}, + "$or": []bson.M{ + {"expire_at": bson.M{"$gt": now}}, + {"expire_at": nil}, + }, + }) + if err != nil { + return nil, err + } + return x.findToMap(res, now), nil +} + +func (x *CacheMgo) Set(ctx context.Context, key string, value string, expireAt time.Duration) error { + cv := &model.Cache{ + Key: key, + Value: value, + } + if expireAt > 0 { + now := time.Now().Add(expireAt) + cv.ExpireAt = &now + } + opt := options.Update().SetUpsert(true) + return mongoutil.UpdateOne(ctx, x.coll, bson.M{"key": key}, bson.M{"$set": cv}, false, opt) +} + +func (x *CacheMgo) Incr(ctx context.Context, key string, value int) (int, error) { + pipeline := mongo.Pipeline{ + { + {"$set", bson.M{ + "value": bson.M{ + "$toString": bson.M{ + "$add": bson.A{ + bson.M{"$toInt": "$value"}, + value, + }, + }, + }, + }}, + }, + } + opt := options.FindOneAndUpdate().SetReturnDocument(options.After) + res, err := mongoutil.FindOneAndUpdate[model.Cache](ctx, x.coll, bson.M{"key": key}, pipeline, opt) + if err != nil { + return 0, err + } + return strconv.Atoi(res.Value) +} + +func (x *CacheMgo) Del(ctx context.Context, key []string) error { + if len(key) == 0 { + return nil + } + _, err := x.coll.DeleteMany(ctx, bson.M{"key": bson.M{"$in": key}}) + return errs.Wrap(err) +} + +func (x *CacheMgo) lockKey(key string) string { + return "LOCK_" + key +} + +func (x *CacheMgo) Lock(ctx context.Context, key string, duration time.Duration) (string, error) { + tmp, err := uuid.NewUUID() + if err != nil { + return "", err + } + if duration <= 0 || duration > time.Minute*10 { + duration = time.Minute * 10 + } + cv := &model.Cache{ + Key: x.lockKey(key), + Value: tmp.String(), + ExpireAt: nil, + } + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + wait := func() error { + timeout := time.NewTimer(time.Millisecond * 100) + defer timeout.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-timeout.C: + return nil + } + } + for { + if err := mongoutil.DeleteOne(ctx, x.coll, bson.M{"key": key, "expire_at": bson.M{"$lt": time.Now()}}); err != nil { + return "", err + } + expireAt := time.Now().Add(duration) + cv.ExpireAt = &expireAt + if err := mongoutil.InsertMany[*model.Cache](ctx, x.coll, []*model.Cache{cv}); err != nil { + if mongo.IsDuplicateKeyError(err) { + if err := wait(); err != nil { + return "", err + } + continue + } + return "", err + } + return cv.Value, nil + } +} + +func (x *CacheMgo) Unlock(ctx context.Context, key string, value string) error { + return mongoutil.DeleteOne(ctx, x.coll, bson.M{"key": x.lockKey(key), "value": value}) +} diff --git a/pkg/common/storage/database/mgo/client_config.go b/pkg/common/storage/database/mgo/client_config.go new file mode 100644 index 000000000..0aa462899 --- /dev/null +++ b/pkg/common/storage/database/mgo/client_config.go @@ -0,0 +1,99 @@ +// Copyright © 2023 OpenIM open source community. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package mgo + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/mongoutil" + "github.com/openimsdk/tools/db/pagination" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/openimsdk/tools/errs" +) + +func NewClientConfig(db *mongo.Database) (database.ClientConfig, error) { + coll := db.Collection("config") + _, err := coll.Indexes().CreateMany(context.Background(), []mongo.IndexModel{ + { + Keys: bson.D{ + {Key: "key", Value: 1}, + {Key: "user_id", Value: 1}, + }, + Options: options.Index().SetUnique(true), + }, + }) + if err != nil { + return nil, errs.Wrap(err) + } + return &ClientConfig{ + coll: coll, + }, nil +} + +type ClientConfig struct { + coll *mongo.Collection +} + +func (x *ClientConfig) Set(ctx context.Context, userID string, config map[string]string) error { + if len(config) == 0 { + return nil + } + for key, value := range config { + filter := bson.M{"key": key, "user_id": userID} + update := bson.M{ + "value": value, + } + err := mongoutil.UpdateOne(ctx, x.coll, filter, bson.M{"$set": update}, false, options.Update().SetUpsert(true)) + if err != nil { + return err + } + } + return nil +} + +func (x *ClientConfig) Get(ctx context.Context, userID string) (map[string]string, error) { + cs, err := mongoutil.Find[*model.ClientConfig](ctx, x.coll, bson.M{"user_id": userID}) + if err != nil { + return nil, err + } + cm := make(map[string]string) + for _, config := range cs { + cm[config.Key] = config.Value + } + return cm, nil +} + +func (x *ClientConfig) Del(ctx context.Context, userID string, keys []string) error { + if len(keys) == 0 { + return nil + } + return mongoutil.DeleteMany(ctx, x.coll, bson.M{"key": bson.M{"$in": keys}, "user_id": userID}) +} + +func (x *ClientConfig) GetPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) { + filter := bson.M{} + if userID != "" { + filter["user_id"] = userID + } + if key != "" { + filter["key"] = key + } + return mongoutil.FindPage[*model.ClientConfig](ctx, x.coll, filter, pagination) +} diff --git a/pkg/common/storage/model/client_config.go b/pkg/common/storage/model/client_config.go new file mode 100644 index 000000000..f06e29102 --- /dev/null +++ b/pkg/common/storage/model/client_config.go @@ -0,0 +1,7 @@ +package model + +type ClientConfig struct { + Key string `bson:"key"` + UserID string `bson:"user_id"` + Value string `bson:"value"` +} diff --git a/pkg/rpccache/conversation.go b/pkg/rpccache/conversation.go index 70f5acfd1..162fda596 100644 --- a/pkg/rpccache/conversation.go +++ b/pkg/rpccache/conversation.go @@ -16,6 +16,7 @@ package rpccache import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/localcache" @@ -153,6 +154,26 @@ func (c *ConversationLocalCache) getConversationNotReceiveMessageUserIDs(ctx con })) } +func (c *ConversationLocalCache) getPinnedConversationIDs(ctx context.Context, userID string) (val []string, err error) { + log.ZDebug(ctx, "ConversationLocalCache getPinnedConversations req", "userID", userID) + defer func() { + if err == nil { + log.ZDebug(ctx, 
"ConversationLocalCache getPinnedConversations return", "userID", userID, "value", val) + } else { + log.ZError(ctx, "ConversationLocalCache getPinnedConversations return", err, "userID", userID) + } + }() + var cache cacheProto[pbconversation.GetPinnedConversationIDsResp] + resp, err := cache.Unmarshal(c.local.Get(ctx, cachekey.GetPinnedConversationIDs(userID), func(ctx context.Context) ([]byte, error) { + log.ZDebug(ctx, "ConversationLocalCache getConversationNotReceiveMessageUserIDs rpc", "userID", userID) + return cache.Marshal(c.client.ConversationClient.GetPinnedConversationIDs(ctx, &pbconversation.GetPinnedConversationIDsReq{UserID: userID})) + })) + if err != nil { + return nil, err + } + return resp.ConversationIDs, nil +} + func (c *ConversationLocalCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) { res, err := c.getConversationNotReceiveMessageUserIDs(ctx, conversationID) if err != nil { @@ -168,3 +189,7 @@ func (c *ConversationLocalCache) GetConversationNotReceiveMessageUserIDMap(ctx c } return datautil.SliceSet(res.UserIDs), nil } + +func (c *ConversationLocalCache) GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) { + return c.getPinnedConversationIDs(ctx, userID) +} diff --git a/test/stress-test-v2/README.md b/test/stress-test-v2/README.md new file mode 100644 index 000000000..cbd4bdbde --- /dev/null +++ b/test/stress-test-v2/README.md @@ -0,0 +1,19 @@ +# Stress Test V2 + +## Usage + +You need set `TestTargetUserList` variables. + +### Build + +```bash + +go build -o test/stress-test-v2/stress-test-v2 test/stress-test-v2/main.go +``` + +### Excute + +```bash + +tools/stress-test-v2/stress-test-v2 -c config/ +``` diff --git a/test/stress-test-v2/main.go b/test/stress-test-v2/main.go new file mode 100644 index 000000000..0e4609964 --- /dev/null +++ b/test/stress-test-v2/main.go @@ -0,0 +1,759 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +// 1. Create 100K New Users +// 2. Create 100 100K Groups +// 3. Create 1000 999 Groups +// 4. Send message to 100K Groups every second +// 5. Send message to 999 Groups every minute + +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + // "", + } + // DefaultGroupID = "" // Use default group ID for testing, need to be created. 
+) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + UserCheck = "/user/account_check" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + GetGroupMemberInfo = "/group/get_group_members_info" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 100000 + Max1kUser = 1000 + Max100KGroup = 100 + Max999Group = 1000 + MaxInviteUserLimit = 999 + + CreateUserTicker = 1 * time.Second + CreateGroupTicker = 1 * time.Second + Create100KGroupTicker = 1 * time.Second + Create999GroupTicker = 1 * time.Second + SendMsgTo100KGroupTicker = 1 * time.Second + SendMsgTo999GroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultUserID string + UserCounter int + CreateUserCounter int + Create100kGroupCounter int + Create999GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + // log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + 
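+ // Secret is loaded from share.yml and must match the server's configured secret; the returned admin token is attached to every later request via the "token" header.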
UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CheckUser(ctx context.Context, userIDs []string) ([]string, error) { + req := pbuser.AccountCheckReq{ + CheckUserIDs: userIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+UserCheck, &req) + if err != nil { + return nil, err + } + + data := &pbuser.AccountCheckResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + unRegisteredUserIDs := make([]string, 0) + + for _, res := range data.Results { + if res.AccountStatus == constant.UnRegistered { + unRegisteredUserIDs = append(unRegisteredUserIDs, res.UserID) + } + } + + return unRegisteredUserIDs, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) CreateUserBatch(ctx context.Context, userIDs []string) error { + // The method can import a large number of users at once. + var userList []*sdkws.UserInfo + + defer st.Once.Do( + func() { + st.DefaultUserID = userIDs[0] + fmt.Println("Default Send User Created ID:", st.DefaultUserID) + }) + + needUserIDs, err := st.CheckUser(ctx, userIDs) + if err != nil { + return err + } + + for _, userID := range needUserIDs { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + userList = append(userList, user) + } + + req := pbuser.UserRegisterReq{ + Users: userList, + } + + _, err = st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return err + } + + st.UserCounter += len(userList) + return nil +} + +func (st *StressTest) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]string, error) { + needInviteUserIDs := make([]string, 0) + + const maxBatchSize = 500 + if len(userIDs) > maxBatchSize { + for i := 0; i < len(userIDs); i += maxBatchSize { + end := min(i+maxBatchSize, len(userIDs)) + batchUserIDs := userIDs[i:end] + + // log.ZInfo(ctx, "Processing group members batch", "groupID", groupID, "batch", i/maxBatchSize+1, + // "batchUserCount", len(batchUserIDs)) + + // Process a single batch + batchReq := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: batchUserIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &batchReq) + if err != nil { + log.ZError(ctx, "Batch query failed", err, "batch", i/maxBatchSize+1) + continue + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + log.ZError(ctx, "Failed to parse batch response", err, "batch", i/maxBatchSize+1) + continue + } + + // Process the batch results + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range batchUserIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + } + + return needInviteUserIDs, nil + } + + req := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: userIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &req) + if err != 
nil { + return nil, err + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range userIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + + return needInviteUserIDs, nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, groupID string, userIDs []string) error { + req := group.InviteUserToGroupReq{ + GroupID: groupID, + InvitedUserIDs: userIDs, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string, groupID string) error { + contentObj := map[string]any{ + // "content": fmt.Sprintf("index %d. The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + "content": fmt.Sprintf("The current time is %s", time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: groupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +// Max userIDs number is 1000 +func (st *StressTest) CreateGroup(ctx context.Context, groupID string, userID string, userIDsList []string) (string, error) { + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: userIDsList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + // st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + // ch := make(chan struct{}) + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + go func() { + // time.Sleep(5 * time.Second) + fmt.Println("Force exit") + os.Exit(0) + }() + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, 
"AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + for i := range MaxUser { + userID := fmt.Sprintf("v2_StressTest_User_%d", i) + st.CreatedUsers = append(st.CreatedUsers, userID) + st.CreateUserCounter++ + } + + // err = st.CreateUserBatch(st.Ctx, st.CreatedUsers) + // if err != nil { + // log.ZError(ctx, "Create user failed.", err) + // } + + const batchSize = 1000 + totalUsers := len(st.CreatedUsers) + successCount := 0 + + if st.DefaultUserID == "" && len(st.CreatedUsers) > 0 { + st.DefaultUserID = st.CreatedUsers[0] + } + + for i := 0; i < totalUsers; i += batchSize { + end := min(i+batchSize, totalUsers) + + userBatch := st.CreatedUsers[i:end] + log.ZInfo(st.Ctx, "Creating user batch", "batch", i/batchSize+1, "count", len(userBatch)) + + err = st.CreateUserBatch(st.Ctx, userBatch) + if err != nil { + log.ZError(st.Ctx, "Batch user creation failed", err, "batch", i/batchSize+1) + } else { + successCount += len(userBatch) + log.ZInfo(st.Ctx, "Batch user creation succeeded", "batch", i/batchSize+1, + "progress", fmt.Sprintf("%d/%d", successCount, totalUsers)) + } + } + + // Execute create 100k group + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create100kGroupTicker := time.NewTicker(Create100KGroupTicker) + defer create100kGroupTicker.Stop() + + for i := range Max100KGroup { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 100K Group") + return + + case <-create100kGroupTicker.C: + // Create 100K groups + st.Wg.Add(1) + go func(idx int) { + startTime := time.Now() + defer func() { + elapsedTime := time.Since(startTime) + log.ZInfo(st.Ctx, "100K group creation completed", + "groupID", fmt.Sprintf("v2_StressTest_Group_100K_%d", idx), + "index", idx, + "duration", elapsedTime.String()) + }() + + defer st.Wg.Done() + defer func() { + st.Mutex.Lock() + st.Create100kGroupCounter++ + st.Mutex.Unlock() + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + + for i := 0; i <= MaxUser/MaxInviteUserLimit; i++ { + InviteUserIDs := make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) 
+ + startIdx := max(i*MaxInviteUserLimit, 1) + endIdx := min((i+1)*MaxInviteUserLimit, MaxUser) + + for j := startIdx; j < endIdx; j++ { + userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j) + InviteUserIDs = append(InviteUserIDs, userCreatedID) + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs) + if err != nil { + log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID) + continue + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs) + continue + // os.Exit(1) + // return + } + } + }(i) + } + } + }() + + // create 999 groups + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create999GroupTicker := time.NewTicker(Create999GroupTicker) + defer create999GroupTicker.Stop() + + for i := range Max999Group { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 999 Group") + return + + case <-create999GroupTicker.C: + // Create 999 groups + st.Wg.Add(1) + go func(idx int) { + startTime := time.Now() + defer func() { + elapsedTime := time.Since(startTime) + log.ZInfo(st.Ctx, "999 group creation completed", + "groupID", fmt.Sprintf("v2_StressTest_Group_1K_%d", idx), + "index", idx, + "duration", elapsedTime.String()) + }() + + defer st.Wg.Done() + defer func() { + st.Mutex.Lock() + st.Create999GroupCounter++ + st.Mutex.Unlock() + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_1K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + for i := 0; i <= Max1kUser/MaxInviteUserLimit; i++ { + InviteUserIDs := make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) 
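+ // Same batching for the 1K groups, capped at Max1kUser members; GetGroupMembersInfo runs first so users already in the group are not re-invited.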
+ + startIdx := max(i*MaxInviteUserLimit, 1) + endIdx := min((i+1)*MaxInviteUserLimit, Max1kUser) + + for j := startIdx; j < endIdx; j++ { + userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j) + InviteUserIDs = append(InviteUserIDs, userCreatedID) + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs) + if err != nil { + log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID) + continue + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs) + continue + // os.Exit(1) + // return + } + } + }(i) + } + } + }() + + // Send message to 100K groups + st.Wg.Wait() + fmt.Println("All groups created successfully, starting to send messages...") + log.ZInfo(ctx, "All groups created successfully, starting to send messages...") + + var groups100K []string + var groups999 []string + + for i := range Max100KGroup { + groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", i) + groups100K = append(groups100K, groupID) + } + + for i := range Max999Group { + groupID := fmt.Sprintf("v2_StressTest_Group_1K_%d", i) + groups999 = append(groups999, groupID) + } + + send100kGroupLimiter := make(chan struct{}, 20) + send999GroupLimiter := make(chan struct{}, 100) + + // execute Send message to 100K groups + go func() { + ticker := time.NewTicker(SendMsgTo100KGroupTicker) + defer ticker.Stop() + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send Message to 100K Group") + return + + case <-ticker.C: + // Send message to 100K groups + for _, groupID := range groups100K { + send100kGroupLimiter <- struct{}{} + go func(groupID string) { + defer func() { <-send100kGroupLimiter }() + if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil { + log.ZError(st.Ctx, "Send message to 100K group failed.", err) + } + }(groupID) + } + // log.ZInfo(st.Ctx, "Send message to 100K groups successfully.") + } + } + }() + + // execute Send message to 999 groups + go func() { + ticker := time.NewTicker(SendMsgTo999GroupTicker) + defer ticker.Stop() + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send Message to 999 Group") + return + + case <-ticker.C: + // Send message to 999 groups + for _, groupID := range groups999 { + send999GroupLimiter <- struct{}{} + go func(groupID string) { + defer func() { <-send999GroupLimiter }() + + if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil { + log.ZError(st.Ctx, "Send message to 999 group failed.", err) + } + }(groupID) + } + // log.ZInfo(st.Ctx, "Send message to 999 groups successfully.") + } + } + }() + + <-st.Ctx.Done() + fmt.Println("Received signal to exit, shutting down...") +} diff --git a/test/stress-test/README.md b/test/stress-test/README.md new file mode 100644 index 000000000..cba93e279 --- /dev/null +++ b/test/stress-test/README.md @@ -0,0 +1,19 @@ +# Stress Test + +## Usage + +You need set `TestTargetUserList` and `DefaultGroupID` variables. 
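For reference, a minimal sketch of how these two package-level variables in `test/stress-test/main.go` might be filled in; the IDs below are placeholders and must be replaced with a user and a group that already exist on your server:

```go
var (
	// Existing user IDs: every newly created test user imports these as friends,
	// and every group the tool creates includes them as members.
	TestTargetUserList = []string{
		"targetUser001", // placeholder
	}

	// An existing group: each new user is invited into it, and it receives the
	// per-second test message from the default sender.
	DefaultGroupID = "targetGroup001" // placeholder
)
```

After editing the variables, rebuild the binary as shown below.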
+ +### Build + +```bash + +go build -o test/stress-test/stress-test test/stress-test/main.go +``` + +### Excute + +```bash + +tools/stress-test/stress-test -c config/ +``` diff --git a/test/stress-test/main.go b/test/stress-test/main.go new file mode 100755 index 000000000..6adbd12ee --- /dev/null +++ b/test/stress-test/main.go @@ -0,0 +1,458 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/relation" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +/* + 1. Create one user every minute + 2. Import target users as friends + 3. Add users to the default group + 4. Send a message to the default group every second, containing index and current timestamp + 5. Create a new group every minute and invite target users to join +*/ + +// !!! ATTENTION: This variable is must be added! +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + "", + } + DefaultGroupID = "" // Use default group ID for testing, need to be created. +) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 10000 + MaxGroup = 1000 + + CreateUserTicker = 1 * time.Minute // Ticker is 1min in create user + SendMessageTicker = 1 * time.Second // Ticker is 1s in send message + CreateGroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultUserID string + UserCounter int + GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", 
"application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) ImportFriend(ctx context.Context, userID string) error { + req := relation.ImportFriendReq{ + OwnerUserID: userID, + FriendUserIDs: TestTargetUserList, + } + + _, err := st.PostRequest(ctx, ApiAddress+ImportFriend, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, userID string) error { + req := group.InviteUserToGroupReq{ + GroupID: st.DefaultGroupID, + InvitedUserIDs: []string{userID}, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string) error { + contentObj := map[string]any{ + "content": fmt.Sprintf("index %d. 
The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: st.DefaultGroupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +func (st *StressTest) CreateGroup(ctx context.Context, userID string) (string, error) { + groupID := fmt.Sprintf("StressTestGroup_%d_%s", st.GroupCounter, time.Now().Format("20060102150405")) + + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: TestTargetUserList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan struct{}) + + defer cancel() + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + select { + case <-ch: + default: + close(ch) + } + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + st.DefaultGroupID = DefaultGroupID + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateUserTicker) + defer ticker.Stop() + + for st.UserCounter < MaxUser { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create user", "reason", "context done") + return + + case <-ticker.C: + // Create User + userID := fmt.Sprintf("%d_Stresstest_%s", st.UserCounter, time.Now().Format("0102150405")) + + userCreatedID, err := st.CreateUser(st.Ctx, userID) + if err != nil { + log.ZError(st.Ctx, "Create User failed.", err, "UserID", userID) + os.Exit(1) + return + } + // fmt.Println("User Created ID:", userCreatedID) + + // Import Friend + if err = st.ImportFriend(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Import Friend failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + // 
Invite To Group + if err = st.InviteToGroup(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + st.Once.Do(func() { + st.DefaultUserID = userCreatedID + fmt.Println("Default Send User Created ID:", userCreatedID) + close(ch) + }) + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(SendMessageTicker) + defer ticker.Stop() + <-ch + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send message", "reason", "context done") + return + + case <-ticker.C: + // Send Message + if err = st.SendMsg(st.Ctx, st.DefaultUserID); err != nil { + log.ZError(st.Ctx, "Send Message failed.", err, "UserID", st.DefaultUserID) + continue + } + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateGroupTicker) + defer ticker.Stop() + <-ch + + for st.GroupCounter < MaxGroup { + + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create Group", "reason", "context done") + return + + case <-ticker.C: + + // Create Group + _, err := st.CreateGroup(st.Ctx, st.DefaultUserID) + if err != nil { + log.ZError(st.Ctx, "Create Group failed.", err, "UserID", st.DefaultUserID) + os.Exit(1) + return + } + + // fmt.Println("Group Created ID:", groupID) + } + } + }() + + st.Wg.Wait() +} diff --git a/tools/s3/internal/conversion.go b/tools/s3/internal/conversion.go index ba2174535..af391ec42 100644 --- a/tools/s3/internal/conversion.go +++ b/tools/s3/internal/conversion.go @@ -4,6 +4,11 @@ import ( "context" "errors" "fmt" + "log" + "net/http" + "path/filepath" + "time" + "github.com/mitchellh/mapstructure" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" @@ -19,10 +24,6 @@ import ( "github.com/openimsdk/tools/s3/oss" "github.com/spf13/viper" "go.mongodb.org/mongo-driver/mongo" - "log" - "net/http" - "path/filepath" - "time" ) const defaultTimeout = time.Second * 10 @@ -159,7 +160,7 @@ func doObject(db database.ObjectInfo, newS3, oldS3 s3.Interface, skip int) (*Res if err != nil { return nil, err } - putURL, err := newS3.PresignedPutObject(ctx, obj.Key, time.Hour) + putURL, err := newS3.PresignedPutObject(ctx, obj.Key, time.Hour, &s3.PutOption{ContentType: obj.ContentType}) if err != nil { return nil, err } @@ -176,7 +177,7 @@ func doObject(db database.ObjectInfo, newS3, oldS3 s3.Interface, skip int) (*Res return nil, fmt.Errorf("download object failed %s", downloadResp.Status) } log.Printf("file size %d", obj.Size) - request, err := http.NewRequest(http.MethodPut, putURL, downloadResp.Body) + request, err := http.NewRequest(http.MethodPut, putURL.URL, downloadResp.Body) if err != nil { return nil, err } diff --git a/tools/seq/internal/main.go b/tools/seq/internal/main.go index 7e5d5598c..9fd352a96 100644 --- a/tools/seq/internal/main.go +++ b/tools/seq/internal/main.go @@ -337,7 +337,7 @@ func SetVersion(coll *mongo.Collection, key string, version int) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() option := options.Update().SetUpsert(true) - filter := bson.M{"key": key, "value": strconv.Itoa(version)} + filter := bson.M{"key": key} update := bson.M{"$set": bson.M{"key": key, "value": strconv.Itoa(version)}} return mongoutil.UpdateOne(ctx, coll, filter, update, false, option) } diff --git a/tools/stress-test-v2/main.go b/tools/stress-test-v2/main.go new file mode 100644 index 000000000..0c309b9c9 
--- /dev/null +++ b/tools/stress-test-v2/main.go @@ -0,0 +1,736 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +// 1. Create 100K New Users +// 2. Create 100 100K Groups +// 3. Create 1000 999 Groups +// 4. Send message to 100K Groups every second +// 5. Send message to 999 Groups every minute + +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + // "", + } + // DefaultGroupID = "" // Use default group ID for testing, need to be created. +) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + UserCheck = "/user/account_check" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + GetGroupMemberInfo = "/group/get_group_members_info" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 100000 + Max100KGroup = 100 + Max999Group = 1000 + MaxInviteUserLimit = 999 + + CreateUserTicker = 1 * time.Second + CreateGroupTicker = 1 * time.Second + Create100KGroupTicker = 1 * time.Second + Create999GroupTicker = 1 * time.Second + SendMsgTo100KGroupTicker = 1 * time.Second + SendMsgTo999GroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultUserID string + UserCounter int + CreateUserCounter int + Create100kGroupCounter int + Create999GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", 
"Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CheckUser(ctx context.Context, userIDs []string) ([]string, error) { + req := pbuser.AccountCheckReq{ + CheckUserIDs: userIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+UserCheck, &req) + if err != nil { + return nil, err + } + + data := &pbuser.AccountCheckResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + unRegisteredUserIDs := make([]string, 0) + + for _, res := range data.Results { + if res.AccountStatus == constant.UnRegistered { + unRegisteredUserIDs = append(unRegisteredUserIDs, res.UserID) + } + } + + return unRegisteredUserIDs, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) CreateUserBatch(ctx context.Context, userIDs []string) error { + // The method can import a large number of users at once. 
+ var userList []*sdkws.UserInfo + + defer st.Once.Do( + func() { + st.DefaultUserID = userIDs[0] + fmt.Println("Default Send User Created ID:", st.DefaultUserID) + }) + + needUserIDs, err := st.CheckUser(ctx, userIDs) + if err != nil { + return err + } + + for _, userID := range needUserIDs { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + userList = append(userList, user) + } + + req := pbuser.UserRegisterReq{ + Users: userList, + } + + _, err = st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return err + } + + st.UserCounter += len(userList) + return nil +} + +func (st *StressTest) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]string, error) { + needInviteUserIDs := make([]string, 0) + + const maxBatchSize = 500 + if len(userIDs) > maxBatchSize { + for i := 0; i < len(userIDs); i += maxBatchSize { + end := min(i+maxBatchSize, len(userIDs)) + batchUserIDs := userIDs[i:end] + + // log.ZInfo(ctx, "Processing group members batch", "groupID", groupID, "batch", i/maxBatchSize+1, + // "batchUserCount", len(batchUserIDs)) + + // Process a single batch + batchReq := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: batchUserIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &batchReq) + if err != nil { + log.ZError(ctx, "Batch query failed", err, "batch", i/maxBatchSize+1) + continue + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + log.ZError(ctx, "Failed to parse batch response", err, "batch", i/maxBatchSize+1) + continue + } + + // Process the batch results + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range batchUserIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + } + + return needInviteUserIDs, nil + } + + req := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: userIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &req) + if err != nil { + return nil, err + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range userIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + + return needInviteUserIDs, nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, groupID string, userIDs []string) error { + req := group.InviteUserToGroupReq{ + GroupID: groupID, + InvitedUserIDs: userIDs, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string, groupID string) error { + contentObj := map[string]any{ + // "content": fmt.Sprintf("index %d. 
The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + "content": fmt.Sprintf("The current time is %s", time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: groupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +// Max userIDs number is 1000 +func (st *StressTest) CreateGroup(ctx context.Context, groupID string, userID string, userIDsList []string) (string, error) { + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: userIDsList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + // st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + // ch := make(chan struct{}) + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + go func() { + // time.Sleep(5 * time.Second) + fmt.Println("Force exit") + os.Exit(0) + }() + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + for i := range MaxUser { + userID := fmt.Sprintf("v2_StressTest_User_%d", i) + st.CreatedUsers = append(st.CreatedUsers, userID) + st.CreateUserCounter++ + } + + // err = st.CreateUserBatch(st.Ctx, st.CreatedUsers) + // if err != nil { + // log.ZError(ctx, "Create user failed.", err) + // } + + const batchSize = 1000 + totalUsers := len(st.CreatedUsers) + successCount := 0 + + if st.DefaultUserID == "" && len(st.CreatedUsers) > 0 { + st.DefaultUserID = st.CreatedUsers[0] + } + + for i := 0; i < totalUsers; i += batchSize { + end := min(i+batchSize, totalUsers) + + userBatch := st.CreatedUsers[i:end] + log.ZInfo(st.Ctx, "Creating user batch", "batch", i/batchSize+1, "count", len(userBatch)) + + err = st.CreateUserBatch(st.Ctx, userBatch) + if err != nil { + 
log.ZError(st.Ctx, "Batch user creation failed", err, "batch", i/batchSize+1) + } else { + successCount += len(userBatch) + log.ZInfo(st.Ctx, "Batch user creation succeeded", "batch", i/batchSize+1, + "progress", fmt.Sprintf("%d/%d", successCount, totalUsers)) + } + } + + // Execute create 100k group + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create100kGroupTicker := time.NewTicker(Create100KGroupTicker) + defer create100kGroupTicker.Stop() + + for i := range Max100KGroup { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 100K Group") + return + + case <-create100kGroupTicker.C: + // Create 100K groups + st.Wg.Add(1) + go func(idx int) { + defer st.Wg.Done() + defer func() { + st.Create100kGroupCounter++ + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + + for i := 0; i < MaxUser/MaxInviteUserLimit; i++ { + InviteUserIDs := make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) + + startIdx := max(i*MaxInviteUserLimit, 1) + endIdx := min((i+1)*MaxInviteUserLimit, MaxUser) + + for j := startIdx; j < endIdx; j++ { + userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j) + InviteUserIDs = append(InviteUserIDs, userCreatedID) + } + + if len(InviteUserIDs) == 0 { + log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs) + if err != nil { + log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID) + continue + } + + if len(InviteUserIDs) == 0 { + log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs) + continue + // os.Exit(1) + // return + } + } + }(i) + } + } + }() + + // create 999 groups + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create999GroupTicker := time.NewTicker(Create999GroupTicker) + defer create999GroupTicker.Stop() + + for i := range Max999Group { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 999 Group") + return + + case <-create999GroupTicker.C: + // Create 999 groups + st.Wg.Add(1) + go func(idx int) { + defer st.Wg.Done() + defer func() { + st.Create999GroupCounter++ + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_1K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + for i := 0; i < MaxUser/MaxInviteUserLimit; i++ { + InviteUserIDs := make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) 
+
+							startIdx := max(i*MaxInviteUserLimit, 1)
+							endIdx := min((i+1)*MaxInviteUserLimit, MaxUser)
+
+							for j := startIdx; j < endIdx; j++ {
+								userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j)
+								InviteUserIDs = append(InviteUserIDs, userCreatedID)
+							}
+
+							if len(InviteUserIDs) == 0 {
+								log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID)
+								continue
+							}
+
+							InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs)
+							if err != nil {
+								log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID)
+								continue
+							}
+
+							if len(InviteUserIDs) == 0 {
+								log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID)
+								continue
+							}
+
+							// Invite To Group
+							if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil {
+								log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs)
+								continue
+								// os.Exit(1)
+								// return
+							}
+						}
+					}(i)
+				}
+			}
+		}()
+
+	// Wait for all group-creation goroutines to finish, then start sending messages
+	st.Wg.Wait()
+	fmt.Println("All groups created successfully, starting to send messages...")
+	log.ZInfo(ctx, "All groups created successfully, starting to send messages...")
+
+	var groups100K []string
+	var groups999 []string
+
+	for i := range Max100KGroup {
+		groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", i)
+		groups100K = append(groups100K, groupID)
+	}
+
+	for i := range Max999Group {
+		groupID := fmt.Sprintf("v2_StressTest_Group_1K_%d", i)
+		groups999 = append(groups999, groupID)
+	}
+
+	send100kGroupLimiter := make(chan struct{}, 20)
+	send999GroupLimiter := make(chan struct{}, 100)
+
+	// Send messages to the 100K groups
+	go func() {
+		ticker := time.NewTicker(SendMsgTo100KGroupTicker)
+		defer ticker.Stop()
+
+		for {
+			select {
+			case <-st.Ctx.Done():
+				log.ZInfo(st.Ctx, "Stop Send Message to 100K Group")
+				return
+
+			case <-ticker.C:
+				// Send message to 100K groups
+				for _, groupID := range groups100K {
+					send100kGroupLimiter <- struct{}{}
+					go func(groupID string) {
+						defer func() { <-send100kGroupLimiter }()
+						if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil {
+							log.ZError(st.Ctx, "Send message to 100K group failed.", err)
+						}
+					}(groupID)
+				}
+				// log.ZInfo(st.Ctx, "Send message to 100K groups successfully.")
+			}
+		}
+	}()
+
+	// Send messages to the 999 groups
+	go func() {
+		ticker := time.NewTicker(SendMsgTo999GroupTicker)
+		defer ticker.Stop()
+
+		for {
+			select {
+			case <-st.Ctx.Done():
+				log.ZInfo(st.Ctx, "Stop Send Message to 999 Group")
+				return
+
+			case <-ticker.C:
+				// Send message to 999 groups
+				for _, groupID := range groups999 {
+					send999GroupLimiter <- struct{}{}
+					go func(groupID string) {
+						defer func() { <-send999GroupLimiter }()
+
+						if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil {
+							log.ZError(st.Ctx, "Send message to 999 group failed.", err)
+						}
+					}(groupID)
+				}
+				// log.ZInfo(st.Ctx, "Send message to 999 groups successfully.")
+			}
+		}
+	}()
+
+	<-st.Ctx.Done()
+	fmt.Println("Received signal to exit, shutting down...")
+}
diff --git a/tools/stress-test/README.md b/tools/stress-test/README.md
new file mode 100644
index 000000000..531233a20
--- /dev/null
+++ b/tools/stress-test/README.md
@@ -0,0 +1,25 @@
+# Stress Test
+
+## Usage
+
+You need to set the `TestTargetUserList` and `DefaultGroupID` variables in `main.go` before building.
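+
+For example, a minimal sketch of that edit in `tools/stress-test/main.go` (the IDs below are placeholders; replace them with a user ID and a group ID that already exist on your server):
+
+```go
+var (
+	// Hypothetical example values; use real, already-registered IDs.
+	TestTargetUserList = []string{"stress_target_user_1", "stress_target_user_2"}
+	DefaultGroupID     = "stress_default_group"
+)
+```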
+
+### Build
+
+```bash
+go build -o _output/bin/tools/linux/amd64/stress-test tools/stress-test/main.go
+
+# or
+
+go build -o tools/stress-test/stress-test tools/stress-test/main.go
+```
+
+### Execute
+
+```bash
+_output/bin/tools/linux/amd64/stress-test -c config/
+
+# or
+
+tools/stress-test/stress-test -c config/
+```
diff --git a/tools/stress-test/main.go b/tools/stress-test/main.go
new file mode 100755
index 000000000..f845b5e93
--- /dev/null
+++ b/tools/stress-test/main.go
@@ -0,0 +1,459 @@
+package main
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/openimsdk/open-im-server/v3/pkg/apistruct"
+	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/protocol/auth"
+	"github.com/openimsdk/protocol/constant"
+	"github.com/openimsdk/protocol/group"
+	"github.com/openimsdk/protocol/relation"
+	"github.com/openimsdk/protocol/sdkws"
+	pbuser "github.com/openimsdk/protocol/user"
+	"github.com/openimsdk/tools/log"
+	"github.com/openimsdk/tools/system/program"
+)
+
+/*
+	1. Create one user every minute
+	2. Import target users as friends
+	3. Add users to the default group
+	4. Send a message to the default group every second, containing index and current timestamp
+	5. Create a new group every minute and invite target users to join
+*/
+
+// !!! ATTENTION: These variables must be set before running!
+var (
+	// Default user IDs used for testing; they must already be created.
+	TestTargetUserList = []string{
+		"",
+	}
+	DefaultGroupID = "" // Default group ID used for testing; it must already be created.
+)
+
+var (
+	ApiAddress string
+
+	// API method
+	GetAdminToken = "/auth/get_admin_token"
+	CreateUser    = "/user/user_register"
+	ImportFriend  = "/friend/import_friend"
+	InviteToGroup = "/group/invite_user_to_group"
+	SendMsg       = "/msg/send_msg"
+	CreateGroup   = "/group/create_group"
+	GetUserToken  = "/auth/user_token"
+)
+
+const (
+	MaxUser  = 10000
+	MaxGroup = 1000
+
+	CreateUserTicker  = 1 * time.Minute // Create a user every minute
+	SendMessageTicker = 1 * time.Second // Send a message every second
+	CreateGroupTicker = 1 * time.Minute
+)
+
+type BaseResp struct {
+	ErrCode int             `json:"errCode"`
+	ErrMsg  string          `json:"errMsg"`
+	Data    json.RawMessage `json:"data"`
+}
+
+type StressTest struct {
+	Conf           *conf
+	AdminUserID    string
+	AdminToken     string
+	DefaultGroupID string
+	DefaultUserID  string
+	UserCounter    int
+	GroupCounter   int
+	MsgCounter     int
+	CreatedUsers   []string
+	CreatedGroups  []string
+	Mutex          sync.Mutex
+	Ctx            context.Context
+	Cancel         context.CancelFunc
+	HttpClient     *http.Client
+	Wg             sync.WaitGroup
+	Once           sync.Once
+}
+
+type conf struct {
+	Share config.Share
+	Api   config.API
+}
+
+func initConfig(configDir string) (*config.Share, *config.API, error) {
+	var (
+		share     = &config.Share{}
+		apiConfig = &config.API{}
+	)
+
+	err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return share, apiConfig, nil
+}
+
+// Post Request
+func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) {
+	// Marshal body
+	jsonBody, err := json.Marshal(reqbody)
+	if err != nil {
+		log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody)
+		return nil, err
+	}
+
+	
req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) ImportFriend(ctx context.Context, userID string) error { + req := relation.ImportFriendReq{ + OwnerUserID: userID, + FriendUserIDs: TestTargetUserList, + } + + _, err := st.PostRequest(ctx, ApiAddress+ImportFriend, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, userID string) error { + req := group.InviteUserToGroupReq{ + GroupID: st.DefaultGroupID, + InvitedUserIDs: []string{userID}, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string) error { + contentObj := map[string]any{ + "content": fmt.Sprintf("index %d. 
The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: st.DefaultGroupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +func (st *StressTest) CreateGroup(ctx context.Context, userID string) (string, error) { + groupID := fmt.Sprintf("StressTestGroup_%d_%s", st.GroupCounter, time.Now().Format("20060102150405")) + + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: TestTargetUserList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan struct{}) + + defer cancel() + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + select { + case <-ch: + default: + close(ch) + } + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + st.DefaultGroupID = DefaultGroupID + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateUserTicker) + defer ticker.Stop() + + for st.UserCounter < MaxUser { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create user", "reason", "context done") + return + + case <-ticker.C: + // Create User + userID := fmt.Sprintf("%d_Stresstest_%s", st.UserCounter, time.Now().Format("0102150405")) + + userCreatedID, err := st.CreateUser(st.Ctx, userID) + if err != nil { + log.ZError(st.Ctx, "Create User failed.", err, "UserID", userID) + os.Exit(1) + return + } + // fmt.Println("User Created ID:", userCreatedID) + + // Import Friend + if err = st.ImportFriend(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Import Friend failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + // 
Invite To Group
+				if err = st.InviteToGroup(st.Ctx, userCreatedID); err != nil {
+					log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", userCreatedID)
+					os.Exit(1)
+					return
+				}
+
+				st.Once.Do(func() {
+					st.DefaultUserID = userCreatedID
+					fmt.Println("Default Send User Created ID:", userCreatedID)
+					close(ch)
+				})
+			}
+		}
+	}()
+
+	st.Wg.Add(1)
+	go func() {
+		defer st.Wg.Done()
+
+		ticker := time.NewTicker(SendMessageTicker)
+		defer ticker.Stop()
+		<-ch
+
+		for {
+			select {
+			case <-st.Ctx.Done():
+				log.ZInfo(st.Ctx, "Stop Send message", "reason", "context done")
+				return
+
+			case <-ticker.C:
+				// Send Message
+				if err = st.SendMsg(st.Ctx, st.DefaultUserID); err != nil {
+					log.ZError(st.Ctx, "Send Message failed.", err, "UserID", st.DefaultUserID)
+					continue
+				}
+			}
+		}
+	}()
+
+	st.Wg.Add(1)
+	go func() {
+		defer st.Wg.Done()
+
+		ticker := time.NewTicker(CreateGroupTicker)
+		defer ticker.Stop()
+		<-ch
+
+		for st.GroupCounter < MaxGroup {
+
+			select {
+			case <-st.Ctx.Done():
+				log.ZInfo(st.Ctx, "Stop Create Group", "reason", "context done")
+				return
+
+			case <-ticker.C:
+
+				// Create Group
+				_, err := st.CreateGroup(st.Ctx, st.DefaultUserID)
+				if err != nil {
+					log.ZError(st.Ctx, "Create Group failed.", err, "UserID", st.DefaultUserID)
+					os.Exit(1)
+					return
+				}
+
+				// fmt.Println("Group Created ID:", groupID)
+			}
+		}
+	}()
+
+	st.Wg.Wait()
+}
diff --git a/version/version.go b/version/version.go
index 23b3a82f5..32ad27808 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,6 +1,14 @@
 package version
 
-import _ "embed"
+import (
+	_ "embed"
+	"strings"
+)
 
 //go:embed version
 var Version string
+
+func init() {
+	Version = strings.Trim(Version, "\n")
+	Version = strings.TrimSpace(Version)
+}