From eeaabb391fe9158aa36e183c8519fc2f725d7617 Mon Sep 17 00:00:00 2001 From: icey-yu <119291641+icey-yu@users.noreply.github.com> Date: Thu, 13 Mar 2025 18:21:48 +0800 Subject: [PATCH 01/14] feat: check if the secret in config/share.yml has been changed during registration (#3223) * feat: check if the secret in config/share.yml has been changed during registration. * fix: cicd * fix: code * fix: cicd * fix: cicd * fix: cicd * fix: cicd * fix: cicd --- .github/workflows/go-build-test.yml | 25 ++++++++++++++++++++----- internal/rpc/user/user.go | 8 ++++++++ pkg/common/servererrs/code.go | 14 ++++++++------ pkg/common/servererrs/predefine.go | 2 ++ 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/.github/workflows/go-build-test.yml b/.github/workflows/go-build-test.yml index 4033603e6..9e2aa3f1c 100644 --- a/.github/workflows/go-build-test.yml +++ b/.github/workflows/go-build-test.yml @@ -12,6 +12,10 @@ jobs: go-build: name: Test with go ${{ matrix.go_version }} on ${{ matrix.os }} runs-on: ${{ matrix.os }} + + env: + SHARE_CONFIG_PATH: config/share.yml + permissions: contents: write pull-requests: write @@ -40,6 +44,10 @@ jobs: with: compose-file: "./docker-compose.yml" + - name: Modify Server Configuration + run: | + yq e '.secret = 123456' -i ${{ env.SHARE_CONFIG_PATH }} + # - name: Get Internal IP Address # id: get-ip # run: | @@ -71,6 +79,11 @@ jobs: go mod download go install github.com/magefile/mage@latest + - name: Modify Chat Configuration + run: | + cd ${{ github.workspace }}/chat-repo + yq e '.openIM.secret = 123456' -i ${{ env.SHARE_CONFIG_PATH }} + - name: Build and test Chat Services run: | cd ${{ github.workspace }}/chat-repo @@ -132,7 +145,7 @@ jobs: # Test get admin token get_admin_token_response=$(curl -X POST -H "Content-Type: application/json" -H "operationID: imAdmin" -d '{ - "secret": "openIM123", + "secret": "123456", "platformID": 2, "userID": "imAdmin" }' http://127.0.0.1:10002/auth/get_admin_token) @@ -169,7 +182,8 @@ jobs: 
contents: write env: SDK_DIR: openim-sdk-core - CONFIG_PATH: config/notification.yml + NOTIFICATION_CONFIG_PATH: config/notification.yml + SHARE_CONFIG_PATH: config/share.yml strategy: matrix: @@ -184,7 +198,7 @@ jobs: uses: actions/checkout@v4 with: repository: "openimsdk/openim-sdk-core" - ref: "release-v3.8" + ref: "main" path: ${{ env.SDK_DIR }} - name: Set up Go ${{ matrix.go_version }} @@ -199,8 +213,9 @@ jobs: - name: Modify Server Configuration run: | - yq e '.groupCreated.isSendMsg = true' -i ${{ env.CONFIG_PATH }} - yq e '.friendApplicationApproved.isSendMsg = true' -i ${{ env.CONFIG_PATH }} + yq e '.groupCreated.isSendMsg = true' -i ${{ env.NOTIFICATION_CONFIG_PATH }} + yq e '.friendApplicationApproved.isSendMsg = true' -i ${{ env.NOTIFICATION_CONFIG_PATH }} + yq e '.secret = 123456' -i ${{ env.SHARE_CONFIG_PATH }} - name: Start Server Services run: | diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index d4fe7ecc4..cff070b44 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -51,6 +51,10 @@ import ( "google.golang.org/grpc" ) +const ( + defaultSecret = "openIM123" +) + type userServer struct { pbuser.UnimplementedUserServer online cache.OnlineCache @@ -273,6 +277,10 @@ func (s *userServer) UserRegister(ctx context.Context, req *pbuser.UserRegisterR if len(req.Users) == 0 { return nil, errs.ErrArgs.WrapMsg("users is empty") } + // check if secret is changed + if s.config.Share.Secret == defaultSecret { + return nil, servererrs.ErrSecretNotChanged.Wrap() + } if err = authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil { return nil, err diff --git a/pkg/common/servererrs/code.go b/pkg/common/servererrs/code.go index 3d0aa4a71..906f890a5 100644 --- a/pkg/common/servererrs/code.go +++ b/pkg/common/servererrs/code.go @@ -37,7 +37,8 @@ const ( // General error codes. const ( - NoError = 0 // No error + NoError = 0 // No error + DatabaseError = 90002 // Database error (redis/mysql, etc.) 
NetworkError = 90004 // Network error DataError = 90007 // Data error @@ -45,11 +46,12 @@ const ( CallbackError = 80000 // General error codes. - ServerInternalError = 500 // Server internal error - ArgsError = 1001 // Input parameter error - NoPermissionError = 1002 // Insufficient permission - DuplicateKeyError = 1003 - RecordNotFoundError = 1004 // Record does not exist + ServerInternalError = 500 // Server internal error + ArgsError = 1001 // Input parameter error + NoPermissionError = 1002 // Insufficient permission + DuplicateKeyError = 1003 + RecordNotFoundError = 1004 // Record does not exist + SecretNotChangedError = 1050 // secret not changed // Account error codes. UserIDNotFoundError = 1101 // UserID does not exist or is not registered diff --git a/pkg/common/servererrs/predefine.go b/pkg/common/servererrs/predefine.go index ab09aa512..b1d6b06a9 100644 --- a/pkg/common/servererrs/predefine.go +++ b/pkg/common/servererrs/predefine.go @@ -17,6 +17,8 @@ package servererrs import "github.com/openimsdk/tools/errs" var ( + ErrSecretNotChanged = errs.NewCodeError(SecretNotChangedError, "secret not changed, please change secret in config/share.yml for security reasons") + ErrDatabase = errs.NewCodeError(DatabaseError, "DatabaseError") ErrNetwork = errs.NewCodeError(NetworkError, "NetworkError") ErrCallback = errs.NewCodeError(CallbackError, "CallbackError") From a07fc920b337d5617fbe33ee22cc2fb4c0e76595 Mon Sep 17 00:00:00 2001 From: Monet Lee Date: Fri, 28 Mar 2025 15:46:42 +0800 Subject: [PATCH 02/14] feat: implement stress-test tools. (#3261) * feat: implement stress-test tools. * revert config file. 
--- config/share.yml | 4 +- tools/stress-test/README.md | 25 ++ tools/stress-test/main.go | 452 ++++++++++++++++++++++++++++++++++++ 3 files changed, 479 insertions(+), 2 deletions(-) create mode 100644 tools/stress-test/README.md create mode 100755 tools/stress-test/main.go diff --git a/config/share.yml b/config/share.yml index a5fbeac75..0913c1e88 100644 --- a/config/share.yml +++ b/config/share.yml @@ -1,9 +1,9 @@ secret: openIM123 -imAdminUserID: [ imAdmin ] +imAdminUserID: [imAdmin] # 1: For Android, iOS, Windows, Mac, and web platforms, only one instance can be online at a time multiLogin: policy: 1 # max num of tokens in one end - maxNumOneEnd: 30 \ No newline at end of file + maxNumOneEnd: 30 diff --git a/tools/stress-test/README.md b/tools/stress-test/README.md new file mode 100644 index 000000000..531233a20 --- /dev/null +++ b/tools/stress-test/README.md @@ -0,0 +1,25 @@ +# Stress Test + +## Usage + +You need to set `TestTargetUserList` and `DefaultGroupID` variables. + +### Build + +```bash +go build -o _output/bin/tools/linux/amd64/stress-test tools/stress-test/main.go + +# or + +go build -o tools/stress-test/stress-test tools/stress-test/main.go +``` + +### Execute + +```bash +_output/bin/tools/linux/amd64/stress-test -c config/ + +# or + +tools/stress-test/stress-test -c config/ +``` diff --git a/tools/stress-test/main.go b/tools/stress-test/main.go new file mode 100755 index 000000000..ee58c9749 --- /dev/null +++ b/tools/stress-test/main.go @@ -0,0 +1,452 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/relation" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + 
"github.com/openimsdk/tools/system/program" +) + +/* + 1. Create one user every minute + 2. Import target users as friends + 3. Add users to the default group + 4. Send a message to the default group every second, containing index and current timestamp + 5. Create a new group every minute and invite target users to join +*/ + +// !!! ATTENTION: This variable is must be added! +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + "", + } + DefaultGroupID = "" // Use default group ID for testing, need to be created. +) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 10000 + MaxGroup = 1000 + + CreateUserTicker = 1 * time.Minute // Ticker is 1min in create user + SendMessageTicker = 1 * time.Second // Ticker is 1s in send message + CreateGroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultSendUserID string + UserCounter int + GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = 
config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := 
&auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) ImportFriend(ctx context.Context, userID string) error { + req := relation.ImportFriendReq{ + OwnerUserID: userID, + FriendUserIDs: TestTargetUserList, + } + + _, err := st.PostRequest(ctx, ApiAddress+ImportFriend, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, userID string) error { + req := group.InviteUserToGroupReq{ + GroupID: st.DefaultGroupID, + InvitedUserIDs: []string{userID}, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string) error { + contentObj := map[string]any{ + "content": fmt.Sprintf("index %d. 
The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := map[string]any{ + "sendID": userID, + "groupID": st.DefaultGroupID, + "contentType": constant.Text, + "sessionType": constant.ReadGroupChatType, + "content": contentObj, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +func (st *StressTest) CreateGroup(ctx context.Context, userID string) (string, error) { + groupID := fmt.Sprintf("StressTestGroup_%d_%s", st.GroupCounter, time.Now().Format("20060102150405")) + + req := map[string]any{ + "memberUserIDs": TestTargetUserList, + "ownerUserID": userID, + "groupInfo": map[string]any{ + "groupID": groupID, + "groupName": groupID, + "groupType": constant.WorkingGroup, + }, + } + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan struct{}) + + defer cancel() + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + 
AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + select { + case <-ch: + default: + close(ch) + } + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + st.DefaultGroupID = DefaultGroupID + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateUserTicker) + defer ticker.Stop() + + for st.UserCounter < MaxUser { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create user", "reason", "context done") + return + + case <-ticker.C: + // Create User + userID := fmt.Sprintf("%d_Stresstest_%s", st.UserCounter, time.Now().Format("0102150405")) + + userCreatedID, err := st.CreateUser(st.Ctx, userID) + if err != nil { + log.ZError(st.Ctx, "Create User failed.", err, "UserID", userID) + os.Exit(1) + return + } + // fmt.Println("User Created ID:", userCreatedID) + + // Import Friend + if err = st.ImportFriend(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Import Friend failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + st.Once.Do(func() { + st.DefaultSendUserID = userCreatedID + fmt.Println("Default Send User Created ID:", userCreatedID) + close(ch) + }) + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(SendMessageTicker) + defer ticker.Stop() + <-ch + + for { + select { + case <-st.Ctx.Done(): + 
log.ZInfo(st.Ctx, "Stop Send message", "reason", "context done") + return + + case <-ticker.C: + // Send Message + if err = st.SendMsg(st.Ctx, st.DefaultSendUserID); err != nil { + log.ZError(st.Ctx, "Send Message failed.", err, "UserID", st.DefaultSendUserID) + continue + } + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateGroupTicker) + defer ticker.Stop() + <-ch + + for st.GroupCounter < MaxGroup { + + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create Group", "reason", "context done") + return + + case <-ticker.C: + + // Create Group + _, err := st.CreateGroup(st.Ctx, st.DefaultSendUserID) + if err != nil { + log.ZError(st.Ctx, "Create Group failed.", err, "UserID", st.DefaultSendUserID) + os.Exit(1) + return + } + + // fmt.Println("Group Created ID:", groupID) + } + } + }() + + st.Wg.Wait() +} From 304bf00ef28af606cee49819c7d12ece0906759d Mon Sep 17 00:00:00 2001 From: Monet Lee Date: Mon, 31 Mar 2025 16:45:52 +0800 Subject: [PATCH 03/14] fix: improve stress test tools parms. (#3265) * feat: implement stress-test tools. * revert config file. * fix: improve tools parms. * fix modify args. 
--- tools/stress-test/main.go | 459 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 459 insertions(+) create mode 100755 tools/stress-test/main.go diff --git a/tools/stress-test/main.go b/tools/stress-test/main.go new file mode 100755 index 000000000..aa52b69ed --- /dev/null +++ b/tools/stress-test/main.go @@ -0,0 +1,459 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/relation" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +/* + 1. Create one user every minute + 2. Import target users as friends + 3. Add users to the default group + 4. Send a message to the default group every second, containing index and current timestamp + 5. Create a new group every minute and invite target users to join +*/ + +// !!! ATTENTION: This variable is must be added! +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + "", + } + DefaultGroupID = "" // Use default group ID for testing, need to be created. 
+) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 10000 + MaxGroup = 1000 + + CreateUserTicker = 1 * time.Minute // Ticker is 1min in create user + SendMessageTicker = 1 * time.Second // Ticker is 1s in send message + CreateGroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultSendUserID string + UserCounter int + GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) 
+ if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) ImportFriend(ctx context.Context, userID string) error { + 
req := relation.ImportFriendReq{ + OwnerUserID: userID, + FriendUserIDs: TestTargetUserList, + } + + _, err := st.PostRequest(ctx, ApiAddress+ImportFriend, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, userID string) error { + req := group.InviteUserToGroupReq{ + GroupID: st.DefaultGroupID, + InvitedUserIDs: []string{userID}, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string) error { + contentObj := map[string]any{ + "content": fmt.Sprintf("index %d. The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: st.DefaultGroupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +func (st *StressTest) CreateGroup(ctx context.Context, userID string) (string, error) { + groupID := fmt.Sprintf("StressTestGroup_%d_%s", st.GroupCounter, time.Now().Format("20060102150405")) + + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: TestTargetUserList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // 
defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan struct{}) + + defer cancel() + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + select { + case <-ch: + default: + close(ch) + } + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + st.DefaultGroupID = DefaultGroupID + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateUserTicker) + defer ticker.Stop() + + for st.UserCounter < MaxUser { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create user", "reason", "context done") + return + + case <-ticker.C: + // Create User + userID := fmt.Sprintf("%d_Stresstest_%s", st.UserCounter, time.Now().Format("0102150405")) + + userCreatedID, err := st.CreateUser(st.Ctx, userID) + if err != nil { + log.ZError(st.Ctx, "Create User failed.", err, "UserID", userID) + os.Exit(1) + return + } + // 
fmt.Println("User Created ID:", userCreatedID) + + // Import Friend + if err = st.ImportFriend(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Import Friend failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + st.Once.Do(func() { + st.DefaultSendUserID = userCreatedID + fmt.Println("Default Send User Created ID:", userCreatedID) + close(ch) + }) + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(SendMessageTicker) + defer ticker.Stop() + <-ch + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send message", "reason", "context done") + return + + case <-ticker.C: + // Send Message + if err = st.SendMsg(st.Ctx, st.DefaultSendUserID); err != nil { + log.ZError(st.Ctx, "Send Message failed.", err, "UserID", st.DefaultSendUserID) + continue + } + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateGroupTicker) + defer ticker.Stop() + <-ch + + for st.GroupCounter < MaxGroup { + + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create Group", "reason", "context done") + return + + case <-ticker.C: + + // Create Group + _, err := st.CreateGroup(st.Ctx, st.DefaultSendUserID) + if err != nil { + log.ZError(st.Ctx, "Create Group failed.", err, "UserID", st.DefaultSendUserID) + os.Exit(1) + return + } + + // fmt.Println("Group Created ID:", groupID) + } + } + }() + + st.Wg.Wait() +} From c1648f3dd544ecfad3458d1c177dd64eb5d8fe2c Mon Sep 17 00:00:00 2001 From: chao <48119764+withchao@users.noreply.github.com> Date: Tue, 1 Apr 2025 17:13:32 +0800 Subject: [PATCH 04/14] fix: oss specifies content-type when uploading (#3267) * pb * fix: Modifying other fields while setting IsPrivateChat does not take effect * fix: quote message error revoke * 
refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * upgrading pkg tools * fix * fix * optimize log output * feat: support GetLastMessage * feat: support GetLastMessage * feat: s3 switch * feat: s3 switch * fix: GetUsersOnline * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: seq conversion failed without exiting * fix: DeleteDoc crash * fix: fill send time * fix: fill send time * fix: crash caused by withdrawing messages from users who have left the group * fix: user msg timestamp * seq read config * seq read config * fix: the source message of the reference is withdrawn, and the referenced message is deleted * feat: optimize the default notification.yml * fix: shouldPushOffline * fix: the sorting is wrong after canceling the administrator in group settings * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * fix: oss specifies content-type when uploading * fix: the version number contains a line break * fix: the version number contains a line break --- go.mod | 4 ++-- go.sum | 12 +++++----- internal/api/init.go | 35 ++++++++++++++--------------- internal/rpc/third/s3.go | 2 +- pkg/common/storage/controller/s3.go | 6 ++--- tools/s3/internal/conversion.go | 13 ++++++----- version/version.go | 10 ++++++++- 7 files changed, 45 insertions(+), 37 deletions(-) diff --git a/go.mod b/go.mod index 0a9de4010..0eb72edf7 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,8 @@ require ( github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - 
github.com/openimsdk/protocol v0.0.72-alpha.79 - github.com/openimsdk/tools v0.0.50-alpha.74 + github.com/openimsdk/protocol v0.0.72-alpha.81 + github.com/openimsdk/tools v0.0.50-alpha.79 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index 66af77379..aa0dfa6ac 100644 --- a/go.sum +++ b/go.sum @@ -345,12 +345,12 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= -github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= -github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.72-alpha.79 h1:e46no8WVAsmTzyy405klrdoUiG7u+1ohDsXvQuFng4s= -github.com/openimsdk/protocol v0.0.72-alpha.79/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw= -github.com/openimsdk/tools v0.0.50-alpha.74 h1:yh10SiMiivMEjicEQg+QAsH4pvaO+4noMPdlw+ew0Kc= -github.com/openimsdk/tools v0.0.50-alpha.74/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo= +github.com/openimsdk/gomake v0.0.15-alpha.2 h1:5Q8yl8ezy2yx+q8/ucU/t4kJnDfCzNOrkXcDACCqtyM= +github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= +github.com/openimsdk/protocol v0.0.72-alpha.81 h1:6tDuZ3Anfi1uhX/V5mWxITqJnGQPnvgeaxeqJlEHIVE= +github.com/openimsdk/protocol v0.0.72-alpha.81/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw= +github.com/openimsdk/tools v0.0.50-alpha.79 h1:jxYEbrzaze4Z2r4NrKad816buZ690ix0L9MTOOOH3ik= +github.com/openimsdk/tools v0.0.50-alpha.79/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo= github.com/pelletier/go-toml/v2 v2.2.2 
h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= diff --git a/internal/api/init.go b/internal/api/init.go index 4bd29c9e0..1e0f1075f 100644 --- a/internal/api/init.go +++ b/internal/api/init.go @@ -144,24 +144,23 @@ func Start(ctx context.Context, index int, config *Config) error { } }() - if config.Discovery.Enable == conf.ETCD { - cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames()) - cm.Watch(ctx) - } - - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGTERM) - - shutdown := func() error { - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - err := server.Shutdown(ctx) - if err != nil { - return errs.WrapMsg(err, "shutdown err") - } - return nil - } - disetcd.RegisterShutDown(shutdown) + //if config.Discovery.Enable == conf.ETCD { + // cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames()) + // cm.Watch(ctx) + //} + //sigs := make(chan os.Signal, 1) + //signal.Notify(sigs, syscall.SIGTERM) + //select { + //case val := <-sigs: + // log.ZDebug(ctx, "recv exit", "signal", val.String()) + // cancel(fmt.Errorf("signal %s", val.String())) + //case <-ctx.Done(): + //} + <-apiCtx.Done() + exitCause := context.Cause(apiCtx) + log.ZWarn(ctx, "api server exit", exitCause) + timer := time.NewTimer(time.Second * 15) + defer timer.Stop() select { case <-sigs: program.SIGTERMExit() diff --git a/internal/rpc/third/s3.go b/internal/rpc/third/s3.go index 97206dd6d..757320dac 100644 --- a/internal/rpc/third/s3.go +++ b/internal/rpc/third/s3.go @@ -62,7 +62,7 @@ func (t *thirdServer) InitiateMultipartUpload(ctx context.Context, req *third.In return nil, err } expireTime := time.Now().Add(t.defaultExpire) - result, err := 
t.s3dataBase.InitiateMultipartUpload(ctx, req.Hash, req.Size, t.defaultExpire, int(req.MaxParts)) + result, err := t.s3dataBase.InitiateMultipartUpload(ctx, req.Hash, req.Size, t.defaultExpire, int(req.MaxParts), req.ContentType) if err != nil { if haErr, ok := errs.Unwrap(err).(*cont.HashAlreadyExistsError); ok { obj := &model.Object{ diff --git a/pkg/common/storage/controller/s3.go b/pkg/common/storage/controller/s3.go index 30d8d20ec..9ab31c5a6 100644 --- a/pkg/common/storage/controller/s3.go +++ b/pkg/common/storage/controller/s3.go @@ -33,7 +33,7 @@ type S3Database interface { PartLimit() (*s3.PartLimit, error) PartSize(ctx context.Context, size int64) (int64, error) AuthSign(ctx context.Context, uploadID string, partNumbers []int) (*s3.AuthSignResult, error) - InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error) + InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int, contentType string) (*cont.InitiateUploadResult, error) CompleteMultipartUpload(ctx context.Context, uploadID string, parts []string) (*cont.UploadResult, error) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (time.Time, string, error) SetObject(ctx context.Context, info *model.Object) error @@ -73,8 +73,8 @@ func (s *s3Database) AuthSign(ctx context.Context, uploadID string, partNumbers return s.s3.AuthSign(ctx, uploadID, partNumbers) } -func (s *s3Database) InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*cont.InitiateUploadResult, error) { - return s.s3.InitiateUpload(ctx, hash, size, expire, maxParts) +func (s *s3Database) InitiateMultipartUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int, contentType string) (*cont.InitiateUploadResult, error) { + return s.s3.InitiateUploadContentType(ctx, hash, size, expire, 
maxParts, contentType) } func (s *s3Database) CompleteMultipartUpload(ctx context.Context, uploadID string, parts []string) (*cont.UploadResult, error) { diff --git a/tools/s3/internal/conversion.go b/tools/s3/internal/conversion.go index ba2174535..af391ec42 100644 --- a/tools/s3/internal/conversion.go +++ b/tools/s3/internal/conversion.go @@ -4,6 +4,11 @@ import ( "context" "errors" "fmt" + "log" + "net/http" + "path/filepath" + "time" + "github.com/mitchellh/mapstructure" "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis" @@ -19,10 +24,6 @@ import ( "github.com/openimsdk/tools/s3/oss" "github.com/spf13/viper" "go.mongodb.org/mongo-driver/mongo" - "log" - "net/http" - "path/filepath" - "time" ) const defaultTimeout = time.Second * 10 @@ -159,7 +160,7 @@ func doObject(db database.ObjectInfo, newS3, oldS3 s3.Interface, skip int) (*Res if err != nil { return nil, err } - putURL, err := newS3.PresignedPutObject(ctx, obj.Key, time.Hour) + putURL, err := newS3.PresignedPutObject(ctx, obj.Key, time.Hour, &s3.PutOption{ContentType: obj.ContentType}) if err != nil { return nil, err } @@ -176,7 +177,7 @@ func doObject(db database.ObjectInfo, newS3, oldS3 s3.Interface, skip int) (*Res return nil, fmt.Errorf("download object failed %s", downloadResp.Status) } log.Printf("file size %d", obj.Size) - request, err := http.NewRequest(http.MethodPut, putURL, downloadResp.Body) + request, err := http.NewRequest(http.MethodPut, putURL.URL, downloadResp.Body) if err != nil { return nil, err } diff --git a/version/version.go b/version/version.go index 23b3a82f5..32ad27808 100644 --- a/version/version.go +++ b/version/version.go @@ -1,6 +1,14 @@ package version -import _ "embed" +import ( + _ "embed" + "strings" +) //go:embed version var Version string + +func init() { + Version = strings.Trim(Version, "\n") + Version = strings.TrimSpace(Version) +} From f74bd018d2b4e5fb27a6d9ecfd01608a7327861d Mon Sep 17 
00:00:00 2001 From: chao <48119764+withchao@users.noreply.github.com> Date: Wed, 2 Apr 2025 18:18:06 +0800 Subject: [PATCH 05/14] feat: support server-issued configuration, which can be set for individual users (#3271) * pb * fix: Modifying other fields while setting IsPrivateChat does not take effect * fix: quote message error revoke * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * upgrading pkg tools * fix * fix * optimize log output * feat: support GetLastMessage * feat: support GetLastMessage * feat: s3 switch * feat: s3 switch * fix: GetUsersOnline * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: seq conversion failed without exiting * fix: DeleteDoc crash * fix: fill send time * fix: fill send time * fix: crash caused by withdrawing messages from users who have left the group * fix: user msg timestamp * seq read config * seq read config * fix: the source message of the reference is withdrawn, and the referenced message is deleted * feat: optimize the default notification.yml * fix: shouldPushOffline * fix: the sorting is wrong after canceling the administrator in group settings * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * fix: oss specifies content-type when uploading * fix: the version number contains a line break * fix: the version number contains a line break * feat: support client config * feat: support client config --- go.mod | 6 +- go.sum | 12 +-- internal/api/router.go | 5 + internal/api/user.go | 16 +++ internal/rpc/user/config.go | 71 +++++++++++++ internal/rpc/user/user.go | 11 ++- 
.../storage/cache/cachekey/client_config.go | 10 ++ pkg/common/storage/cache/client_config.go | 8 ++ .../storage/cache/redis/client_config.go | 69 +++++++++++++ .../storage/controller/client_config.go | 58 +++++++++++ pkg/common/storage/database/client_config.go | 15 +++ .../storage/database/mgo/client_config.go | 99 +++++++++++++++++++ pkg/common/storage/model/client_config.go | 7 ++ 13 files changed, 376 insertions(+), 11 deletions(-) create mode 100644 internal/rpc/user/config.go create mode 100644 pkg/common/storage/cache/cachekey/client_config.go create mode 100644 pkg/common/storage/cache/client_config.go create mode 100644 pkg/common/storage/cache/redis/client_config.go create mode 100644 pkg/common/storage/controller/client_config.go create mode 100644 pkg/common/storage/database/client_config.go create mode 100644 pkg/common/storage/database/mgo/client_config.go create mode 100644 pkg/common/storage/model/client_config.go diff --git a/go.mod b/go.mod index 0a9de4010..e64e064f4 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,8 @@ require ( github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/openimsdk/protocol v0.0.72-alpha.79 - github.com/openimsdk/tools v0.0.50-alpha.74 + github.com/openimsdk/protocol v0.0.73-alpha.3 + github.com/openimsdk/tools v0.0.50-alpha.79 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.9.0 @@ -219,3 +219,5 @@ require ( golang.org/x/crypto v0.27.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) + +//replace github.com/openimsdk/protocol => /Users/chao/Desktop/code/protocol diff --git a/go.sum b/go.sum index 66af77379..e6408cfcd 100644 --- a/go.sum +++ b/go.sum @@ -345,12 +345,12 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.25.0 
h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= -github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= -github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.72-alpha.79 h1:e46no8WVAsmTzyy405klrdoUiG7u+1ohDsXvQuFng4s= -github.com/openimsdk/protocol v0.0.72-alpha.79/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw= -github.com/openimsdk/tools v0.0.50-alpha.74 h1:yh10SiMiivMEjicEQg+QAsH4pvaO+4noMPdlw+ew0Kc= -github.com/openimsdk/tools v0.0.50-alpha.74/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo= +github.com/openimsdk/gomake v0.0.15-alpha.2 h1:5Q8yl8ezy2yx+q8/ucU/t4kJnDfCzNOrkXcDACCqtyM= +github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= +github.com/openimsdk/protocol v0.0.73-alpha.3 h1:mf/REUZA5in2gk8ggwqJD8444xLvB7WlF7M97oXN78g= +github.com/openimsdk/protocol v0.0.73-alpha.3/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw= +github.com/openimsdk/tools v0.0.50-alpha.79 h1:jxYEbrzaze4Z2r4NrKad816buZ690ix0L9MTOOOH3ik= +github.com/openimsdk/tools v0.0.50-alpha.79/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= diff --git a/internal/api/router.go b/internal/api/router.go index 216a43363..c7bc3c724 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -124,6 +124,11 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf userRouterGroup.POST("/add_notification_account", u.AddNotificationAccount) userRouterGroup.POST("/update_notification_account", u.UpdateNotificationAccountInfo) 
userRouterGroup.POST("/search_notification_account", u.SearchNotificationAccount) + + userRouterGroup.POST("/get_user_client_config", u.GetUserClientConfig) + userRouterGroup.POST("/set_user_client_config", u.SetUserClientConfig) + userRouterGroup.POST("/del_user_client_config", u.DelUserClientConfig) + userRouterGroup.POST("/page_user_client_config", u.PageUserClientConfig) } // friend routing group { diff --git a/internal/api/user.go b/internal/api/user.go index a88f8f65a..7f256f5dd 100644 --- a/internal/api/user.go +++ b/internal/api/user.go @@ -242,3 +242,19 @@ func (u *UserApi) UpdateNotificationAccountInfo(c *gin.Context) { func (u *UserApi) SearchNotificationAccount(c *gin.Context) { a2r.Call(c, user.UserClient.SearchNotificationAccount, u.Client) } + +func (u *UserApi) GetUserClientConfig(c *gin.Context) { + a2r.Call(c, user.UserClient.GetUserClientConfig, u.Client) +} + +func (u *UserApi) SetUserClientConfig(c *gin.Context) { + a2r.Call(c, user.UserClient.SetUserClientConfig, u.Client) +} + +func (u *UserApi) DelUserClientConfig(c *gin.Context) { + a2r.Call(c, user.UserClient.DelUserClientConfig, u.Client) +} + +func (u *UserApi) PageUserClientConfig(c *gin.Context) { + a2r.Call(c, user.UserClient.PageUserClientConfig, u.Client) +} diff --git a/internal/rpc/user/config.go b/internal/rpc/user/config.go new file mode 100644 index 000000000..5a9a46359 --- /dev/null +++ b/internal/rpc/user/config.go @@ -0,0 +1,71 @@ +package user + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/authverify" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/utils/datautil" +) + +func (s *userServer) GetUserClientConfig(ctx context.Context, req *pbuser.GetUserClientConfigReq) (*pbuser.GetUserClientConfigResp, error) { + if req.UserID != "" { + if err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID); err != nil { + return nil, err + } + if 
_, err := s.db.GetUserByID(ctx, req.UserID); err != nil { + return nil, err + } + } + res, err := s.clientConfig.GetUserConfig(ctx, req.UserID) + if err != nil { + return nil, err + } + return &pbuser.GetUserClientConfigResp{Configs: res}, nil +} + +func (s *userServer) SetUserClientConfig(ctx context.Context, req *pbuser.SetUserClientConfigReq) (*pbuser.SetUserClientConfigResp, error) { + if err := authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil { + return nil, err + } + if req.UserID != "" { + if _, err := s.db.GetUserByID(ctx, req.UserID); err != nil { + return nil, err + } + } + if err := s.clientConfig.SetUserConfig(ctx, req.UserID, req.Configs); err != nil { + return nil, err + } + return &pbuser.SetUserClientConfigResp{}, nil +} + +func (s *userServer) DelUserClientConfig(ctx context.Context, req *pbuser.DelUserClientConfigReq) (*pbuser.DelUserClientConfigResp, error) { + if err := authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil { + return nil, err + } + if err := s.clientConfig.DelUserConfig(ctx, req.UserID, req.Keys); err != nil { + return nil, err + } + return &pbuser.DelUserClientConfigResp{}, nil +} + +func (s *userServer) PageUserClientConfig(ctx context.Context, req *pbuser.PageUserClientConfigReq) (*pbuser.PageUserClientConfigResp, error) { + if err := authverify.CheckAdmin(ctx, s.config.Share.IMAdminUserID); err != nil { + return nil, err + } + total, res, err := s.clientConfig.GetUserConfigPage(ctx, req.UserID, req.Key, req.Pagination) + if err != nil { + return nil, err + } + return &pbuser.PageUserClientConfigResp{ + Total: total, + Configs: datautil.Slice(res, func(e *model.ClientConfig) *pbuser.ClientConfig { + return &pbuser.ClientConfig{ + UserID: e.UserID, + Key: e.Key, + Value: e.Value, + } + }), + }, nil +} diff --git a/internal/rpc/user/user.go b/internal/rpc/user/user.go index d4fe7ecc4..42824d500 100644 --- a/internal/rpc/user/user.go +++ b/internal/rpc/user/user.go @@ -62,6 +62,7 @@ type 
userServer struct { webhookClient *webhook.Client groupClient *rpcli.GroupClient relationClient *rpcli.RelationClient + clientConfig controller.ClientConfigDatabase } type Config struct { @@ -94,6 +95,10 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi if err != nil { return err } + clientConfigDB, err := mgo.NewClientConfig(mgocli.GetDB()) + if err != nil { + return err + } msgConn, err := client.GetConn(ctx, config.Discovery.RpcService.Msg) if err != nil { return err @@ -118,9 +123,9 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi userNotificationSender: NewUserNotificationSender(config, msgClient, WithUserFunc(database.FindWithError)), config: config, webhookClient: webhook.NewWebhookClient(config.WebhooksConfig.URL), - - groupClient: rpcli.NewGroupClient(groupConn), - relationClient: rpcli.NewRelationClient(friendConn), + clientConfig: controller.NewClientConfigDatabase(clientConfigDB, redis.NewClientConfigCache(rdb, clientConfigDB), mgocli.GetTx()), + groupClient: rpcli.NewGroupClient(groupConn), + relationClient: rpcli.NewRelationClient(friendConn), } pbuser.RegisterUserServer(server, u) return u.db.InitOnce(context.Background(), users) diff --git a/pkg/common/storage/cache/cachekey/client_config.go b/pkg/common/storage/cache/cachekey/client_config.go new file mode 100644 index 000000000..16770adef --- /dev/null +++ b/pkg/common/storage/cache/cachekey/client_config.go @@ -0,0 +1,10 @@ +package cachekey + +const ClientConfig = "CLIENT_CONFIG" + +func GetClientConfigKey(userID string) string { + if userID == "" { + return ClientConfig + } + return ClientConfig + ":" + userID +} diff --git a/pkg/common/storage/cache/client_config.go b/pkg/common/storage/cache/client_config.go new file mode 100644 index 000000000..329f25c59 --- /dev/null +++ b/pkg/common/storage/cache/client_config.go @@ -0,0 +1,8 @@ +package cache + +import "context" + +type ClientConfigCache interface { + DeleteUserCache(ctx 
context.Context, userIDs []string) error + GetUserConfig(ctx context.Context, userID string) (map[string]string, error) +} diff --git a/pkg/common/storage/cache/redis/client_config.go b/pkg/common/storage/cache/redis/client_config.go new file mode 100644 index 000000000..c5a455146 --- /dev/null +++ b/pkg/common/storage/cache/redis/client_config.go @@ -0,0 +1,69 @@ +package redis + +import ( + "context" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/redis/go-redis/v9" +) + +func NewClientConfigCache(rdb redis.UniversalClient, mgo database.ClientConfig) cache.ClientConfigCache { + rc := newRocksCacheClient(rdb) + return &ClientConfigCache{ + mgo: mgo, + rcClient: rc, + delete: rc.GetBatchDeleter(), + } +} + +type ClientConfigCache struct { + mgo database.ClientConfig + rcClient *rocksCacheClient + delete cache.BatchDeleter +} + +func (x *ClientConfigCache) getExpireTime(userID string) time.Duration { + if userID == "" { + return time.Hour * 24 + } else { + return time.Hour + } +} + +func (x *ClientConfigCache) getClientConfigKey(userID string) string { + return cachekey.GetClientConfigKey(userID) +} + +func (x *ClientConfigCache) GetConfig(ctx context.Context, userID string) (map[string]string, error) { + return getCache(ctx, x.rcClient, x.getClientConfigKey(userID), x.getExpireTime(userID), func(ctx context.Context) (map[string]string, error) { + return x.mgo.Get(ctx, userID) + }) +} + +func (x *ClientConfigCache) DeleteUserCache(ctx context.Context, userIDs []string) error { + keys := make([]string, 0, len(userIDs)) + for _, userID := range userIDs { + keys = append(keys, x.getClientConfigKey(userID)) + } + return x.delete.ExecDelWithKeys(ctx, keys) +} + +func (x *ClientConfigCache) GetUserConfig(ctx context.Context, userID string) (map[string]string, error) { + config, err 
:= x.GetConfig(ctx, "") + if err != nil { + return nil, err + } + if userID != "" { + userConfig, err := x.GetConfig(ctx, userID) + if err != nil { + return nil, err + } + for k, v := range userConfig { + config[k] = v + } + } + return config, nil +} diff --git a/pkg/common/storage/controller/client_config.go b/pkg/common/storage/controller/client_config.go new file mode 100644 index 000000000..1c3787634 --- /dev/null +++ b/pkg/common/storage/controller/client_config.go @@ -0,0 +1,58 @@ +package controller + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/pagination" + "github.com/openimsdk/tools/db/tx" +) + +type ClientConfigDatabase interface { + SetUserConfig(ctx context.Context, userID string, config map[string]string) error + GetUserConfig(ctx context.Context, userID string) (map[string]string, error) + DelUserConfig(ctx context.Context, userID string, keys []string) error + GetUserConfigPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) +} + +func NewClientConfigDatabase(db database.ClientConfig, cache cache.ClientConfigCache, tx tx.Tx) ClientConfigDatabase { + return &clientConfigDatabase{ + tx: tx, + db: db, + cache: cache, + } +} + +type clientConfigDatabase struct { + tx tx.Tx + db database.ClientConfig + cache cache.ClientConfigCache +} + +func (x *clientConfigDatabase) SetUserConfig(ctx context.Context, userID string, config map[string]string) error { + return x.tx.Transaction(ctx, func(ctx context.Context) error { + if err := x.db.Set(ctx, userID, config); err != nil { + return err + } + return x.cache.DeleteUserCache(ctx, []string{userID}) + }) +} + +func (x *clientConfigDatabase) GetUserConfig(ctx context.Context, userID string) (map[string]string, error) { + return 
x.cache.GetUserConfig(ctx, userID) +} + +func (x *clientConfigDatabase) DelUserConfig(ctx context.Context, userID string, keys []string) error { + return x.tx.Transaction(ctx, func(ctx context.Context) error { + if err := x.db.Del(ctx, userID, keys); err != nil { + return err + } + return x.cache.DeleteUserCache(ctx, []string{userID}) + }) +} + +func (x *clientConfigDatabase) GetUserConfigPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) { + return x.db.GetPage(ctx, userID, key, pagination) +} diff --git a/pkg/common/storage/database/client_config.go b/pkg/common/storage/database/client_config.go new file mode 100644 index 000000000..7fa888d24 --- /dev/null +++ b/pkg/common/storage/database/client_config.go @@ -0,0 +1,15 @@ +package database + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/pagination" +) + +type ClientConfig interface { + Set(ctx context.Context, userID string, config map[string]string) error + Get(ctx context.Context, userID string) (map[string]string, error) + Del(ctx context.Context, userID string, keys []string) error + GetPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) +} diff --git a/pkg/common/storage/database/mgo/client_config.go b/pkg/common/storage/database/mgo/client_config.go new file mode 100644 index 000000000..0aa462899 --- /dev/null +++ b/pkg/common/storage/database/mgo/client_config.go @@ -0,0 +1,99 @@ +// Copyright © 2023 OpenIM open source community. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mgo + +import ( + "context" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/mongoutil" + "github.com/openimsdk/tools/db/pagination" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/openimsdk/tools/errs" +) + +func NewClientConfig(db *mongo.Database) (database.ClientConfig, error) { + coll := db.Collection("config") + _, err := coll.Indexes().CreateMany(context.Background(), []mongo.IndexModel{ + { + Keys: bson.D{ + {Key: "key", Value: 1}, + {Key: "user_id", Value: 1}, + }, + Options: options.Index().SetUnique(true), + }, + }) + if err != nil { + return nil, errs.Wrap(err) + } + return &ClientConfig{ + coll: coll, + }, nil +} + +type ClientConfig struct { + coll *mongo.Collection +} + +func (x *ClientConfig) Set(ctx context.Context, userID string, config map[string]string) error { + if len(config) == 0 { + return nil + } + for key, value := range config { + filter := bson.M{"key": key, "user_id": userID} + update := bson.M{ + "value": value, + } + err := mongoutil.UpdateOne(ctx, x.coll, filter, bson.M{"$set": update}, false, options.Update().SetUpsert(true)) + if err != nil { + return err + } + } + return nil +} + +func (x *ClientConfig) Get(ctx context.Context, userID string) (map[string]string, error) { + cs, err := mongoutil.Find[*model.ClientConfig](ctx, x.coll, bson.M{"user_id": userID}) + if err != nil { + return nil, err 
+ } + cm := make(map[string]string) + for _, config := range cs { + cm[config.Key] = config.Value + } + return cm, nil +} + +func (x *ClientConfig) Del(ctx context.Context, userID string, keys []string) error { + if len(keys) == 0 { + return nil + } + return mongoutil.DeleteMany(ctx, x.coll, bson.M{"key": bson.M{"$in": keys}, "user_id": userID}) +} + +func (x *ClientConfig) GetPage(ctx context.Context, userID string, key string, pagination pagination.Pagination) (int64, []*model.ClientConfig, error) { + filter := bson.M{} + if userID != "" { + filter["user_id"] = userID + } + if key != "" { + filter["key"] = key + } + return mongoutil.FindPage[*model.ClientConfig](ctx, x.coll, filter, pagination) +} diff --git a/pkg/common/storage/model/client_config.go b/pkg/common/storage/model/client_config.go new file mode 100644 index 000000000..f06e29102 --- /dev/null +++ b/pkg/common/storage/model/client_config.go @@ -0,0 +1,7 @@ +package model + +type ClientConfig struct { + Key string `bson:"key"` + UserID string `bson:"user_id"` + Value string `bson:"value"` +} From a74bb8a17edf3593d4a6ab0c7e8ee74f7e7029cd Mon Sep 17 00:00:00 2001 From: chao <48119764+withchao@users.noreply.github.com> Date: Mon, 14 Apr 2025 11:18:07 +0800 Subject: [PATCH 06/14] feat: GetConversationsHasReadAndMaxSeq support pinned (#3281) * pb * fix: Modifying other fields while setting IsPrivateChat does not take effect * fix: quote message error revoke * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * upgrading pkg tools * fix * fix * optimize log output * feat: support GetLastMessage * feat: support GetLastMessage * feat: s3 switch * feat: s3 switch * fix: GetUsersOnline * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters 
* feat: seq conversion failed without exiting * fix: DeleteDoc crash * fix: fill send time * fix: fill send time * fix: crash caused by withdrawing messages from users who have left the group * fix: user msg timestamp * seq read config * seq read config * fix: the source message of the reference is withdrawn, and the referenced message is deleted * feat: optimize the default notification.yml * fix: shouldPushOffline * fix: the sorting is wrong after canceling the administrator in group settings * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * fix: oss specifies content-type when uploading * fix: the version number contains a line break * fix: the version number contains a line break * feat: GetConversationsHasReadAndMaxSeq support pinned * feat: GetConversationsHasReadAndMaxSeq support pinned * feat: GetConversationsHasReadAndMaxSeq support pinned --- go.mod | 4 ++-- go.sum | 12 ++++++------ internal/rpc/msg/as_read.go | 7 +++++++ pkg/rpccache/conversation.go | 25 +++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 0a9de4010..bd2db9790 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,8 @@ require ( github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/openimsdk/protocol v0.0.72-alpha.79 - github.com/openimsdk/tools v0.0.50-alpha.74 + github.com/openimsdk/protocol v0.0.73-alpha.6 + github.com/openimsdk/tools v0.0.50-alpha.79 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index 66af77379..390a51c4a 100644 --- a/go.sum +++ b/go.sum @@ -345,12 +345,12 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA github.com/onsi/ginkgo/v2 
v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= -github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y= -github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= -github.com/openimsdk/protocol v0.0.72-alpha.79 h1:e46no8WVAsmTzyy405klrdoUiG7u+1ohDsXvQuFng4s= -github.com/openimsdk/protocol v0.0.72-alpha.79/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw= -github.com/openimsdk/tools v0.0.50-alpha.74 h1:yh10SiMiivMEjicEQg+QAsH4pvaO+4noMPdlw+ew0Kc= -github.com/openimsdk/tools v0.0.50-alpha.74/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo= +github.com/openimsdk/gomake v0.0.15-alpha.2 h1:5Q8yl8ezy2yx+q8/ucU/t4kJnDfCzNOrkXcDACCqtyM= +github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI= +github.com/openimsdk/protocol v0.0.73-alpha.6 h1:sna9coWG7HN1zObBPtvG0Ki/vzqHXiB4qKbA5P3w7kc= +github.com/openimsdk/protocol v0.0.73-alpha.6/go.mod h1:WF7EuE55vQvpyUAzDXcqg+B+446xQyEba0X35lTINmw= +github.com/openimsdk/tools v0.0.50-alpha.79 h1:jxYEbrzaze4Z2r4NrKad816buZ690ix0L9MTOOOH3ik= +github.com/openimsdk/tools v0.0.50-alpha.79/go.mod h1:n2poR3asX1e1XZce4O+MOWAp+X02QJRFvhcLCXZdzRo= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= diff --git a/internal/rpc/msg/as_read.go b/internal/rpc/msg/as_read.go index de1879438..b25eae6b1 100644 --- a/internal/rpc/msg/as_read.go +++ b/internal/rpc/msg/as_read.go @@ -61,6 +61,13 @@ func (m *msgServer) GetConversationsHasReadAndMaxSeq(ctx context.Context, req *m return nil, err } resp := 
&msg.GetConversationsHasReadAndMaxSeqResp{Seqs: make(map[string]*msg.Seqs)} + if req.ReturnPinned { + pinnedConversationIDs, err := m.ConversationLocalCache.GetPinnedConversationIDs(ctx, req.UserID) + if err != nil { + return nil, err + } + resp.PinnedConversationIDs = pinnedConversationIDs + } for conversationID, maxSeq := range maxSeqs { resp.Seqs[conversationID] = &msg.Seqs{ HasReadSeq: hasReadSeqs[conversationID], diff --git a/pkg/rpccache/conversation.go b/pkg/rpccache/conversation.go index 70f5acfd1..162fda596 100644 --- a/pkg/rpccache/conversation.go +++ b/pkg/rpccache/conversation.go @@ -16,6 +16,7 @@ package rpccache import ( "context" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/open-im-server/v3/pkg/localcache" @@ -153,6 +154,26 @@ func (c *ConversationLocalCache) getConversationNotReceiveMessageUserIDs(ctx con })) } +func (c *ConversationLocalCache) getPinnedConversationIDs(ctx context.Context, userID string) (val []string, err error) { + log.ZDebug(ctx, "ConversationLocalCache getPinnedConversations req", "userID", userID) + defer func() { + if err == nil { + log.ZDebug(ctx, "ConversationLocalCache getPinnedConversations return", "userID", userID, "value", val) + } else { + log.ZError(ctx, "ConversationLocalCache getPinnedConversations return", err, "userID", userID) + } + }() + var cache cacheProto[pbconversation.GetPinnedConversationIDsResp] + resp, err := cache.Unmarshal(c.local.Get(ctx, cachekey.GetPinnedConversationIDs(userID), func(ctx context.Context) ([]byte, error) { + log.ZDebug(ctx, "ConversationLocalCache getConversationNotReceiveMessageUserIDs rpc", "userID", userID) + return cache.Marshal(c.client.ConversationClient.GetPinnedConversationIDs(ctx, &pbconversation.GetPinnedConversationIDsReq{UserID: userID})) + })) + if err != nil { + return nil, err + } + return resp.ConversationIDs, nil +} + func (c *ConversationLocalCache) 
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) { res, err := c.getConversationNotReceiveMessageUserIDs(ctx, conversationID) if err != nil { @@ -168,3 +189,7 @@ func (c *ConversationLocalCache) GetConversationNotReceiveMessageUserIDMap(ctx c } return datautil.SliceSet(res.UserIDs), nil } + +func (c *ConversationLocalCache) GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) { + return c.getPinnedConversationIDs(ctx, userID) +} From 3d73bd581e100be388f65b0b6e99c52ef63b3a45 Mon Sep 17 00:00:00 2001 From: chao <48119764+withchao@users.noreply.github.com> Date: Tue, 15 Apr 2025 18:27:39 +0800 Subject: [PATCH 07/14] fix: transferring the group owner to a muted member, incremental version error (#3284) * pb * fix: Modifying other fields while setting IsPrivateChat does not take effect * fix: quote message error revoke * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * upgrading pkg tools * fix * fix * optimize log output * feat: support GetLastMessage * feat: support GetLastMessage * feat: s3 switch * feat: s3 switch * fix: GetUsersOnline * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: seq conversion failed without exiting * fix: DeleteDoc crash * fix: fill send time * fix: fill send time * fix: crash caused by withdrawing messages from users who have left the group * fix: user msg timestamp * seq read config * seq read config * fix: the source message of the reference is withdrawn, and the referenced message is deleted * feat: optimize the default notification.yml * fix: shouldPushOffline * fix: the sorting is wrong after canceling the administrator in group settings * feat: Sending messages supports 
returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * fix: oss specifies content-type when uploading * fix: the version number contains a line break * fix: the version number contains a line break * feat: GetConversationsHasReadAndMaxSeq support pinned * feat: GetConversationsHasReadAndMaxSeq support pinned * feat: GetConversationsHasReadAndMaxSeq support pinned * fix: transferring the group owner to a muted member, incremental version error --- internal/rpc/group/notification.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/rpc/group/notification.go b/internal/rpc/group/notification.go index 1aa5333b4..29c4ef634 100644 --- a/internal/rpc/group/notification.go +++ b/internal/rpc/group/notification.go @@ -283,7 +283,8 @@ func (g *NotificationSender) fillOpUserByUserID(ctx context.Context, userID stri func (g *NotificationSender) setVersion(ctx context.Context, version *uint64, versionID *string, collName string, id string) { versions := versionctx.GetVersionLog(ctx).Get() - for _, coll := range versions { + for i := len(versions) - 1; i >= 0; i-- { + coll := versions[i] if coll.Name == collName && coll.Doc.DID == id { *version = uint64(coll.Doc.Version) *versionID = coll.Doc.ID.Hex() From 08280b9d2a1d411af85e66c2bccb79b1bf8314fd Mon Sep 17 00:00:00 2001 From: icey-yu <119291641+icey-yu@users.noreply.github.com> Date: Thu, 17 Apr 2025 17:28:23 +0800 Subject: [PATCH 08/14] fix: group status in GroupDismissedNotification (#3286) --- internal/rpc/group/group.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index 602c4f3ee..fe7995058 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -1369,6 +1369,7 @@ func (g *groupServer) DismissGroup(ctx context.Context, req *pbgroup.DismissGrou if err != nil { return nil, err } + 
group.Status = constant.GroupStatusDismissed tips := &sdkws.GroupDismissedTips{ Group: g.groupDB2PB(group, owner.UserID, num), OpUser: &sdkws.GroupMemberFullInfo{}, From 338600c3e01ad50abe2cca42321992d45079fb09 Mon Sep 17 00:00:00 2001 From: Monet Lee Date: Tue, 22 Apr 2025 15:52:55 +0800 Subject: [PATCH 09/14] feat: Implement stress test v2. (#3292) * feat: improve stress test code. * feat: Implement stress test v2. --- tools/stress-test-v2/main.go | 736 +++++++++++++++++++++++++++++++++++ tools/stress-test/main.go | 458 ++++++++++++++++++++++ 2 files changed, 1194 insertions(+) create mode 100644 tools/stress-test-v2/main.go create mode 100755 tools/stress-test/main.go diff --git a/tools/stress-test-v2/main.go b/tools/stress-test-v2/main.go new file mode 100644 index 000000000..0c309b9c9 --- /dev/null +++ b/tools/stress-test-v2/main.go @@ -0,0 +1,736 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +// 1. Create 100K New Users +// 2. Create 100 100K Groups +// 3. Create 1000 999 Groups +// 4. Send message to 100K Groups every second +// 5. Send message to 999 Groups every minute + +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + // "", + } + // DefaultGroupID = "" // Use default group ID for testing, need to be created. 
+) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + UserCheck = "/user/account_check" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + GetGroupMemberInfo = "/group/get_group_members_info" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 100000 + Max100KGroup = 100 + Max999Group = 1000 + MaxInviteUserLimit = 999 + + CreateUserTicker = 1 * time.Second + CreateGroupTicker = 1 * time.Second + Create100KGroupTicker = 1 * time.Second + Create999GroupTicker = 1 * time.Second + SendMsgTo100KGroupTicker = 1 * time.Second + SendMsgTo999GroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultUserID string + UserCounter int + CreateUserCounter int + Create100kGroupCounter int + Create999GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { 
+ // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CheckUser(ctx context.Context, userIDs []string) ([]string, error) { + req := pbuser.AccountCheckReq{ + CheckUserIDs: userIDs, + } + + resp, err := st.PostRequest(ctx, 
ApiAddress+UserCheck, &req) + if err != nil { + return nil, err + } + + data := &pbuser.AccountCheckResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + unRegisteredUserIDs := make([]string, 0) + + for _, res := range data.Results { + if res.AccountStatus == constant.UnRegistered { + unRegisteredUserIDs = append(unRegisteredUserIDs, res.UserID) + } + } + + return unRegisteredUserIDs, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) CreateUserBatch(ctx context.Context, userIDs []string) error { + // The method can import a large number of users at once. + var userList []*sdkws.UserInfo + + defer st.Once.Do( + func() { + st.DefaultUserID = userIDs[0] + fmt.Println("Default Send User Created ID:", st.DefaultUserID) + }) + + needUserIDs, err := st.CheckUser(ctx, userIDs) + if err != nil { + return err + } + + for _, userID := range needUserIDs { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + userList = append(userList, user) + } + + req := pbuser.UserRegisterReq{ + Users: userList, + } + + _, err = st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return err + } + + st.UserCounter += len(userList) + return nil +} + +func (st *StressTest) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]string, error) { + needInviteUserIDs := make([]string, 0) + + const maxBatchSize = 500 + if len(userIDs) > maxBatchSize { + for i := 0; i < len(userIDs); i += maxBatchSize { + end := min(i+maxBatchSize, len(userIDs)) + batchUserIDs := userIDs[i:end] + + // log.ZInfo(ctx, "Processing group members batch", "groupID", groupID, 
"batch", i/maxBatchSize+1, + // "batchUserCount", len(batchUserIDs)) + + // Process a single batch + batchReq := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: batchUserIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &batchReq) + if err != nil { + log.ZError(ctx, "Batch query failed", err, "batch", i/maxBatchSize+1) + continue + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + log.ZError(ctx, "Failed to parse batch response", err, "batch", i/maxBatchSize+1) + continue + } + + // Process the batch results + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range batchUserIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + } + + return needInviteUserIDs, nil + } + + req := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: userIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &req) + if err != nil { + return nil, err + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range userIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + + return needInviteUserIDs, nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, groupID string, userIDs []string) error { + req := group.InviteUserToGroupReq{ + GroupID: groupID, + InvitedUserIDs: userIDs, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string, groupID string) error { + contentObj := map[string]any{ + // "content": fmt.Sprintf("index 
%d. The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + "content": fmt.Sprintf("The current time is %s", time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: groupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +// Max userIDs number is 1000 +func (st *StressTest) CreateGroup(ctx context.Context, groupID string, userID string, userIDsList []string) (string, error) { + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: userIDsList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + // st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := 
context.WithCancel(context.Background()) + // ch := make(chan struct{}) + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + go func() { + // time.Sleep(5 * time.Second) + fmt.Println("Force exit") + os.Exit(0) + }() + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + for i := range MaxUser { + userID := fmt.Sprintf("v2_StressTest_User_%d", i) + st.CreatedUsers = append(st.CreatedUsers, userID) + st.CreateUserCounter++ + } + + // err = st.CreateUserBatch(st.Ctx, st.CreatedUsers) + // if err != nil { + // log.ZError(ctx, "Create user failed.", err) + // } + + const batchSize = 1000 + totalUsers := len(st.CreatedUsers) + successCount := 0 + + if st.DefaultUserID == "" && len(st.CreatedUsers) > 0 { + st.DefaultUserID = st.CreatedUsers[0] + } + + for i := 0; i < totalUsers; i += batchSize { + end := min(i+batchSize, totalUsers) + + userBatch := st.CreatedUsers[i:end] + log.ZInfo(st.Ctx, "Creating user batch", "batch", i/batchSize+1, "count", len(userBatch)) + + err = st.CreateUserBatch(st.Ctx, userBatch) + if err != nil { + log.ZError(st.Ctx, "Batch user creation failed", err, "batch", i/batchSize+1) + } else { + successCount += len(userBatch) + log.ZInfo(st.Ctx, "Batch user creation succeeded", "batch", i/batchSize+1, + "progress", fmt.Sprintf("%d/%d", successCount, totalUsers)) + } + } + + // Execute create 100k group + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create100kGroupTicker := 
time.NewTicker(Create100KGroupTicker) + defer create100kGroupTicker.Stop() + + for i := range Max100KGroup { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 100K Group") + return + + case <-create100kGroupTicker.C: + // Create 100K groups + st.Wg.Add(1) + go func(idx int) { + defer st.Wg.Done() + defer func() { + st.Create100kGroupCounter++ + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + + for i := 0; i < MaxUser/MaxInviteUserLimit; i++ { + InviteUserIDs := make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) + + startIdx := max(i*MaxInviteUserLimit, 1) + endIdx := min((i+1)*MaxInviteUserLimit, MaxUser) + + for j := startIdx; j < endIdx; j++ { + userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j) + InviteUserIDs = append(InviteUserIDs, userCreatedID) + } + + if len(InviteUserIDs) == 0 { + log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs) + if err != nil { + log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID) + continue + } + + if len(InviteUserIDs) == 0 { + log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs) + continue + // os.Exit(1) + // return + } + } + }(i) + } + } + }() + + // create 999 groups + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create999GroupTicker := time.NewTicker(Create999GroupTicker) + defer create999GroupTicker.Stop() + + for i := range Max999Group { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 999 
Group") + return + + case <-create999GroupTicker.C: + // Create 999 groups + st.Wg.Add(1) + go func(idx int) { + defer st.Wg.Done() + defer func() { + st.Create999GroupCounter++ + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_1K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + for i := 0; i < MaxUser/MaxInviteUserLimit; i++ { + InviteUserIDs := make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) + + startIdx := max(i*MaxInviteUserLimit, 1) + endIdx := min((i+1)*MaxInviteUserLimit, MaxUser) + + for j := startIdx; j < endIdx; j++ { + userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j) + InviteUserIDs = append(InviteUserIDs, userCreatedID) + } + + if len(InviteUserIDs) == 0 { + log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs) + if err != nil { + log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID) + continue + } + + if len(InviteUserIDs) == 0 { + log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs) + continue + // os.Exit(1) + // return + } + } + }(i) + } + } + }() + + // Send message to 100K groups + st.Wg.Wait() + fmt.Println("All groups created successfully, starting to send messages...") + log.ZInfo(ctx, "All groups created successfully, starting to send messages...") + + var groups100K []string + var groups999 []string + + for i := range Max100KGroup { + groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", i) + groups100K = append(groups100K, groupID) + } + + for i := range Max999Group { + groupID := 
fmt.Sprintf("v2_StressTest_Group_1K_%d", i) + groups999 = append(groups999, groupID) + } + + send100kGroupLimiter := make(chan struct{}, 20) + send999GroupLimiter := make(chan struct{}, 100) + + // execute Send message to 100K groups + go func() { + ticker := time.NewTicker(SendMsgTo100KGroupTicker) + defer ticker.Stop() + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send Message to 100K Group") + return + + case <-ticker.C: + // Send message to 100K groups + for _, groupID := range groups100K { + send100kGroupLimiter <- struct{}{} + go func(groupID string) { + defer func() { <-send100kGroupLimiter }() + if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil { + log.ZError(st.Ctx, "Send message to 100K group failed.", err) + } + }(groupID) + } + // log.ZInfo(st.Ctx, "Send message to 100K groups successfully.") + } + } + }() + + // execute Send message to 999 groups + go func() { + ticker := time.NewTicker(SendMsgTo999GroupTicker) + defer ticker.Stop() + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send Message to 999 Group") + return + + case <-ticker.C: + // Send message to 999 groups + for _, groupID := range groups999 { + send999GroupLimiter <- struct{}{} + go func(groupID string) { + defer func() { <-send999GroupLimiter }() + + if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil { + log.ZError(st.Ctx, "Send message to 999 group failed.", err) + } + }(groupID) + } + // log.ZInfo(st.Ctx, "Send message to 999 groups successfully.") + } + } + }() + + <-st.Ctx.Done() + fmt.Println("Received signal to exit, shutting down...") +} diff --git a/tools/stress-test/main.go b/tools/stress-test/main.go new file mode 100755 index 000000000..6adbd12ee --- /dev/null +++ b/tools/stress-test/main.go @@ -0,0 +1,458 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + 
"github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/relation" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +/* + 1. Create one user every minute + 2. Import target users as friends + 3. Add users to the default group + 4. Send a message to the default group every second, containing index and current timestamp + 5. Create a new group every minute and invite target users to join +*/ + +// !!! ATTENTION: This variable is must be added! +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + "", + } + DefaultGroupID = "" // Use default group ID for testing, need to be created. +) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 10000 + MaxGroup = 1000 + + CreateUserTicker = 1 * time.Minute // Ticker is 1min in create user + SendMessageTicker = 1 * time.Second // Ticker is 1s in send message + CreateGroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultUserID string + UserCounter int + GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg 
sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", 
url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) ImportFriend(ctx context.Context, userID string) error { + req := relation.ImportFriendReq{ + OwnerUserID: userID, + FriendUserIDs: TestTargetUserList, + } + + _, err := st.PostRequest(ctx, ApiAddress+ImportFriend, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, userID string) error { + req := group.InviteUserToGroupReq{ + GroupID: st.DefaultGroupID, + InvitedUserIDs: []string{userID}, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string) error { + contentObj := map[string]any{ + "content": fmt.Sprintf("index %d. 
The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: st.DefaultGroupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +func (st *StressTest) CreateGroup(ctx context.Context, userID string) (string, error) { + groupID := fmt.Sprintf("StressTestGroup_%d_%s", st.GroupCounter, time.Now().Format("20060102150405")) + + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: TestTargetUserList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan struct{}) + + defer 
cancel() + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + select { + case <-ch: + default: + close(ch) + } + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + st.DefaultGroupID = DefaultGroupID + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateUserTicker) + defer ticker.Stop() + + for st.UserCounter < MaxUser { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create user", "reason", "context done") + return + + case <-ticker.C: + // Create User + userID := fmt.Sprintf("%d_Stresstest_%s", st.UserCounter, time.Now().Format("0102150405")) + + userCreatedID, err := st.CreateUser(st.Ctx, userID) + if err != nil { + log.ZError(st.Ctx, "Create User failed.", err, "UserID", userID) + os.Exit(1) + return + } + // fmt.Println("User Created ID:", userCreatedID) + + // Import Friend + if err = st.ImportFriend(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Import Friend failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + // Invite To Group + if err = st.InviteToGroup(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + st.Once.Do(func() { + st.DefaultUserID = userCreatedID + fmt.Println("Default Send User Created ID:", userCreatedID) + close(ch) + }) + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(SendMessageTicker) + 
defer ticker.Stop() + <-ch + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send message", "reason", "context done") + return + + case <-ticker.C: + // Send Message + if err = st.SendMsg(st.Ctx, st.DefaultUserID); err != nil { + log.ZError(st.Ctx, "Send Message failed.", err, "UserID", st.DefaultUserID) + continue + } + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateGroupTicker) + defer ticker.Stop() + <-ch + + for st.GroupCounter < MaxGroup { + + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create Group", "reason", "context done") + return + + case <-ticker.C: + + // Create Group + _, err := st.CreateGroup(st.Ctx, st.DefaultUserID) + if err != nil { + log.ZError(st.Ctx, "Create Group failed.", err, "UserID", st.DefaultUserID) + os.Exit(1) + return + } + + // fmt.Println("Group Created ID:", groupID) + } + } + }() + + st.Wg.Wait() +} From e5bac946cdb7d36de040e3495fe93ce9becc00ec Mon Sep 17 00:00:00 2001 From: skiffer-git <72860476+skiffer-git@users.noreply.github.com> Date: Tue, 22 Apr 2025 16:48:12 +0800 Subject: [PATCH 10/14] License (#3293) * 3.6.1 code conventions (#2203) * Adjust configuration settings * Adjust configuration settings * Adjust configuration settings * refactor: webhooks update. * refactor: kafka update. * Simplify the Docker Compose configuration, remove unnecessary environment variables, and eliminate the gateway service. * refactor: kafka update. * refactor: kafka update. * Simplify the Docker Compose configuration, remove unnecessary environment variables, and eliminate the gateway service. * Simplify the Docker Compose configuration, remove unnecessary environment variables, and eliminate the gateway service. * Windows can compile and run. * Windows can compile and run. * refactor: kafka update. 
* feat: msg cache split * refactor: webhooks update * refactor: webhooks update * refactor: friends update * refactor: group update * refactor: third update * refactor: api update * refactor: crontab update * refactor: msggateway update * mage * mage * refactor: all module update. * check * refactor: all module update. * load config * load config * load config * load config * refactor: all module update. * refactor: all module update. * refactor: all module update. * refactor: all module update. * refactor: all module update. * Optimize Docker configuration and script. * refactor: all module update. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * refactor: all module update. * Optimize Docker configuration and script. * refactor: all module update. * refactor: all module update. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * update tools * update tools * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * update protocol * Optimize Docker configuration and script. * Optimize Docker configuration and script. * refactor: all module update. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. 
* refactor: api remove token auth by redis directly. * Code Refactoring * refactor: websocket auth change to call rpc of auth. * refactor: kick online user and remove token change to call auth rpc. * refactor: kick online user and remove token change to call auth rpc. * refactor: remove msggateway redis. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor webhook * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor webhook * refactor: cmd update. * refactor: cmd update. * fix: runtime: goroutine stack exceeds * refactor: cmd update. * refactor notification * refactor notification * refactor * refactor: cmd update. * refactor: cmd update. * refactor * refactor * refactor * protojson * protojson * protojson * go mod * wrapperspb * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: context update. * refactor: websocket update info. * refactor: websocket update info. * refactor: websocket update info. * refactor: websocket update info. * refactor: api name change. * refactor: debug info. * refactor: debug info. * refactor: debug info. * fix: update file * refactor * refactor * refactor: debug info. * refactor: debug info. * refactor: debug info. * refactor: debug info. * refactor: debug info. * refactor: debug info. * fix: callback update. * fix: callback update. * refactor * fix: update message. 
* fix: msg cache timeout. * refactor * refactor * fix: push update. * fix: push update. * fix: push update. * fix: push update. * fix: push update. * fix: push update. * refactor * refactor * fix: push update. * fix: websocket handle error remove when upgrade error. * fix: priority url * fix: minio config * refactor: add zk logger. * refactor * fix: minio config * refactor * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * fix bug: get localIP * refactor * refactor * refactor * refactor: remove zk logger. * refactor: update tools version. * refactor * refactor: update server version to 3.7.0. * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor: zk log debug. * refactor: zk log debug. * refactor: zk log debug. * refactor: zk log debug. * refactor: zk log debug. * refactor * refactor * refactor * refactor: log level change. * refactor: 3.7.0 code conventions. --------- Co-authored-by: skiffer-git <44203734@qq.com> Co-authored-by: withchao <993506633@qq.com> Co-authored-by: root * update go.mod go.sum (#2209) * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * remove \r * fix bug: get localIP * update some ci file (#2200) * Update openimci.yml * Update golangci-lint.yml * Update e2e-test.yml * 3.6.1 code conventions (#2202) * refactor: webhooks update. * Adjust configuration settings * Adjust configuration settings * Adjust configuration settings * feat: s3 api addr * refactor: webhooks update. * Adjust configuration settings * Adjust configuration settings * Adjust configuration settings * Adjust configuration settings * Adjust configuration settings * Adjust configuration settings * Adjust configuration settings * refactor: webhooks update. * refactor: kafka update. 
* Simplify the Docker Compose configuration, remove unnecessary environment variables, and eliminate the gateway service. * refactor: kafka update. * refactor: kafka update. * Simplify the Docker Compose configuration, remove unnecessary environment variables, and eliminate the gateway service. * Simplify the Docker Compose configuration, remove unnecessary environment variables, and eliminate the gateway service. * Windows can compile and run. * Windows can compile and run. * refactor: kafka update. * feat: msg cache split * refactor: webhooks update * refactor: webhooks update * refactor: friends update * refactor: group update * refactor: third update * refactor: api update * refactor: crontab update * refactor: msggateway update * mage * mage * refactor: all module update. * check * refactor: all module update. * load config * load config * load config * load config * refactor: all module update. * refactor: all module update. * refactor: all module update. * refactor: all module update. * refactor: all module update. * Optimize Docker configuration and script. * refactor: all module update. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * refactor: all module update. * Optimize Docker configuration and script. * refactor: all module update. * refactor: all module update. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * update tools * update tools * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. 
* Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * update protocol * Optimize Docker configuration and script. * Optimize Docker configuration and script. * refactor: all module update. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * Optimize Docker configuration and script. * refactor: api remove token auth by redis directly. * Code Refactoring * refactor: websocket auth change to call rpc of auth. * refactor: kick online user and remove token change to call auth rpc. * refactor: kick online user and remove token change to call auth rpc. * refactor: remove msggateway redis. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor webhook * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor webhook * refactor: cmd update. * refactor: cmd update. * fix: runtime: goroutine stack exceeds * refactor: cmd update. * refactor notification * refactor notification * refactor * refactor: cmd update. * refactor: cmd update. * refactor * refactor * refactor * protojson * protojson * protojson * go mod * wrapperspb * refactor: cmd update. * refactor: cmd update. * refactor: cmd update. * refactor: context update. * refactor: websocket update info. 
* refactor: websocket update info. * refactor: websocket update info. * refactor: websocket update info. * refactor: api name change. * refactor: debug info. * refactor: debug info. * refactor: debug info. * fix: update file * refactor * refactor * refactor: debug info. * refactor: debug info. * refactor: debug info. * refactor: debug info. * refactor: debug info. * refactor: debug info. * fix: callback update. * fix: callback update. * refactor * fix: update message. * fix: msg cache timeout. * refactor * refactor * fix: push update. * fix: push update. * fix: push update. * fix: push update. * fix: push update. * fix: push update. * refactor * refactor * fix: push update. * fix: websocket handle error remove when upgrade error. * fix: priority url * fix: minio config * refactor: add zk logger. * refactor * fix: minio config * refactor * refactor * refactor * refactor * refactor: remove zk logger. * refactor: update tools version. * refactor * refactor: update server version to 3.7.0. * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor * refactor: zk log debug. * refactor: zk log debug. * refactor: zk log debug. * refactor: zk log debug. * refactor: zk log debug. * refactor * refactor * refactor * refactor: log level change. * refactor: 3.7.0 code conventions. 
--------- Co-authored-by: skiffer-git <44203734@qq.com> Co-authored-by: withchao <993506633@qq.com> * update go.mod go.sum * Remove Chinese comments * user localhost for minio * user localhost for minio * Remove Chinese comments * Remove Chinese comments * Remove Chinese comments * Set up 4 instances of transfer * Set up 4 instances of transfer * Add comments to the configuration file * Add comments to the configuration file --------- Co-authored-by: root Co-authored-by: xuan <146319162+wxuanF@users.noreply.github.com> Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com> Co-authored-by: withchao <993506633@qq.com> * Update the document (#2221) * Update the document * Update the document * use openim/openim-admin openim/openim-web image * Update .golangci.yml * Add etcd as a service discovery mechanism * Add etcd as a service discovery mechanism * update * update license * update license * update license * update license * update license --------- Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com> Co-authored-by: withchao <993506633@qq.com> Co-authored-by: root Co-authored-by: xuan <146319162+wxuanF@users.noreply.github.com> --- .golangci.yml | 2 ++ README.md | 10 +++++++++- README_zh_CN.md | 12 ++++++++++-- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a95e980f8..7d6c6b596 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -28,6 +28,8 @@ run: # - util # - .*~ # - api/swagger/docs + + # - server/docs # - components/mnt/config/certs # - logs diff --git a/README.md b/README.md index 9b6606f88..4745b9a37 100644 --- a/README.md +++ b/README.md @@ -131,7 +131,15 @@ Thank you for contributing to building a powerful instant messaging solution! 
## :closed_book: License -This software is licensed under the Apache License 2.0 +This software is licensed under a dual-license model: + +- The GNU Affero General Public License (AGPL), Version 3 or later; **OR** +- Commercial license terms from OpenIMSDK. + +If you wish to use this software under commercial terms, please contact us at: contact@openim.io + +For more information, see: https://www.openim.io/en/licensing + diff --git a/README_zh_CN.md b/README_zh_CN.md index 1dcf21bda..2340ad09a 100644 --- a/README_zh_CN.md +++ b/README_zh_CN.md @@ -131,9 +131,17 @@ 感谢您的贡献,一起来打造强大的即时通讯解决方案! -## :closed_book: 许可证 +## :closed_book: 开源许可证 License + +本软件采用双重授权模型: + +GNU Affero 通用公共许可证(AGPL)第 3 版或更高版本;或 + +来自 OpenIMSDK 的商业授权条款。 + +如需商用,请联系:contact@openim.io +详见:https://www.openim.io/en/licensing -This software is licensed under the Apache License 2.0 ## 🔮 Thanks to our contributors! From 8e9d65561b0f3f0c65f6e52ddf4cfdb8643a3054 Mon Sep 17 00:00:00 2001 From: Monet Lee Date: Fri, 25 Apr 2025 15:57:23 +0800 Subject: [PATCH 11/14] refactor: move stress-test tools location. (#3295) * refactor: move stress-test tools location. * improve stress tools. * improve stress_test-v2 --- test/stress-test-v2/README.md | 19 + test/stress-test-v2/main.go | 759 ++++++++++++++++++++++++++++++++++ test/stress-test/README.md | 19 + test/stress-test/main.go | 458 ++++++++++++++++++++ 4 files changed, 1255 insertions(+) create mode 100644 test/stress-test-v2/README.md create mode 100644 test/stress-test-v2/main.go create mode 100644 test/stress-test/README.md create mode 100755 test/stress-test/main.go diff --git a/test/stress-test-v2/README.md b/test/stress-test-v2/README.md new file mode 100644 index 000000000..cbd4bdbde --- /dev/null +++ b/test/stress-test-v2/README.md @@ -0,0 +1,19 @@ +# Stress Test V2 + +## Usage + +You need set `TestTargetUserList` variables. 
+ +### Build + +```bash + +go build -o test/stress-test-v2/stress-test-v2 test/stress-test-v2/main.go +``` + +### Excute + +```bash + +tools/stress-test-v2/stress-test-v2 -c config/ +``` diff --git a/test/stress-test-v2/main.go b/test/stress-test-v2/main.go new file mode 100644 index 000000000..0e4609964 --- /dev/null +++ b/test/stress-test-v2/main.go @@ -0,0 +1,759 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +// 1. Create 100K New Users +// 2. Create 100 100K Groups +// 3. Create 1000 999 Groups +// 4. Send message to 100K Groups every second +// 5. Send message to 999 Groups every minute + +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + // "", + } + // DefaultGroupID = "" // Use default group ID for testing, need to be created. 
+) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + UserCheck = "/user/account_check" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + GetGroupMemberInfo = "/group/get_group_members_info" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 100000 + Max1kUser = 1000 + Max100KGroup = 100 + Max999Group = 1000 + MaxInviteUserLimit = 999 + + CreateUserTicker = 1 * time.Second + CreateGroupTicker = 1 * time.Second + Create100KGroupTicker = 1 * time.Second + Create999GroupTicker = 1 * time.Second + SendMsgTo100KGroupTicker = 1 * time.Second + SendMsgTo999GroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultUserID string + UserCounter int + CreateUserCounter int + Create100kGroupCounter int + Create999GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody 
any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + // log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CheckUser(ctx context.Context, userIDs []string) ([]string, error) { + req := pbuser.AccountCheckReq{ + CheckUserIDs: userIDs, + } + + resp, 
err := st.PostRequest(ctx, ApiAddress+UserCheck, &req) + if err != nil { + return nil, err + } + + data := &pbuser.AccountCheckResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + unRegisteredUserIDs := make([]string, 0) + + for _, res := range data.Results { + if res.AccountStatus == constant.UnRegistered { + unRegisteredUserIDs = append(unRegisteredUserIDs, res.UserID) + } + } + + return unRegisteredUserIDs, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) CreateUserBatch(ctx context.Context, userIDs []string) error { + // The method can import a large number of users at once. + var userList []*sdkws.UserInfo + + defer st.Once.Do( + func() { + st.DefaultUserID = userIDs[0] + fmt.Println("Default Send User Created ID:", st.DefaultUserID) + }) + + needUserIDs, err := st.CheckUser(ctx, userIDs) + if err != nil { + return err + } + + for _, userID := range needUserIDs { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + userList = append(userList, user) + } + + req := pbuser.UserRegisterReq{ + Users: userList, + } + + _, err = st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return err + } + + st.UserCounter += len(userList) + return nil +} + +func (st *StressTest) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]string, error) { + needInviteUserIDs := make([]string, 0) + + const maxBatchSize = 500 + if len(userIDs) > maxBatchSize { + for i := 0; i < len(userIDs); i += maxBatchSize { + end := min(i+maxBatchSize, len(userIDs)) + batchUserIDs := userIDs[i:end] + + // log.ZInfo(ctx, "Processing group members 
batch", "groupID", groupID, "batch", i/maxBatchSize+1, + // "batchUserCount", len(batchUserIDs)) + + // Process a single batch + batchReq := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: batchUserIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &batchReq) + if err != nil { + log.ZError(ctx, "Batch query failed", err, "batch", i/maxBatchSize+1) + continue + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + log.ZError(ctx, "Failed to parse batch response", err, "batch", i/maxBatchSize+1) + continue + } + + // Process the batch results + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range batchUserIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + } + + return needInviteUserIDs, nil + } + + req := group.GetGroupMembersInfoReq{ + GroupID: groupID, + UserIDs: userIDs, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetGroupMemberInfo, &req) + if err != nil { + return nil, err + } + + data := &group.GetGroupMembersInfoResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return nil, err + } + + existingMembers := make(map[string]bool) + for _, member := range data.Members { + existingMembers[member.UserID] = true + } + + for _, userID := range userIDs { + if !existingMembers[userID] { + needInviteUserIDs = append(needInviteUserIDs, userID) + } + } + + return needInviteUserIDs, nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, groupID string, userIDs []string) error { + req := group.InviteUserToGroupReq{ + GroupID: groupID, + InvitedUserIDs: userIDs, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string, groupID string) error { + contentObj := map[string]any{ + // 
"content": fmt.Sprintf("index %d. The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + "content": fmt.Sprintf("The current time is %s", time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: groupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +// Max userIDs number is 1000 +func (st *StressTest) CreateGroup(ctx context.Context, groupID string, userID string, userIDsList []string) (string, error) { + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: userIDsList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + // st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := 
context.WithCancel(context.Background()) + // ch := make(chan struct{}) + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + go func() { + // time.Sleep(5 * time.Second) + fmt.Println("Force exit") + os.Exit(0) + }() + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + for i := range MaxUser { + userID := fmt.Sprintf("v2_StressTest_User_%d", i) + st.CreatedUsers = append(st.CreatedUsers, userID) + st.CreateUserCounter++ + } + + // err = st.CreateUserBatch(st.Ctx, st.CreatedUsers) + // if err != nil { + // log.ZError(ctx, "Create user failed.", err) + // } + + const batchSize = 1000 + totalUsers := len(st.CreatedUsers) + successCount := 0 + + if st.DefaultUserID == "" && len(st.CreatedUsers) > 0 { + st.DefaultUserID = st.CreatedUsers[0] + } + + for i := 0; i < totalUsers; i += batchSize { + end := min(i+batchSize, totalUsers) + + userBatch := st.CreatedUsers[i:end] + log.ZInfo(st.Ctx, "Creating user batch", "batch", i/batchSize+1, "count", len(userBatch)) + + err = st.CreateUserBatch(st.Ctx, userBatch) + if err != nil { + log.ZError(st.Ctx, "Batch user creation failed", err, "batch", i/batchSize+1) + } else { + successCount += len(userBatch) + log.ZInfo(st.Ctx, "Batch user creation succeeded", "batch", i/batchSize+1, + "progress", fmt.Sprintf("%d/%d", successCount, totalUsers)) + } + } + + // Execute create 100k group + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create100kGroupTicker := 
time.NewTicker(Create100KGroupTicker) + defer create100kGroupTicker.Stop() + + for i := range Max100KGroup { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 100K Group") + return + + case <-create100kGroupTicker.C: + // Create 100K groups + st.Wg.Add(1) + go func(idx int) { + startTime := time.Now() + defer func() { + elapsedTime := time.Since(startTime) + log.ZInfo(st.Ctx, "100K group creation completed", + "groupID", fmt.Sprintf("v2_StressTest_Group_100K_%d", idx), + "index", idx, + "duration", elapsedTime.String()) + }() + + defer st.Wg.Done() + defer func() { + st.Mutex.Lock() + st.Create100kGroupCounter++ + st.Mutex.Unlock() + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + + for i := 0; i <= MaxUser/MaxInviteUserLimit; i++ { + InviteUserIDs := make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) 
+ + startIdx := max(i*MaxInviteUserLimit, 1) + endIdx := min((i+1)*MaxInviteUserLimit, MaxUser) + + for j := startIdx; j < endIdx; j++ { + userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j) + InviteUserIDs = append(InviteUserIDs, userCreatedID) + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs) + if err != nil { + log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID) + continue + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs) + continue + // os.Exit(1) + // return + } + } + }(i) + } + } + }() + + // create 999 groups + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + create999GroupTicker := time.NewTicker(Create999GroupTicker) + defer create999GroupTicker.Stop() + + for i := range Max999Group { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create 999 Group") + return + + case <-create999GroupTicker.C: + // Create 999 groups + st.Wg.Add(1) + go func(idx int) { + startTime := time.Now() + defer func() { + elapsedTime := time.Since(startTime) + log.ZInfo(st.Ctx, "999 group creation completed", + "groupID", fmt.Sprintf("v2_StressTest_Group_1K_%d", idx), + "index", idx, + "duration", elapsedTime.String()) + }() + + defer st.Wg.Done() + defer func() { + st.Mutex.Lock() + st.Create999GroupCounter++ + st.Mutex.Unlock() + }() + + groupID := fmt.Sprintf("v2_StressTest_Group_1K_%d", idx) + + if _, err = st.CreateGroup(st.Ctx, groupID, st.DefaultUserID, TestTargetUserList); err != nil { + log.ZError(st.Ctx, "Create group failed.", err) + // continue + } + for i := 0; i <= Max1kUser/MaxInviteUserLimit; i++ { + InviteUserIDs := 
make([]string, 0) + // ensure TargetUserList is in group + InviteUserIDs = append(InviteUserIDs, TestTargetUserList...) + + startIdx := max(i*MaxInviteUserLimit, 1) + endIdx := min((i+1)*MaxInviteUserLimit, Max1kUser) + + for j := startIdx; j < endIdx; j++ { + userCreatedID := fmt.Sprintf("v2_StressTest_User_%d", j) + InviteUserIDs = append(InviteUserIDs, userCreatedID) + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + InviteUserIDs, err := st.GetGroupMembersInfo(ctx, groupID, InviteUserIDs) + if err != nil { + log.ZError(st.Ctx, "GetGroupMembersInfo failed.", err, "groupID", groupID) + continue + } + + if len(InviteUserIDs) == 0 { + // log.ZWarn(st.Ctx, "InviteUserIDs is empty", nil, "groupID", groupID) + continue + } + + // Invite To Group + if err = st.InviteToGroup(st.Ctx, groupID, InviteUserIDs); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", InviteUserIDs) + continue + // os.Exit(1) + // return + } + } + }(i) + } + } + }() + + // Send message to 100K groups + st.Wg.Wait() + fmt.Println("All groups created successfully, starting to send messages...") + log.ZInfo(ctx, "All groups created successfully, starting to send messages...") + + var groups100K []string + var groups999 []string + + for i := range Max100KGroup { + groupID := fmt.Sprintf("v2_StressTest_Group_100K_%d", i) + groups100K = append(groups100K, groupID) + } + + for i := range Max999Group { + groupID := fmt.Sprintf("v2_StressTest_Group_1K_%d", i) + groups999 = append(groups999, groupID) + } + + send100kGroupLimiter := make(chan struct{}, 20) + send999GroupLimiter := make(chan struct{}, 100) + + // execute Send message to 100K groups + go func() { + ticker := time.NewTicker(SendMsgTo100KGroupTicker) + defer ticker.Stop() + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send Message to 100K Group") + return + + case <-ticker.C: + // Send message to 100K groups + for _, 
groupID := range groups100K { + send100kGroupLimiter <- struct{}{} + go func(groupID string) { + defer func() { <-send100kGroupLimiter }() + if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil { + log.ZError(st.Ctx, "Send message to 100K group failed.", err) + } + }(groupID) + } + // log.ZInfo(st.Ctx, "Send message to 100K groups successfully.") + } + } + }() + + // execute Send message to 999 groups + go func() { + ticker := time.NewTicker(SendMsgTo999GroupTicker) + defer ticker.Stop() + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send Message to 999 Group") + return + + case <-ticker.C: + // Send message to 999 groups + for _, groupID := range groups999 { + send999GroupLimiter <- struct{}{} + go func(groupID string) { + defer func() { <-send999GroupLimiter }() + + if err := st.SendMsg(st.Ctx, st.DefaultUserID, groupID); err != nil { + log.ZError(st.Ctx, "Send message to 999 group failed.", err) + } + }(groupID) + } + // log.ZInfo(st.Ctx, "Send message to 999 groups successfully.") + } + } + }() + + <-st.Ctx.Done() + fmt.Println("Received signal to exit, shutting down...") +} diff --git a/test/stress-test/README.md b/test/stress-test/README.md new file mode 100644 index 000000000..cba93e279 --- /dev/null +++ b/test/stress-test/README.md @@ -0,0 +1,19 @@ +# Stress Test + +## Usage + +You need set `TestTargetUserList` and `DefaultGroupID` variables. 
+ +### Build + +```bash + +go build -o test/stress-test/stress-test test/stress-test/main.go +``` + +### Execute + +```bash + +test/stress-test/stress-test -c config/ +``` diff --git a/test/stress-test/main.go b/test/stress-test/main.go new file mode 100755 index 000000000..6adbd12ee --- /dev/null +++ b/test/stress-test/main.go @@ -0,0 +1,458 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/apistruct" + "github.com/openimsdk/open-im-server/v3/pkg/common/config" + "github.com/openimsdk/protocol/auth" + "github.com/openimsdk/protocol/constant" + "github.com/openimsdk/protocol/group" + "github.com/openimsdk/protocol/relation" + "github.com/openimsdk/protocol/sdkws" + pbuser "github.com/openimsdk/protocol/user" + "github.com/openimsdk/tools/log" + "github.com/openimsdk/tools/system/program" +) + +/* + 1. Create one user every minute + 2. Import target users as friends + 3. Add users to the default group + 4. Send a message to the default group every second, containing index and current timestamp + 5. Create a new group every minute and invite target users to join +*/ + +// !!! ATTENTION: These variables must be set! +var ( + // Use default userIDs List for testing, need to be created. + TestTargetUserList = []string{ + "", + } + DefaultGroupID = "" // Use default group ID for testing, need to be created.
+) + +var ( + ApiAddress string + + // API method + GetAdminToken = "/auth/get_admin_token" + CreateUser = "/user/user_register" + ImportFriend = "/friend/import_friend" + InviteToGroup = "/group/invite_user_to_group" + SendMsg = "/msg/send_msg" + CreateGroup = "/group/create_group" + GetUserToken = "/auth/user_token" +) + +const ( + MaxUser = 10000 + MaxGroup = 1000 + + CreateUserTicker = 1 * time.Minute // Ticker is 1min in create user + SendMessageTicker = 1 * time.Second // Ticker is 1s in send message + CreateGroupTicker = 1 * time.Minute +) + +type BaseResp struct { + ErrCode int `json:"errCode"` + ErrMsg string `json:"errMsg"` + Data json.RawMessage `json:"data"` +} + +type StressTest struct { + Conf *conf + AdminUserID string + AdminToken string + DefaultGroupID string + DefaultUserID string + UserCounter int + GroupCounter int + MsgCounter int + CreatedUsers []string + CreatedGroups []string + Mutex sync.Mutex + Ctx context.Context + Cancel context.CancelFunc + HttpClient *http.Client + Wg sync.WaitGroup + Once sync.Once +} + +type conf struct { + Share config.Share + Api config.API +} + +func initConfig(configDir string) (*config.Share, *config.API, error) { + var ( + share = &config.Share{} + apiConfig = &config.API{} + ) + + err := config.Load(configDir, config.ShareFileName, config.EnvPrefixMap[config.ShareFileName], share) + if err != nil { + return nil, nil, err + } + + err = config.Load(configDir, config.OpenIMAPICfgFileName, config.EnvPrefixMap[config.OpenIMAPICfgFileName], apiConfig) + if err != nil { + return nil, nil, err + } + + return share, apiConfig, nil +} + +// Post Request +func (st *StressTest) PostRequest(ctx context.Context, url string, reqbody any) ([]byte, error) { + // Marshal body + jsonBody, err := json.Marshal(reqbody) + if err != nil { + log.ZError(ctx, "Failed to marshal request body", err, "url", url, "reqbody", reqbody) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(jsonBody)) + if 
err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("operationID", st.AdminUserID) + if st.AdminToken != "" { + req.Header.Set("token", st.AdminToken) + } + + // log.ZInfo(ctx, "Header info is ", "Content-Type", "application/json", "operationID", st.AdminUserID, "token", st.AdminToken) + + resp, err := st.HttpClient.Do(req) + if err != nil { + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody) + return nil, err + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.ZError(ctx, "Failed to read response body", err, "url", url) + return nil, err + } + + var baseResp BaseResp + if err := json.Unmarshal(respBody, &baseResp); err != nil { + log.ZError(ctx, "Failed to unmarshal response body", err, "url", url, "respBody", string(respBody)) + return nil, err + } + + if baseResp.ErrCode != 0 { + err = fmt.Errorf(baseResp.ErrMsg) + log.ZError(ctx, "Failed to send request", err, "url", url, "reqbody", reqbody, "resp", baseResp) + return nil, err + } + + return baseResp.Data, nil +} + +func (st *StressTest) GetAdminToken(ctx context.Context) (string, error) { + req := auth.GetAdminTokenReq{ + Secret: st.Conf.Share.Secret, + UserID: st.AdminUserID, + } + + resp, err := st.PostRequest(ctx, ApiAddress+GetAdminToken, &req) + if err != nil { + return "", err + } + + data := &auth.GetAdminTokenResp{} + if err := json.Unmarshal(resp, &data); err != nil { + return "", err + } + + return data.Token, nil +} + +func (st *StressTest) CreateUser(ctx context.Context, userID string) (string, error) { + user := &sdkws.UserInfo{ + UserID: userID, + Nickname: userID, + } + + req := pbuser.UserRegisterReq{ + Users: []*sdkws.UserInfo{user}, + } + + _, err := st.PostRequest(ctx, ApiAddress+CreateUser, &req) + if err != nil { + return "", err + } + + st.UserCounter++ + return userID, nil +} + +func (st *StressTest) ImportFriend(ctx context.Context, userID string) error { + req 
:= relation.ImportFriendReq{ + OwnerUserID: userID, + FriendUserIDs: TestTargetUserList, + } + + _, err := st.PostRequest(ctx, ApiAddress+ImportFriend, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) InviteToGroup(ctx context.Context, userID string) error { + req := group.InviteUserToGroupReq{ + GroupID: st.DefaultGroupID, + InvitedUserIDs: []string{userID}, + } + _, err := st.PostRequest(ctx, ApiAddress+InviteToGroup, &req) + if err != nil { + return err + } + + return nil +} + +func (st *StressTest) SendMsg(ctx context.Context, userID string) error { + contentObj := map[string]any{ + "content": fmt.Sprintf("index %d. The current time is %s", st.MsgCounter, time.Now().Format("2006-01-02 15:04:05.000")), + } + + req := &apistruct.SendMsgReq{ + SendMsg: apistruct.SendMsg{ + SendID: userID, + SenderNickname: userID, + GroupID: st.DefaultGroupID, + ContentType: constant.Text, + SessionType: constant.ReadGroupChatType, + Content: contentObj, + }, + } + + _, err := st.PostRequest(ctx, ApiAddress+SendMsg, &req) + if err != nil { + log.ZError(ctx, "Failed to send message", err, "userID", userID, "req", &req) + return err + } + + st.MsgCounter++ + + return nil +} + +func (st *StressTest) CreateGroup(ctx context.Context, userID string) (string, error) { + groupID := fmt.Sprintf("StressTestGroup_%d_%s", st.GroupCounter, time.Now().Format("20060102150405")) + + groupInfo := &sdkws.GroupInfo{ + GroupID: groupID, + GroupName: groupID, + GroupType: constant.WorkingGroup, + } + + req := group.CreateGroupReq{ + OwnerUserID: userID, + MemberUserIDs: TestTargetUserList, + GroupInfo: groupInfo, + } + + resp := group.CreateGroupResp{} + + response, err := st.PostRequest(ctx, ApiAddress+CreateGroup, &req) + if err != nil { + return "", err + } + + if err := json.Unmarshal(response, &resp); err != nil { + return "", err + } + + st.GroupCounter++ + + return resp.GroupInfo.GroupID, nil +} + +func main() { + var configPath string + // defaultConfigDir := 
filepath.Join("..", "..", "..", "..", "..", "config") + // flag.StringVar(&configPath, "c", defaultConfigDir, "config path") + flag.StringVar(&configPath, "c", "", "config path") + flag.Parse() + + if configPath == "" { + _, _ = fmt.Fprintln(os.Stderr, "config path is empty") + os.Exit(1) + return + } + + fmt.Printf(" Config Path: %s\n", configPath) + + share, apiConfig, err := initConfig(configPath) + if err != nil { + program.ExitWithError(err) + return + } + + ApiAddress = fmt.Sprintf("http://%s:%s", "127.0.0.1", fmt.Sprint(apiConfig.Api.Ports[0])) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan struct{}) + + defer cancel() + + st := &StressTest{ + Conf: &conf{ + Share: *share, + Api: *apiConfig, + }, + AdminUserID: share.IMAdminUserID[0], + Ctx: ctx, + Cancel: cancel, + HttpClient: &http.Client{ + Timeout: 50 * time.Second, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\nReceived stop signal, stopping...") + + select { + case <-ch: + default: + close(ch) + } + + st.Cancel() + }() + + token, err := st.GetAdminToken(st.Ctx) + if err != nil { + log.ZError(ctx, "Get Admin Token failed.", err, "AdminUserID", st.AdminUserID) + } + + st.AdminToken = token + fmt.Println("Admin Token:", st.AdminToken) + fmt.Println("ApiAddress:", ApiAddress) + + st.DefaultGroupID = DefaultGroupID + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateUserTicker) + defer ticker.Stop() + + for st.UserCounter < MaxUser { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create user", "reason", "context done") + return + + case <-ticker.C: + // Create User + userID := fmt.Sprintf("%d_Stresstest_%s", st.UserCounter, time.Now().Format("0102150405")) + + userCreatedID, err := st.CreateUser(st.Ctx, userID) + if err != nil { + log.ZError(st.Ctx, "Create User failed.", err, "UserID", userID) + os.Exit(1) + return + } + // fmt.Println("User 
Created ID:", userCreatedID) + + // Import Friend + if err = st.ImportFriend(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Import Friend failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + // Invite To Group + if err = st.InviteToGroup(st.Ctx, userCreatedID); err != nil { + log.ZError(st.Ctx, "Invite To Group failed.", err, "UserID", userCreatedID) + os.Exit(1) + return + } + + st.Once.Do(func() { + st.DefaultUserID = userCreatedID + fmt.Println("Default Send User Created ID:", userCreatedID) + close(ch) + }) + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(SendMessageTicker) + defer ticker.Stop() + <-ch + + for { + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Send message", "reason", "context done") + return + + case <-ticker.C: + // Send Message + if err = st.SendMsg(st.Ctx, st.DefaultUserID); err != nil { + log.ZError(st.Ctx, "Send Message failed.", err, "UserID", st.DefaultUserID) + continue + } + } + } + }() + + st.Wg.Add(1) + go func() { + defer st.Wg.Done() + + ticker := time.NewTicker(CreateGroupTicker) + defer ticker.Stop() + <-ch + + for st.GroupCounter < MaxGroup { + + select { + case <-st.Ctx.Done(): + log.ZInfo(st.Ctx, "Stop Create Group", "reason", "context done") + return + + case <-ticker.C: + + // Create Group + _, err := st.CreateGroup(st.Ctx, st.DefaultUserID) + if err != nil { + log.ZError(st.Ctx, "Create Group failed.", err, "UserID", st.DefaultUserID) + os.Exit(1) + return + } + + // fmt.Println("Group Created ID:", groupID) + } + } + }() + + st.Wg.Wait() +} From 27940fc0371b73e3c97656347642628c0044ee09 Mon Sep 17 00:00:00 2001 From: chao <48119764+withchao@users.noreply.github.com> Date: Tue, 22 Apr 2025 17:02:45 +0800 Subject: [PATCH 12/14] feat: GroupApplicationAgreeMemberEnterNotification splitting (#3297) * pb * fix: Modifying other fields while setting IsPrivateChat does not take effect * fix: quote message error revoke * refactoring scheduled tasks 
* refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * refactoring scheduled tasks * upgrading pkg tools * fix * fix * optimize log output * feat: support GetLastMessage * feat: support GetLastMessage * feat: s3 switch * feat: s3 switch * fix: GetUsersOnline * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: SendBusinessNotification supported configuration parameters * feat: seq conversion failed without exiting * fix: DeleteDoc crash * fix: fill send time * fix: fill send time * fix: crash caused by withdrawing messages from users who have left the group * fix: user msg timestamp * seq read config * seq read config * fix: the source message of the reference is withdrawn, and the referenced message is deleted * feat: optimize the default notification.yml * fix: shouldPushOffline * fix: the sorting is wrong after canceling the administrator in group settings * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * feat: Sending messages supports returning fields modified by webhook * fix: oss specifies content-type when uploading * fix: the version number contains a line break * fix: the version number contains a line break * feat: GetConversationsHasReadAndMaxSeq support pinned * feat: GetConversationsHasReadAndMaxSeq support pinned * feat: GetConversationsHasReadAndMaxSeq support pinned * fix: transferring the group owner to a muted member, incremental version error * feat: GroupApplicationAgreeMemberEnterNotification splitting, rpc body size limit * feat: GroupApplicationAgreeMemberEnterNotification splitting, rpc body size limit --- config/share.yml | 6 ++- internal/rpc/group/group.go | 23 ++++++-- internal/rpc/group/notification.go | 6 ++- pkg/common/config/config.go | 12 +++-- pkg/common/startrpc/start.go | 87 
++++++++++++++++++++++++++---- 5 files changed, 114 insertions(+), 20 deletions(-) diff --git a/config/share.yml b/config/share.yml index a5fbeac75..7e463dde0 100644 --- a/config/share.yml +++ b/config/share.yml @@ -6,4 +6,8 @@ imAdminUserID: [ imAdmin ] multiLogin: policy: 1 # max num of tokens in one end - maxNumOneEnd: 30 \ No newline at end of file + maxNumOneEnd: 30 + +rpcMaxBodySize: + requestMaxBodySize: 8388608 + responseMaxBodySize: 8388608 diff --git a/internal/rpc/group/group.go b/internal/rpc/group/group.go index 602c4f3ee..e4fde51a5 100644 --- a/internal/rpc/group/group.go +++ b/internal/rpc/group/group.go @@ -451,12 +451,25 @@ func (g *groupServer) InviteUserToGroup(ctx context.Context, req *pbgroup.Invite return nil, err } - if err := g.db.CreateGroup(ctx, nil, groupMembers); err != nil { - return nil, err - } + const singleQuantity = 50 + for start := 0; start < len(groupMembers); start += singleQuantity { + end := start + singleQuantity + if end > len(groupMembers) { + end = len(groupMembers) + } + currentMembers := groupMembers[start:end] - if err = g.notification.GroupApplicationAgreeMemberEnterNotification(ctx, req.GroupID, opUserID, req.InvitedUserIDs...); err != nil { - return nil, err + if err := g.db.CreateGroup(ctx, nil, currentMembers); err != nil { + return nil, err + } + + userIDs := datautil.Slice(currentMembers, func(e *model.GroupMember) string { + return e.UserID + }) + + if err = g.notification.GroupApplicationAgreeMemberEnterNotification(ctx, req.GroupID, req.SendMessage, opUserID, userIDs...); err != nil { + return nil, err + } } return &pbgroup.InviteUserToGroupResp{}, nil } diff --git a/internal/rpc/group/notification.go b/internal/rpc/group/notification.go index 1aa5333b4..bc08327b4 100644 --- a/internal/rpc/group/notification.go +++ b/internal/rpc/group/notification.go @@ -519,7 +519,11 @@ func (g *NotificationSender) MemberKickedNotification(ctx context.Context, tips g.Notification(ctx, mcontext.GetOpUserID(ctx), 
tips.Group.GroupID, constant.MemberKickedNotification, tips) } -func (g *NotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, invitedOpUserID string, entrantUserID ...string) error { +func (g *NotificationSender) GroupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, SendMessage *bool, invitedOpUserID string, entrantUserID ...string) error { + return g.groupApplicationAgreeMemberEnterNotification(ctx, groupID, SendMessage, invitedOpUserID, entrantUserID...) +} + +func (g *NotificationSender) groupApplicationAgreeMemberEnterNotification(ctx context.Context, groupID string, SendMessage *bool, invitedOpUserID string, entrantUserID ...string) error { var err error defer func() { if err != nil { diff --git a/pkg/common/config/config.go b/pkg/common/config/config.go index ca448083c..6b3bff30f 100644 --- a/pkg/common/config/config.go +++ b/pkg/common/config/config.go @@ -378,9 +378,15 @@ type AfterConfig struct { } type Share struct { - Secret string `mapstructure:"secret"` - IMAdminUserID []string `mapstructure:"imAdminUserID"` - MultiLogin MultiLogin `mapstructure:"multiLogin"` + Secret string `yaml:"secret"` + IMAdminUserID []string `yaml:"imAdminUserID"` + MultiLogin MultiLogin `yaml:"multiLogin"` + RPCMaxBodySize MaxRequestBody `yaml:"rpcMaxBodySize"` +} + +type MaxRequestBody struct { + RequestMaxBodySize int `yaml:"requestMaxBodySize"` + ResponseMaxBodySize int `yaml:"responseMaxBodySize"` } type MultiLogin struct { diff --git a/pkg/common/startrpc/start.go b/pkg/common/startrpc/start.go index 99df537f7..70ea885f7 100644 --- a/pkg/common/startrpc/start.go +++ b/pkg/common/startrpc/start.go @@ -22,6 +22,7 @@ import ( "net/http" "os" "os/signal" + "reflect" "strconv" "syscall" "time" @@ -46,8 +47,41 @@ import ( "google.golang.org/grpc/credentials/insecure" ) -// Start rpc server. 
-func Start[T any](ctx context.Context, discovery *conf.Discovery, prometheusConfig *conf.Prometheus, listenIP, +func init() { + prommetrics.RegistryAll() +} + +func getConfigRpcMaxRequestBody(value reflect.Value) *conf.MaxRequestBody { + for value.Kind() == reflect.Pointer { + value = value.Elem() + } + if value.Kind() == reflect.Struct { + num := value.NumField() + for i := 0; i < num; i++ { + field := value.Field(i) + if !field.CanInterface() { + continue + } + for field.Kind() == reflect.Pointer { + field = field.Elem() + } + switch elem := field.Interface().(type) { + case conf.Share: + return &elem.RPCMaxBodySize + case conf.MaxRequestBody: + return &elem + } + if field.Kind() == reflect.Struct { + if elem := getConfigRpcMaxRequestBody(field); elem != nil { + return elem + } + } + } + } + return nil +} + +func Start[T any](ctx context.Context, disc *conf.Discovery, prometheusConfig *conf.Prometheus, listenIP, registerIP string, autoSetPorts bool, rpcPorts []int, index int, rpcRegisterName string, notification *conf.Notification, config T, watchConfigNames []string, watchServiceNames []string, rpcFn func(ctx context.Context, config T, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error, @@ -65,6 +99,25 @@ func Start[T any](ctx context.Context, discovery *conf.Discovery, prometheusConf conf.InitNotification(notification) } + maxRequestBody := getConfigRpcMaxRequestBody(reflect.ValueOf(config)) + + log.ZDebug(ctx, "rpc start", "rpcMaxRequestBody", maxRequestBody, "rpcRegisterName", rpcRegisterName, "registerIP", registerIP, "listenIP", listenIP) + + options = append(options, + mw.GrpcServer(), + ) + var clientOptions []grpc.DialOption + if maxRequestBody != nil { + if maxRequestBody.RequestMaxBodySize > 0 { + options = append(options, grpc.MaxRecvMsgSize(maxRequestBody.RequestMaxBodySize)) + clientOptions = append(clientOptions, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(maxRequestBody.RequestMaxBodySize))) + } + if 
maxRequestBody.ResponseMaxBodySize > 0 { + options = append(options, grpc.MaxSendMsgSize(maxRequestBody.ResponseMaxBodySize)) + clientOptions = append(clientOptions, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRequestBody.ResponseMaxBodySize))) + } + } + registerIP, err := network.GetRpcRegisterIP(registerIP) if err != nil { return err @@ -101,15 +154,29 @@ func Start[T any](ctx context.Context, discovery *conf.Discovery, prometheusConf } defer client.Close() - client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin"))) + client.AddOption( + mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")), + ) + if len(clientOptions) > 0 { + client.AddOption(clientOptions...) + } - // var reg *prometheus.Registry - // var metric *grpcprometheus.ServerMetrics - if prometheusConfig.Enable { - // cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share) - // reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics) - // options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()), - // grpc.UnaryInterceptor(metric.UnaryServerInterceptor())) + ctx, cancel := context.WithCancelCause(ctx) + + go func() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL) + select { + case <-ctx.Done(): + return + case val := <-sigs: + log.ZDebug(ctx, "recv signal", "signal", val.String()) + cancel(fmt.Errorf("signal %s", val.String())) + } + }() + + if prometheusListenAddr != "" { options = append( options, mw.GrpcServer(), prommetricsUnaryInterceptor(rpcRegisterName), From 841c4b00260dc9c25e6df35623642c3cf29aa879 Mon Sep 17 00:00:00 2001 From: icey-yu <119291641+icey-yu@users.noreply.github.com> Date: Thu, 24 Apr 2025 17:26:52 +0800 Subject: [PATCH 
13/14] fix: data version SetVersion will add record (#3304) --- tools/seq/internal/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/seq/internal/main.go b/tools/seq/internal/main.go index 7e5d5598c..9fd352a96 100644 --- a/tools/seq/internal/main.go +++ b/tools/seq/internal/main.go @@ -337,7 +337,7 @@ func SetVersion(coll *mongo.Collection, key string, version int) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() option := options.Update().SetUpsert(true) - filter := bson.M{"key": key, "value": strconv.Itoa(version)} + filter := bson.M{"key": key} update := bson.M{"$set": bson.M{"key": key, "value": strconv.Itoa(version)}} return mongoutil.UpdateOne(ctx, coll, filter, update, false, option) } From 78aaf6abce679ab1091f48eb7e2590748fafd4ad Mon Sep 17 00:00:00 2001 From: icey-yu <119291641+icey-yu@users.noreply.github.com> Date: Tue, 6 May 2025 15:10:10 +0800 Subject: [PATCH 14/14] fix: delete token by correct platformID && feat: adminToken can be retained for five minutes after deleting (#3313) --- internal/rpc/auth/auth.go | 17 +- pkg/common/storage/cache/cachekey/token.go | 7 +- pkg/common/storage/cache/mcache/token.go | 166 +++++++++++++++++++ pkg/common/storage/cache/redis/token.go | 73 +++++++- pkg/common/storage/cache/token.go | 3 + pkg/common/storage/controller/auth.go | 88 ++++++---- pkg/common/storage/database/mgo/cache.go | 183 +++++++++++++++++++++ 7 files changed, 492 insertions(+), 45 deletions(-) create mode 100644 pkg/common/storage/cache/mcache/token.go create mode 100644 pkg/common/storage/database/mgo/cache.go diff --git a/internal/rpc/auth/auth.go b/internal/rpc/auth/auth.go index 2e64c365c..d34630b2f 100644 --- a/internal/rpc/auth/auth.go +++ b/internal/rpc/auth/auth.go @@ -140,15 +140,17 @@ func (s *authServer) parseToken(ctx context.Context, tokensString string) (claim if err != nil { return nil, err } - isAdmin := authverify.IsManagerUserID(claims.UserID, 
s.config.Share.IMAdminUserID) - if isAdmin { - return claims, nil - } m, err := s.authDatabase.GetTokensWithoutError(ctx, claims.UserID, claims.PlatformID) if err != nil { return nil, err } if len(m) == 0 { + isAdmin := authverify.IsManagerUserID(claims.UserID, s.config.Share.IMAdminUserID) + if isAdmin { + if err = s.authDatabase.GetTemporaryTokensWithoutError(ctx, claims.UserID, claims.PlatformID, tokensString); err == nil { + return claims, nil + } + } return nil, servererrs.ErrTokenNotExist.Wrap() } if v, ok := m[tokensString]; ok { @@ -160,6 +162,13 @@ func (s *authServer) parseToken(ctx context.Context, tokensString string) (claim default: return nil, errs.Wrap(errs.ErrTokenUnknown) } + } else { + isAdmin := authverify.IsManagerUserID(claims.UserID, s.config.Share.IMAdminUserID) + if isAdmin { + if err = s.authDatabase.GetTemporaryTokensWithoutError(ctx, claims.UserID, claims.PlatformID, tokensString); err == nil { + return claims, nil + } + } } return nil, servererrs.ErrTokenNotExist.Wrap() } diff --git a/pkg/common/storage/cache/cachekey/token.go b/pkg/common/storage/cache/cachekey/token.go index 83ba2f211..6fe1bdfef 100644 --- a/pkg/common/storage/cache/cachekey/token.go +++ b/pkg/common/storage/cache/cachekey/token.go @@ -1,8 +1,9 @@ package cachekey import ( - "github.com/openimsdk/protocol/constant" "strings" + + "github.com/openimsdk/protocol/constant" ) const ( @@ -13,6 +14,10 @@ func GetTokenKey(userID string, platformID int) string { return UidPidToken + userID + ":" + constant.PlatformIDToName(platformID) } +func GetTemporaryTokenKey(userID string, platformID int, token string) string { + return UidPidToken + ":TEMPORARY:" + userID + ":" + constant.PlatformIDToName(platformID) + ":" + token +} + func GetAllPlatformTokenKey(userID string) []string { res := make([]string, len(constant.PlatformID2Name)) for k := range constant.PlatformID2Name { diff --git a/pkg/common/storage/cache/mcache/token.go b/pkg/common/storage/cache/mcache/token.go new file 
mode 100644 index 000000000..98b9cc066 --- /dev/null +++ b/pkg/common/storage/cache/mcache/token.go @@ -0,0 +1,166 @@ +package mcache + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/log" +) + +func NewTokenCacheModel(cache database.Cache, accessExpire int64) cache.TokenModel { + c := &tokenCache{cache: cache} + c.accessExpire = c.getExpireTime(accessExpire) + return c +} + +type tokenCache struct { + cache database.Cache + accessExpire time.Duration +} + +func (x *tokenCache) getTokenKey(userID string, platformID int, token string) string { + return cachekey.GetTokenKey(userID, platformID) + ":" + token +} + +func (x *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error { + return x.cache.Set(ctx, x.getTokenKey(userID, platformID, token), strconv.Itoa(flag), x.accessExpire) +} + +// SetTokenFlagEx set token and flag with expire time +func (x *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error { + return x.SetTokenFlag(ctx, userID, platformID, token, flag) +} + +func (x *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) { + prefix := x.getTokenKey(userID, platformID, "") + m, err := x.cache.Prefix(ctx, prefix) + if err != nil { + return nil, errs.Wrap(err) + } + mm := make(map[string]int) + for k, v := range m { + state, err := strconv.Atoi(v) + if err != nil { + log.ZError(ctx, "token value is not int", err, "value", v, "userID", userID, "platformID", platformID) + continue + } + mm[strings.TrimPrefix(k, prefix)] = state + } + return mm, nil +} + +func (x *tokenCache) HasTemporaryToken(ctx 
context.Context, userID string, platformID int, token string) error { + key := cachekey.GetTemporaryTokenKey(userID, platformID, token) + if _, err := x.cache.Get(ctx, []string{key}); err != nil { + return err + } + return nil +} + +func (x *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) { + prefix := cachekey.UidPidToken + userID + ":" + tokens, err := x.cache.Prefix(ctx, prefix) + if err != nil { + return nil, err + } + res := make(map[int]map[string]int) + for key, flagStr := range tokens { + flag, err := strconv.Atoi(flagStr) + if err != nil { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID) + continue + } + arr := strings.SplitN(strings.TrimPrefix(key, prefix), ":", 2) + if len(arr) != 2 { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID) + continue + } + platformID, err := strconv.Atoi(arr[0]) + if err != nil { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID) + continue + } + token := arr[1] + if token == "" { + log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID) + continue + } + tk, ok := res[platformID] + if !ok { + tk = make(map[string]int) + res[platformID] = tk + } + tk[token] = flag + } + return res, nil +} + +func (x *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error { + for token, flag := range m { + err := x.SetTokenFlag(ctx, userID, platformID, token, flag) + if err != nil { + return err + } + } + return nil +} + +func (x *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error { + for prefix, tokenFlag := range tokens { + for token, flag := range tokenFlag { + flagStr := fmt.Sprintf("%v", flag) + if err := x.cache.Set(ctx, prefix+":"+token, flagStr, x.accessExpire); err != nil { + return err + } + } + } + 
return nil +} + +func (x *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error { + keys := make([]string, 0, len(fields)) + for _, token := range fields { + keys = append(keys, x.getTokenKey(userID, platformID, token)) + } + return x.cache.Del(ctx, keys) +} + +func (x *tokenCache) getExpireTime(t int64) time.Duration { + return time.Hour * 24 * time.Duration(t) +} + +func (x *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error { + keys := make([]string, 0, len(tokens)) + for platformID, ts := range tokens { + for _, t := range ts { + keys = append(keys, x.getTokenKey(userID, platformID, t)) + } + } + return x.cache.Del(ctx, keys) +} + +func (x *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error { + keys := make([]string, 0, len(fields)) + for _, f := range fields { + keys = append(keys, x.getTokenKey(userID, platformID, f)) + } + if err := x.cache.Del(ctx, keys); err != nil { + return err + } + + for _, f := range fields { + k := cachekey.GetTemporaryTokenKey(userID, platformID, f) + if err := x.cache.Set(ctx, k, "", time.Minute*5); err != nil { + return errs.Wrap(err) + } + } + + return nil +} diff --git a/pkg/common/storage/cache/redis/token.go b/pkg/common/storage/cache/redis/token.go index 510da43e3..b3870daee 100644 --- a/pkg/common/storage/cache/redis/token.go +++ b/pkg/common/storage/cache/redis/token.go @@ -9,6 +9,7 @@ import ( "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache" "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey" "github.com/openimsdk/tools/errs" + "github.com/openimsdk/tools/utils/datautil" "github.com/redis/go-redis/v9" ) @@ -55,6 +56,14 @@ func (c *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, p return mm, nil } +func (c *tokenCache) HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) 
error { + err := c.rdb.Get(ctx, cachekey.GetTemporaryTokenKey(userID, platformID, token)).Err() + if err != nil { + return errs.Wrap(err) + } + return nil +} + func (c *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) { var ( res = make(map[int]map[string]int) @@ -101,13 +110,19 @@ func (c *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, pla } func (c *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error { - pipe := c.rdb.Pipeline() - for k, v := range tokens { - pipe.HSet(ctx, k, v) - } - _, err := pipe.Exec(ctx) - if err != nil { - return errs.Wrap(err) + keys := datautil.Keys(tokens) + if err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error { + pipe := c.rdb.Pipeline() + for k, v := range tokens { + pipe.HSet(ctx, k, v) + } + _, err := pipe.Exec(ctx) + if err != nil { + return errs.Wrap(err) + } + return nil + }); err != nil { + return err } return nil } @@ -119,3 +134,47 @@ func (c *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, pla func (c *tokenCache) getExpireTime(t int64) time.Duration { return time.Hour * 24 * time.Duration(t) } + +// DeleteTokenByTokenMap tokens key is platformID, value is token slice +func (c *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error { + var ( + keys = make([]string, 0, len(tokens)) + keyMap = make(map[string][]string) + ) + for k, v := range tokens { + k1 := cachekey.GetTokenKey(userID, k) + keys = append(keys, k1) + keyMap[k1] = v + } + + if err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error { + pipe := c.rdb.Pipeline() + for k, v := range tokens { + pipe.HDel(ctx, cachekey.GetTokenKey(userID, k), v...) 
+ } + _, err := pipe.Exec(ctx) + if err != nil { + return errs.Wrap(err) + } + return nil + }); err != nil { + return err + } + + return nil +} + +func (c *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error { + key := cachekey.GetTokenKey(userID, platformID) + if err := c.rdb.HDel(ctx, key, fields...).Err(); err != nil { + return errs.Wrap(err) + } + for _, f := range fields { + k := cachekey.GetTemporaryTokenKey(userID, platformID, f) + if err := c.rdb.Set(ctx, k, "", time.Minute*5).Err(); err != nil { + return errs.Wrap(err) + } + } + + return nil +} diff --git a/pkg/common/storage/cache/token.go b/pkg/common/storage/cache/token.go index e5e0a9383..441c08939 100644 --- a/pkg/common/storage/cache/token.go +++ b/pkg/common/storage/cache/token.go @@ -9,8 +9,11 @@ type TokenModel interface { // SetTokenFlagEx set token and flag with expire time SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) + HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error + DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error + DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error } diff --git a/pkg/common/storage/controller/auth.go b/pkg/common/storage/controller/auth.go index f9061a73b..496a434bf 100644 --- a/pkg/common/storage/controller/auth.go +++ b/pkg/common/storage/controller/auth.go @@ -17,6 +17,8 @@ import 
( type AuthDatabase interface { // If the result is empty, no error is returned. GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) + + GetTemporaryTokensWithoutError(ctx context.Context, userID string, platformID int, token string) error // Create token CreateToken(ctx context.Context, userID string, platformID int) (string, error) @@ -51,6 +53,10 @@ func (a *authDatabase) GetTokensWithoutError(ctx context.Context, userID string, return a.cache.GetTokensWithoutError(ctx, userID, platformID) } +func (a *authDatabase) GetTemporaryTokensWithoutError(ctx context.Context, userID string, platformID int, token string) error { + return a.cache.HasTemporaryToken(ctx, userID, platformID, token) +} + func (a *authDatabase) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error { return a.cache.SetTokenMapByUidPid(ctx, userID, platformID, m) } @@ -86,19 +92,20 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI return "", err } - deleteTokenKey, kickedTokenKey, err := a.checkToken(ctx, tokens, platformID) + deleteTokenKey, kickedTokenKey, adminTokens, err := a.checkToken(ctx, tokens, platformID) + if err != nil { + return "", err + } + if len(deleteTokenKey) != 0 { + err = a.cache.DeleteTokenByTokenMap(ctx, userID, deleteTokenKey) if err != nil { return "", err } - if len(deleteTokenKey) != 0 { - err = a.cache.DeleteTokenByUidPid(ctx, userID, platformID, deleteTokenKey) - if err != nil { - return "", err - } - } - if len(kickedTokenKey) != 0 { - for _, k := range kickedTokenKey { - err := a.cache.SetTokenFlagEx(ctx, userID, platformID, k, constant.KickedToken) + } + if len(kickedTokenKey) != 0 { + for plt, ks := range kickedTokenKey { + for _, k := range ks { + err := a.cache.SetTokenFlagEx(ctx, userID, plt, k, constant.KickedToken) if err != nil { return "", err } @@ -106,6 +113,11 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, 
platformI } } } + if len(adminTokens) != 0 { + if err = a.cache.DeleteAndSetTemporary(ctx, userID, constant.AdminPlatformID, adminTokens); err != nil { + return "", err + } + } claims := tokenverify.BuildClaims(userID, platformID, a.accessExpire) token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) @@ -123,12 +135,13 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI return tokenString, nil } -func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string]int, platformID int) ([]string, []string, error) { - // todo: Move the logic for handling old data to another location. +// checkToken will check token by tokenPolicy and return deleteToken,kickToken,deleteAdminToken +func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string]int, platformID int) (map[int][]string, map[int][]string, []string, error) { + // todo: Asynchronous deletion of old data. var ( loginTokenMap = make(map[int][]string) // The length of the value of the map must be greater than 0 - deleteToken = make([]string, 0) - kickToken = make([]string, 0) + deleteToken = make(map[int][]string) + kickToken = make(map[int][]string) adminToken = make([]string, 0) unkickTerminal = "" ) @@ -137,7 +150,7 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string for k, v := range tks { _, err := tokenverify.GetClaimFromToken(k, authverify.Secret(a.accessSecret)) if err != nil || v != constant.NormalToken { - deleteToken = append(deleteToken, k) + deleteToken[plfID] = append(deleteToken[plfID], k) } else { if plfID != constant.AdminPlatformID { loginTokenMap[plfID] = append(loginTokenMap[plfID], k) @@ -157,14 +170,15 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string } limit := a.multiLogin.MaxNumOneEnd if l > limit { - kickToken = append(kickToken, ts[:l-limit]...) 
+ kickToken[plt] = ts[:l-limit] } } case constant.AllLoginButSameTermKick: for plt, ts := range loginTokenMap { - kickToken = append(kickToken, ts[:len(ts)-1]...) + kickToken[plt] = ts[:len(ts)-1] + if plt == platformID { - kickToken = append(kickToken, ts[len(ts)-1]) + kickToken[plt] = append(kickToken[plt], ts[len(ts)-1]) } } case constant.PCAndOther: @@ -172,29 +186,33 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string if constant.PlatformIDToClass(platformID) != unkickTerminal { for plt, ts := range loginTokenMap { if constant.PlatformIDToClass(plt) != unkickTerminal { - kickToken = append(kickToken, ts...) + kickToken[plt] = ts } } } else { var ( - preKick []string - isReserve = true + preKickToken string + preKickPlt int + reserveToken = false ) for plt, ts := range loginTokenMap { if constant.PlatformIDToClass(plt) != unkickTerminal { // Keep a token from another end - if isReserve { - isReserve = false - kickToken = append(kickToken, ts[:len(ts)-1]...) - preKick = append(preKick, ts[len(ts)-1]) + if !reserveToken { + reserveToken = true + kickToken[plt] = ts[:len(ts)-1] + preKickToken = ts[len(ts)-1] + preKickPlt = plt continue } else { // Prioritize keeping Android if plt == constant.AndroidPlatformID { - kickToken = append(kickToken, preKick...) - kickToken = append(kickToken, ts[:len(ts)-1]...) + if preKickToken != "" { + kickToken[preKickPlt] = append(kickToken[preKickPlt], preKickToken) + } + kickToken[plt] = ts[:len(ts)-1] } else { - kickToken = append(kickToken, ts...) + kickToken[plt] = ts } } } @@ -207,19 +225,19 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string for plt, ts := range loginTokenMap { if constant.PlatformIDToClass(plt) == constant.PlatformIDToClass(platformID) { - kickToken = append(kickToken, ts...) 
+ kickToken[plt] = ts } else { if _, ok := reserved[constant.PlatformIDToClass(plt)]; !ok { reserved[constant.PlatformIDToClass(plt)] = struct{}{} - kickToken = append(kickToken, ts[:len(ts)-1]...) + kickToken[plt] = ts[:len(ts)-1] continue } else { - kickToken = append(kickToken, ts...) + kickToken[plt] = ts } } } default: - return nil, nil, errs.New("unknown multiLogin policy").Wrap() + return nil, nil, nil, errs.New("unknown multiLogin policy").Wrap() } //var adminTokenMaxNum = a.multiLogin.MaxNumOneEnd @@ -233,5 +251,9 @@ func (a *authDatabase) checkToken(ctx context.Context, tokens map[int]map[string //if l > adminTokenMaxNum { // kickToken = append(kickToken, adminToken[:l-adminTokenMaxNum]...) //} - return deleteToken, kickToken, nil + var deleteAdminToken []string + if platformID == constant.AdminPlatformID { + deleteAdminToken = adminToken + } + return deleteToken, kickToken, deleteAdminToken, nil } diff --git a/pkg/common/storage/database/mgo/cache.go b/pkg/common/storage/database/mgo/cache.go new file mode 100644 index 000000000..991dfa874 --- /dev/null +++ b/pkg/common/storage/database/mgo/cache.go @@ -0,0 +1,183 @@ +package mgo + +import ( + "context" + "strconv" + "time" + + "github.com/google/uuid" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database" + "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model" + "github.com/openimsdk/tools/db/mongoutil" + "github.com/openimsdk/tools/errs" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func NewCacheMgo(db *mongo.Database) (*CacheMgo, error) { + coll := db.Collection(database.CacheName) + _, err := coll.Indexes().CreateMany(context.Background(), []mongo.IndexModel{ + { + Keys: bson.D{ + {Key: "key", Value: 1}, + }, + Options: options.Index().SetUnique(true), + }, + { + Keys: bson.D{ + {Key: "expire_at", Value: 1}, + }, + Options: options.Index().SetExpireAfterSeconds(0), + }, + }) + if err != nil { 
+ return nil, errs.Wrap(err) + } + return &CacheMgo{coll: coll}, nil +} + +type CacheMgo struct { + coll *mongo.Collection +} + +func (x *CacheMgo) findToMap(res []model.Cache, now time.Time) map[string]string { + kv := make(map[string]string) + for _, re := range res { + if re.ExpireAt != nil && re.ExpireAt.Before(now) { + continue + } + kv[re.Key] = re.Value + } + return kv + +} + +func (x *CacheMgo) Get(ctx context.Context, key []string) (map[string]string, error) { + if len(key) == 0 { + return nil, nil + } + now := time.Now() + res, err := mongoutil.Find[model.Cache](ctx, x.coll, bson.M{ + "key": bson.M{"$in": key}, + "$or": []bson.M{ + {"expire_at": bson.M{"$gt": now}}, + {"expire_at": nil}, + }, + }) + if err != nil { + return nil, err + } + return x.findToMap(res, now), nil +} + +func (x *CacheMgo) Prefix(ctx context.Context, prefix string) (map[string]string, error) { + now := time.Now() + res, err := mongoutil.Find[model.Cache](ctx, x.coll, bson.M{ + "key": bson.M{"$regex": "^" + prefix}, + "$or": []bson.M{ + {"expire_at": bson.M{"$gt": now}}, + {"expire_at": nil}, + }, + }) + if err != nil { + return nil, err + } + return x.findToMap(res, now), nil +} + +func (x *CacheMgo) Set(ctx context.Context, key string, value string, expireAt time.Duration) error { + cv := &model.Cache{ + Key: key, + Value: value, + } + if expireAt > 0 { + now := time.Now().Add(expireAt) + cv.ExpireAt = &now + } + opt := options.Update().SetUpsert(true) + return mongoutil.UpdateOne(ctx, x.coll, bson.M{"key": key}, bson.M{"$set": cv}, false, opt) +} + +func (x *CacheMgo) Incr(ctx context.Context, key string, value int) (int, error) { + pipeline := mongo.Pipeline{ + { + {"$set", bson.M{ + "value": bson.M{ + "$toString": bson.M{ + "$add": bson.A{ + bson.M{"$toInt": "$value"}, + value, + }, + }, + }, + }}, + }, + } + opt := options.FindOneAndUpdate().SetReturnDocument(options.After) + res, err := mongoutil.FindOneAndUpdate[model.Cache](ctx, x.coll, bson.M{"key": key}, pipeline, opt) + 
if err != nil { + return 0, err + } + return strconv.Atoi(res.Value) +} + +func (x *CacheMgo) Del(ctx context.Context, key []string) error { + if len(key) == 0 { + return nil + } + _, err := x.coll.DeleteMany(ctx, bson.M{"key": bson.M{"$in": key}}) + return errs.Wrap(err) +} + +func (x *CacheMgo) lockKey(key string) string { + return "LOCK_" + key +} + +func (x *CacheMgo) Lock(ctx context.Context, key string, duration time.Duration) (string, error) { + tmp, err := uuid.NewUUID() + if err != nil { + return "", err + } + if duration <= 0 || duration > time.Minute*10 { + duration = time.Minute * 10 + } + cv := &model.Cache{ + Key: x.lockKey(key), + Value: tmp.String(), + ExpireAt: nil, + } + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + wait := func() error { + timeout := time.NewTimer(time.Millisecond * 100) + defer timeout.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-timeout.C: + return nil + } + } + for { + if err := mongoutil.DeleteOne(ctx, x.coll, bson.M{"key": key, "expire_at": bson.M{"$lt": time.Now()}}); err != nil { + return "", err + } + expireAt := time.Now().Add(duration) + cv.ExpireAt = &expireAt + if err := mongoutil.InsertMany[*model.Cache](ctx, x.coll, []*model.Cache{cv}); err != nil { + if mongo.IsDuplicateKeyError(err) { + if err := wait(); err != nil { + return "", err + } + continue + } + return "", err + } + return cv.Value, nil + } +} + +func (x *CacheMgo) Unlock(ctx context.Context, key string, value string) error { + return mongoutil.DeleteOne(ctx, x.coll, bson.M{"key": x.lockKey(key), "value": value}) +}