Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-12-07 21:17:17 +08:00)

Merge branch 'openimsdk:main' into main

Commit 8c7cf042ad

.github/workflows/auto-gh-pr.yml (69 changes)
@@ -24,38 +24,49 @@ on:
types: [created]

jobs:
create-pr:
sync-issue-to-pr:
runs-on: ubuntu-latest
if: github.event.pull_request.base.ref == 'main'
# && github.event.pull_request.merged == true
steps:
- name: Check out code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout code
uses: actions/checkout@v2

- name: Sync Issue to PR
if: github.event_name == 'pull_request' && github.event.pull_request.base.ref == 'main'
run: |
PR_BODY="${{ github.event.pull_request.body }}"

- name: Create PR to release branch
run: |
ISSUEID=$(gh pr view ${{ github.event.pull_request.number }} --repo $OWNER/$REPO | grep -oP 'Fixes #\K\d+')
echo "===========> $ISSUEID"
ISSUE=$(gh issue view $ISSUEID --repo $OWNER/$REPO --json labels,assignees,milestone,title)
echo "===========> $ISSUE"
ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oP 'Fixes #\K\d+')
if [[ -z "$ISSUE_NUMBER" ]]; then
echo "No Issue number found."
exit 1
fi

LABELS=$(echo $ISSUE | jq -r '.labels[] | select(.name) | .name' | jq -R -r -s -c 'split("\n")[:-1] | join(",")')
ASSIGNEES=$(echo $ISSUE | jq -r '.assignees[] | select(.login) | .login' | jq -R -s -c 'split("\n")[:-1] | join(",")')
MILESTONE=$(echo $ISSUE | jq -r '.milestone | select(.title) | .title')
TITLE=$(echo $ISSUE | jq -r '.title')
echo "Issue number found: $ISSUE_NUMBER"

gh pr edit ${{ github.event.pull_request.number }} --repo $OWNER/$REPO --add-label "$LABELS" --add-assignee "$ASSIGNEES" --milestone "$MILESTONE"
# Using GitHub CLI to get issue details
gh issue view "$ISSUE_NUMBER" --repo "${{ github.repository }}" --json labels,assignees,milestone,title > issue_data.json

# git checkout -b bot/merge-to-release-$ISSUEID
# git push origin bot/merge-to-release-$ISSUEID
# gh pr create --base release --head bot/merge-to-release-$ISSUEID --title "Merge main to release" --body ""
# gh pr create --base main --head feat/auto-release-pr-624 --title "The bug is fixed" --body "$x" --repo openimsdk/open-im-server --reviewer "cubxxw"
continue-on-error: true
env:
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
GH_TOKEN: ${{ github.token }}
ISSUE: ${{ github.event.issue.html_url }}
OWNER: ${{ github.repository_owner }}
REPO: ${{ github.event.repository.name }}
# Check if jq is installed
if ! command -v jq &> /dev/null; then
echo "Installing jq..."
sudo apt-get install -y jq
fi

# Parse data with jq
LABELS=$(jq -r '.labels | map(.name) | join(",")' issue_data.json)
ASSIGNEES=$(jq -r '.assignees | map(.login) | join(",")' issue_data.json)
MILESTONE=$(jq -r '.milestone.title' issue_data.json)

# Check if any of the fields are empty and set them to None
LABELS=${LABELS:-None}
ASSIGNEES=${ASSIGNEES:-None}
MILESTONE=${MILESTONE:-None}

# Edit the PR with issue details, handling empty fields
gh pr edit "${{ github.event.pull_request.number }}" --repo "${{ github.repository }}" \
${LABELS:+--add-label "$LABELS"} \
${ASSIGNEES:+--add-assignee "$ASSIGNEES"} \
${MILESTONE:+--milestone "$MILESTONE"}
continue-on-error: true
env:
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
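The core of this step is pulling the linked issue number out of the PR body with grep -oP 'Fixes #\K\d+' and then copying that issue's labels, assignees and milestone onto the PR via gh and jq. As an illustration only (not part of the workflow, and the sample PR body below is made up), the same extraction in Go looks like this:

package main

import (
	"fmt"
	"regexp"
)

// issueRef matches the "Fixes #123" convention used in the PR body,
// mirroring the workflow's `grep -oP 'Fixes #\K\d+'`.
var issueRef = regexp.MustCompile(`Fixes #(\d+)`)

func main() {
	prBody := "This PR improves the docs.\n\nFixes #624" // hypothetical body
	m := issueRef.FindStringSubmatch(prBody)
	if m == nil {
		fmt.Println("No Issue number found.")
		return
	}
	fmt.Println("Issue number found:", m[1]) // -> 624
}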
.github/workflows/auto-invite.yml (2 changes)
@ -39,7 +39,7 @@ jobs:
|
||||
In addition to Slack, we also offer the following ways to get in touch:
|
||||
|
||||
+ <a href="https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q" target="_blank"><img src="https://img.shields.io/badge/Slack-OpenIM%2B-blueviolet?logo=slack&logoColor=white"></a> We also have Slack channels for you to communicate and discuss. To join, visit https://slack.com/ and join our [👀 Open-IM-Server slack](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) team channel.
|
||||
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=winxu81@gmail.com" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a> Get in touch with us on [Gmail](https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=winxu81@gmail.com). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
|
||||
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=info@openim.io" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a> Get in touch with us on [Gmail](https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=winxu81@gmail.com). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
|
||||
+ <a href="https://doc.rentsoft.cn/" target="_blank"><img src="https://img.shields.io/badge/%E5%8D%9A%E5%AE%A2-%40OpenIMSDKCore-blue?style=social&logo=Octopus%20Deploy"></a> Read our [blog](https://doc.rentsoft.cn/). Our blog is a great place to stay up-to-date with Open-IM-Server projects and trends. On the blog, we share our latest developments, tech trends, and other interesting information.
|
||||
+ <a href="https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg" target="_blank"><img src="https://img.shields.io/badge/%E5%BE%AE%E4%BF%A1-OpenIMSDKCore-brightgreen?logo=wechat&style=flat-square"></a> Add [Wechat](https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg) and indicate that you are a user or developer of Open-IM-Server. We will process your request as soon as possible.
|
||||
|
||||
|
||||
.github/workflows/auto-tag.yml (2 changes)

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

name: Create Tag
name: OpenIM Create Tag

on:
issue_comment:
.github/workflows/bot-auto-cherry-pick.yml (2 changes)

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

name: 'Github Rebot for Cherry Pick when PR is merged'
name: Github Rebot for Cherry Pick when PR is merged
on:
pull_request_target:
types:
.github/workflows/check-coverage.yml (2 changes)

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

name: Check-Coverage
name: OpenIM Check Coverage

on:
workflow_dispatch:
.github/workflows/cla.yml (4 changes)

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

name: "OpenIM CLA Assistant"
name: OpenIM CLA Assistant
on:
issue_comment:
types: [created]
@@ -33,7 +33,7 @@ env:
OPEN_IM_SERVER_CLA_DOCUMENT: https://github.com/openim-sigs/cla/blob/main/README.md
OPEN_IM_SERVER_SIGNATURES_PATH: signatures/${{ github.event.repository.name }}/cla.json

OPEN_IM_SERVER_ALLOWLIST: kubbot,bot*
OPEN_IM_SERVER_ALLOWLIST: kubbot,bot*,bot-*,bot/*,bot-/*,bot,*[bot]

jobs:
CLAAssistant:
.github/workflows/create_branch_on_tag.yml (63 changes)
@ -1,17 +1,3 @@
|
||||
# Copyright © 2023 OpenIM. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: Create Branch on Tag
|
||||
|
||||
on:
|
||||
@ -19,6 +5,10 @@ on:
|
||||
tags:
|
||||
- 'v*.*.0'
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
actions: write
|
||||
|
||||
jobs:
|
||||
create-branch:
|
||||
runs-on: ubuntu-latest
|
||||
@ -36,41 +26,38 @@ jobs:
|
||||
- name: Install git-chglog
|
||||
run: make install.git-chglog
|
||||
|
||||
- name: Create Branch
|
||||
- name: Create Branch and Push
|
||||
env:
|
||||
TAG_NAME: ${{ github.ref_name }}
|
||||
run: |
|
||||
TAG_NAME=${GITHUB_REF/refs\/tags\//}
|
||||
IFS='.' read -ra VERSION_PARTS <<< "$TAG_NAME"
|
||||
if [[ "${VERSION_PARTS[2]}" = "0" ]]; then
|
||||
BRANCH_NAME="release-v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}"
|
||||
echo "Creating branch $BRANCH_NAME"
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
git push origin "$BRANCH_NAME"
|
||||
else
|
||||
echo "Not a release tag. Skipping branch creation."
|
||||
fi
|
||||
continue-on-error: true
|
||||
|
||||
- name: Push Changes
|
||||
uses: stefanzweifel/git-auto-commit-action@v5
|
||||
with:
|
||||
commit_message: "Auto Commit CHANGELOG"
|
||||
branch: release-v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
|
||||
|
||||
- name: Create and Push CHANGELOG
|
||||
- name: Create and Commit CHANGELOG
|
||||
if: endsWith(github.ref_name, '.0')
|
||||
run: |
|
||||
git checkout main
|
||||
TAG_NAME=${GITHUB_REF/refs\/tags\//}
|
||||
git fetch --all
|
||||
TAG_NAME=${GITHUB_REF#refs/tags/}
|
||||
IFS='.' read -ra VERSION_PARTS <<< "$TAG_NAME"
|
||||
if [[ "${VERSION_PARTS[2]}" = "0" ]]; then
|
||||
cd CHANGELOG
|
||||
git-chglog --tag-filter-pattern "v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.*" -o "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
|
||||
git add "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
|
||||
git commit -m "Update CHANGELOG for $TAG_NAME"
|
||||
fi
|
||||
git checkout main
|
||||
cd CHANGELOG
|
||||
git-chglog --tag-filter-pattern "v${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.*" -o "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
|
||||
git add "CHANGELOG-${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.md"
|
||||
git commit -m "Update CHANGELOG for $TAG_NAME" || echo "No changes to commit."
|
||||
continue-on-error: true
|
||||
|
||||
- name: Push Changes
|
||||
uses: stefanzweifel/git-auto-commit-action@v5
|
||||
- name: Push CHANGELOG to Main
|
||||
if: steps.create-and-commit-changelog.outputs.changes == 'true'
|
||||
uses: ad-m/github-push-action@v0.6.0
|
||||
with:
|
||||
commit_message: "Auto Commit CHANGELOG"
|
||||
github_token: ${{ secrets.BOT_GITHUB_TOKEN }}
|
||||
branch: main
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
|
||||
continue-on-error: true
|
||||
|
||||
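The branch-creation step splits the tag with IFS='.' read -ra VERSION_PARTS and, for a vX.Y.0 tag, pushes a release-vX.Y branch; any other tag is skipped. A small Go sketch of that naming rule, under the assumption that tags follow the vMAJOR.MINOR.PATCH form (the sample tags are illustrative):

package main

import (
	"fmt"
	"strings"
)

// releaseBranchFor mirrors the workflow logic: a tag like v3.4.0
// (patch component "0") yields the branch name release-v3.4; any
// other tag is not a release tag and produces no branch.
func releaseBranchFor(tag string) (string, bool) {
	parts := strings.Split(tag, ".")
	if len(parts) != 3 || parts[2] != "0" {
		return "", false
	}
	return fmt.Sprintf("release-%s.%s", parts[0], parts[1]), true
}

func main() {
	for _, tag := range []string{"v3.4.0", "v3.4.1"} {
		if branch, ok := releaseBranchFor(tag); ok {
			fmt.Printf("%s -> create branch %s\n", tag, branch)
		} else {
			fmt.Printf("%s -> not a release tag, skipping\n", tag)
		}
	}
}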
.github/workflows/deploy.yml (55 changes)
@ -1,55 +0,0 @@
|
||||
# Copyright © 2023 OpenIM open source community. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: OpenIM Deploy for dev
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'devops' # Only for the dev branch
|
||||
- 'main'
|
||||
paths:
|
||||
- '.github/workflows/*'
|
||||
# - '__test__/**' # dev No immediate testing is required
|
||||
- 'src/**'
|
||||
- 'Dockerfile'
|
||||
- 'docker-compose.yml'
|
||||
- 'bin/*'
|
||||
|
||||
jobs:
|
||||
deploy-dev:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: executing remote ssh commands using password
|
||||
uses: appleboy/ssh-action@v1.0.0
|
||||
env:
|
||||
OWNER: ${{ github.repository_owner }}
|
||||
REPO: ${{ github.event.repository.name }}
|
||||
with:
|
||||
host: "${{ secrets.SG_M1_HOST }}, ${{ secrets.SG_N1_HOST }}, ${{ secrets.SG_N2_HOST}}"
|
||||
username: ${{ secrets.SG_USERNAME }}
|
||||
password: ${{ secrets.SG_PASSWORD }}
|
||||
port: ${{ secrets.SG_PORT }}
|
||||
envs: OWNER,REPO
|
||||
script_stop: true
|
||||
script: |
|
||||
mkdir -p /test/openim
|
||||
cd /test/openim
|
||||
pwd;ls -al
|
||||
echo "OWNER: $OWNER"
|
||||
echo "REPO: $REPO"
|
||||
git clone -b develop https://github.com/${OWNER}/${REPO}.git; cd ${REPO}
|
||||
docker compose up -d
|
||||
continue-on-error: true
|
||||
.github/workflows/depsreview.yaml (2 changes)

@@ -2,7 +2,7 @@
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.

name: Dependency Review
name: OpenIM Dependency Review
on: [pull_request]

permissions:
.github/workflows/e2e-test.yml (2 changes)

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

name: e2e
name: OpenIM E2E Test

on:
workflow_dispatch:
.github/workflows/greetings.yml (2 changes)

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

name: first-interaction
name: OpenIM First Interaction

on:
issues:
.github/workflows/issue-robot.yml (2 changes)

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

name: 'issue translator'
name: OpenIM Issue Aotu Translator
on:
issue_comment:
types: [created]
.github/workflows/opencommit.yml (20 changes)
@ -12,12 +12,13 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: 'OpenIM Commit Action'
|
||||
name: OpenIM OpenCommit Action
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
# this list of branches is often enough,
|
||||
# but you may still ignore other public branches
|
||||
branches-ignore: [main master dev development release]
|
||||
|
||||
jobs:
|
||||
opencommit:
|
||||
@ -27,20 +28,20 @@ jobs:
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Setup Node.js Environment
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '16'
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: di-sukharev/opencommit@github-action-v1.0.4
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GITHUB_TOKEN: ${{ secrets.BOT_GITHUB_TOKEN }}
|
||||
|
||||
env:
|
||||
# set openAI api key in repo actions secrets,
|
||||
# for openAI keys go to: https://platform.openai.com/account/api-keys
|
||||
# for repo secret go to: https://github.com/kuebcub/settings/secrets/actions
|
||||
# for repo secret go to: <your_repo_url>/settings/secrets/actions
|
||||
OCO_OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
|
||||
# customization
|
||||
@ -48,5 +49,6 @@ jobs:
|
||||
OCO_OPENAI_BASE_PATH: ''
|
||||
OCO_DESCRIPTION: false
|
||||
OCO_EMOJI: false
|
||||
OCO_MODEL: gpt-3.5-turbo
|
||||
OCO_LANGUAGE: en
|
||||
OCO_MODEL: gpt-3.5-turbo-16k
|
||||
OCO_LANGUAGE: en
|
||||
OCO_PROMPT_MODULE: conventional-commit
|
||||
.github/workflows/pull-request.yml (29 changes)
@ -41,7 +41,9 @@ jobs:
|
||||
run: |
|
||||
git config user.name 'openimbot'
|
||||
git config user.email 'openimsdk@qq.com'
|
||||
git checkout -b cicd/patch-${{ github.event.number }}
|
||||
BRANCH_NAME="auto-pr-$(date +'%Y%m%d%H%M%S')"
|
||||
git checkout -b $BRANCH_NAME
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v4
|
||||
@ -50,6 +52,7 @@ jobs:
|
||||
sudo make tidy
|
||||
sudo make tools.verify.go-gitlint
|
||||
echo "Run go modules tidy successfully"
|
||||
continue-on-error: true
|
||||
|
||||
- name: Run go format
|
||||
run: |
|
||||
@ -75,13 +78,8 @@ jobs:
|
||||
echo "Run unit test and get test coverage successfully"
|
||||
continue-on-error: true
|
||||
|
||||
# - name: Initialize CodeQL
|
||||
# uses: github/codeql-action/init@v2
|
||||
# with:
|
||||
# languages: go
|
||||
|
||||
# - name: Perform CodeQL Analysis
|
||||
# uses: github/codeql-action/analyze@v2
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v2
|
||||
|
||||
- name: OpenIM verify copyright
|
||||
run: |
|
||||
@ -89,31 +87,26 @@ jobs:
|
||||
sudo make add-copyright
|
||||
echo "OpenIM verify successfully"
|
||||
continue-on-error: true
|
||||
|
||||
# - name: Commit code
|
||||
# run: |
|
||||
# git add .
|
||||
# git commit -m "cicd: bump League Patch to cicd/patch-${{ github.event.number }}"
|
||||
|
||||
|
||||
- name: Create Pull Request
|
||||
uses: peter-evans/create-pull-request@v5
|
||||
with:
|
||||
token: ${{ secrets.BOT_GITHUB_TOKEN }}
|
||||
commit-message: "cicd: bump League Patch to cicd/patch-${{ github.event.number }}"
|
||||
title: Bump League Patch to cicd/patch-${{ github.event.number }}
|
||||
commit-message: "cicd: bump League Patch"
|
||||
author: kubbot <kubbot@3293172751ysy@gmail.com>
|
||||
signoff: false
|
||||
draft: false
|
||||
branch: "cicd/patch-${{ github.event.number }}"
|
||||
branch: ''
|
||||
assignees: cubxxw
|
||||
reviewers: cubxxw
|
||||
delete-branch: true
|
||||
title: "Bump League Patch auto PR: $(date +'%Y%m%d')"
|
||||
body: |
|
||||
Review criteria:
|
||||
|
||||
- [ ] Disenchanter can connect and issue actions
|
||||
|
||||
This is an automated PR. @ ${{ github.actor }}
|
||||
This is an automated PR. @ $(date +'%Y%m%d')
|
||||
<sub>[workflow](https://github.com/openimsdk/open-im-server/blob/main/.github/workflows/pull-request.yml).</sub>
|
||||
base: main
|
||||
labels: |
|
||||
|
||||
.github/workflows/scripts-verify.yml (76 changes)
@ -1,76 +0,0 @@
|
||||
# Copyright © 2023 OpenIM. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: OpenIM executes the script validation code
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths-ignore:
|
||||
- "docs/**"
|
||||
- "README.md"
|
||||
- "README_zh-CN.md"
|
||||
- "CONTRIBUTING.md"
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
paths-ignore:
|
||||
- "README.md"
|
||||
- "README_zh-CN.md"
|
||||
- "CONTRIBUTING.md"
|
||||
- "docs/**"
|
||||
|
||||
env:
|
||||
GO_VERSION: "1.19"
|
||||
GOLANGCI_VERSION: "v1.50.1"
|
||||
|
||||
jobs:
|
||||
openim:
|
||||
name: Test with go ${{ matrix.go_version }} on ${{ matrix.os }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
permissions:
|
||||
contents: write
|
||||
environment:
|
||||
name: openim
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
go_version: ["1.21"]
|
||||
os: [ubuntu-latest]
|
||||
|
||||
steps:
|
||||
- name: Setup
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: In ${{ matrix.os }} Execute the script validation code
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: ${{ matrix.go_version }}
|
||||
id: go
|
||||
|
||||
- name: scripts validation
|
||||
run: |
|
||||
sudo make verify
|
||||
continue-on-error: true
|
||||
|
||||
- name: verify format
|
||||
run: |
|
||||
sudo make format
|
||||
continue-on-error: true
|
||||
|
||||
- name: verify license
|
||||
run: |
|
||||
sudo make verify-copyright
|
||||
continue-on-error: true
|
||||
@@ -60,7 +60,7 @@ representative at an online or offline event.

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
3293172751nss@gmail.com.
`security@openim.io`.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
@@ -186,7 +186,7 @@ We divide the problem into security and general problems:

Security issues are always treated seriously. As our usual principle, we discourage anyone to spread security issues. If you find a security issue of Open-IM-Server, please do not discuss it in public and even do not open a public issue.

Instead we encourage you to send us a private email to winxu81@gmail.com to report this.
Instead we encourage you to send us a private email to info@openim.io to report this.

#### Reporting general issues
@ -369,7 +369,7 @@ Our most recommended way to get in touch is through [Slack](https://join.slack.c
|
||||
In addition to Slack, we also offer the following ways to get in touch:
|
||||
|
||||
+ <a href="https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q" target="_blank"><img src="https://img.shields.io/badge/slack-%40OpenIMSDKCore-informational?logo=slack&style=flat-square"></a>: We also have Slack channels for you to communicate and discuss. To join, visit https://slack.com/ and join our [👀 Open-IM-Server slack](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) team channel.
|
||||
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=4closetool3@gmail.com" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a>: Get in touch with us on [Gmail](winxu81@gmail.com). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
|
||||
+ <a href="https://mail.google.com/mail/u/0/?fs=1&tf=cm&to=4closetool3@gmail.com" target="_blank"><img src="https://img.shields.io/badge/gmail-%40OOpenIMSDKCore?style=social&logo=gmail"></a>: Get in touch with us on [Gmail](info@openim.io). If you have any questions or issues that need resolving, or any suggestions and feedback for our open source projects, please feel free to contact us via email.
|
||||
+ <a href="https://doc.rentsoft.cn/" target="_blank"><img src="https://img.shields.io/badge/%E5%8D%9A%E5%AE%A2-%40OpenIMSDKCore-blue?style=social&logo=Octopus%20Deploy"></a>: Read our [blog](https://doc.rentsoft.cn/). Our blog is a great place to stay up-to-date with Open-IM-Server projects and trends. On the blog, we share our latest developments, tech trends, and other interesting information.
|
||||
+ <a href="https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg" target="_blank"><img src="https://img.shields.io/badge/%E5%BE%AE%E4%BF%A1-OpenIMSDKCore-brightgreen?logo=wechat&style=flat-square"></a>: Add [Wechat](https://github.com/OpenIMSDK/OpenIM-Docs/blob/main/docs/images/WechatIMG20.jpeg) and indicate that you are a user or developer of Open-IM-Server. We will process your request as soon as possible.
|
||||
|
||||
|
||||
@ -17,6 +17,8 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
ginProm "github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
"net"
|
||||
_ "net/http/pprof"
|
||||
"strconv"
|
||||
@ -43,11 +45,11 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func run(port int) error {
|
||||
log.ZInfo(context.Background(), "Openim api port:", "port", port)
|
||||
func run(port int, proPort int) error {
|
||||
log.ZInfo(context.Background(), "Openim api port:", "port", port, "proPort", proPort)
|
||||
|
||||
if port == 0 {
|
||||
err := "port is empty"
|
||||
if port == 0 || proPort == 0 {
|
||||
err := "port or proPort is empty:" + strconv.Itoa(port) + "," + strconv.Itoa(proPort)
|
||||
log.ZError(context.Background(), err, nil)
|
||||
|
||||
return fmt.Errorf(err)
|
||||
@ -82,6 +84,13 @@ func run(port int) error {
|
||||
}
|
||||
log.ZInfo(context.Background(), "api register public config to discov success")
|
||||
router := api.NewGinRouter(client, rdb)
|
||||
//////////////////////////////
|
||||
if config.Config.Prometheus.Enable {
|
||||
p := ginProm.NewPrometheus("app", prom_metrics.GetGinCusMetrics("Api"))
|
||||
p.SetListenAddress(fmt.Sprintf(":%d", proPort))
|
||||
p.Use(router)
|
||||
}
|
||||
/////////////////////////////////
|
||||
log.ZInfo(context.Background(), "api init router success")
|
||||
var address string
|
||||
if config.Config.Api.ListenIP != "" {
|
||||
|
||||
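The change above threads a second port (proPort) through run so that, when Prometheus is enabled, metrics are served separately from the API traffic. A rough standalone sketch of that split, one listener for the application and one for /metrics, using only the standard prometheus client; the ports are placeholders, and the real code wires the project's ginPrometheus wrapper into the Gin router instead:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func run(port, proPort int) error {
	if port == 0 || proPort == 0 {
		return fmt.Errorf("port or proPort is empty: %d,%d", port, proPort)
	}

	// Metrics endpoint on its own port, so scraping never competes
	// with API requests.
	reg := prometheus.NewRegistry()
	go func() {
		mux := http.NewServeMux()
		mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
		log.Println(http.ListenAndServe(fmt.Sprintf(":%d", proPort), mux))
	}()

	// API listener.
	return http.ListenAndServe(fmt.Sprintf(":%d", port), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
}

func main() {
	if err := run(10002, 20100); err != nil { // placeholder ports
		panic(err)
	}
}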
@@ -21,6 +21,7 @@ import (
func main() {
msgTransferCmd := cmd.NewMsgTransferCmd()
msgTransferCmd.AddPrometheusPortFlag()
msgTransferCmd.AddTransferProgressFlag()
if err := msgTransferCmd.Exec(); err != nil {
panic(err.Error())
}
@@ -382,7 +382,9 @@ callback:
# The number of Prometheus ports per service needs to correspond to rpcPort
# The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh
prometheus:
enable: false
enable: true
prometheusUrl: "https://openim.prometheus"
apiPrometheusPort: [20100]
userPrometheusPort: [ 20110 ]
friendPrometheusPort: [ 20120 ]
messagePrometheusPort: [ 20130 ]
@@ -383,6 +383,8 @@ callback:
# The number of ports needs to be consistent with msg_transfer_service_num in script/path_info.sh
prometheus:
enable: ${PROMETHEUS_ENABLE}
prometheusUrl: ${PROMETHEUS_URL}
apiPrometheusPort: [${API_PROM_PORT}]
userPrometheusPort: [ ${USER_PROM_PORT} ]
friendPrometheusPort: [ ${FRIEND_PROM_PORT} ]
messagePrometheusPort: [ ${MESSAGE_PROM_PORT} ]
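These blocks enable Prometheus and give each service its own list of scrape ports (one port per RPC instance, which is why the values are arrays). A hedged sketch of a struct such a block could be unmarshalled into; the type and field names here are illustrative, not the project's actual config definitions, which live in pkg/common/config:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// prometheusConf is a hypothetical mirror of the YAML block above.
type prometheusConf struct {
	Enable                bool   `yaml:"enable"`
	PrometheusURL         string `yaml:"prometheusUrl"`
	APIPrometheusPort     []int  `yaml:"apiPrometheusPort"`
	UserPrometheusPort    []int  `yaml:"userPrometheusPort"`
	FriendPrometheusPort  []int  `yaml:"friendPrometheusPort"`
	MessagePrometheusPort []int  `yaml:"messagePrometheusPort"`
}

func main() {
	raw := []byte(`
enable: true
prometheusUrl: "https://openim.prometheus"
apiPrometheusPort: [20100]
userPrometheusPort: [20110]
friendPrometheusPort: [20120]
messagePrometheusPort: [20130]
`)
	var c prometheusConf
	if err := yaml.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // prints the parsed port lists
}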
go.mod (2 changes)

@@ -37,7 +37,7 @@ require github.com/google/uuid v1.3.1

require (
github.com/IBM/sarama v1.41.3
github.com/OpenIMSDK/protocol v0.0.30
github.com/OpenIMSDK/protocol v0.0.31
github.com/OpenIMSDK/tools v0.0.16
github.com/aliyun/aliyun-oss-go-sdk v2.2.9+incompatible
github.com/go-redis/redis v6.15.9+incompatible
go.sum (4 changes)
@ -18,8 +18,8 @@ firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIw
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c=
|
||||
github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ=
|
||||
github.com/OpenIMSDK/protocol v0.0.30 h1:MiHO6PyQMR9ojBHNnSFxCHLmsoE2xZqaiYj975JiZnM=
|
||||
github.com/OpenIMSDK/protocol v0.0.30/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
|
||||
github.com/OpenIMSDK/protocol v0.0.31 h1:ax43x9aqA6EKNXNukS5MT5BSTqkUmwO4uTvbJLtzCgE=
|
||||
github.com/OpenIMSDK/protocol v0.0.31/go.mod h1:F25dFrwrIx3lkNoiuf6FkCfxuwf8L4Z8UIsdTHP/r0Y=
|
||||
github.com/OpenIMSDK/tools v0.0.16 h1:te/GIq2imCMsrRPgU9OObYKbzZ3rT08Lih/o+3QFIz0=
|
||||
github.com/OpenIMSDK/tools v0.0.16/go.mod h1:eg+q4A34Qmu73xkY0mt37FHGMCMfC6CtmOnm0kFEGFI=
|
||||
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
|
||||
|
||||
@ -39,7 +39,6 @@ import (
|
||||
"github.com/OpenIMSDK/tools/mw"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
|
||||
)
|
||||
|
||||
@ -63,13 +62,6 @@ func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.Unive
|
||||
|
||||
u := NewUserApi(*userRpc)
|
||||
m := NewMessageApi(messageRpc, userRpc)
|
||||
if config.Config.Prometheus.Enable {
|
||||
prome.NewApiRequestCounter()
|
||||
prome.NewApiRequestFailedCounter()
|
||||
prome.NewApiRequestSuccessCounter()
|
||||
r.Use(prome.PrometheusMiddleware)
|
||||
r.GET("/metrics", prome.PrometheusHandler())
|
||||
}
|
||||
ParseToken := GinParseToken(rdb)
|
||||
userRouterGroup := r.Group("/user")
|
||||
{
|
||||
@ -151,6 +143,7 @@ func NewGinRouter(discov discoveryregistry.SvcDiscoveryRegistry, rdb redis.Unive
|
||||
// Third service
|
||||
thirdGroup := r.Group("/third", ParseToken)
|
||||
{
|
||||
thirdGroup.GET("/prometheus", GetPrometheus)
|
||||
t := NewThirdApi(*thirdRpc)
|
||||
thirdGroup.POST("/fcm_update_token", t.FcmUpdateToken)
|
||||
thirdGroup.POST("/set_app_badge", t.SetAppBadge)
|
||||
|
||||
@ -15,6 +15,7 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strconv"
|
||||
@ -118,3 +119,7 @@ func (o *ThirdApi) DeleteLogs(c *gin.Context) {
|
||||
func (o *ThirdApi) SearchLogs(c *gin.Context) {
|
||||
a2r.Call(third.ThirdClient.SearchLogs, o.Client, c)
|
||||
}
|
||||
|
||||
func GetPrometheus(c *gin.Context) {
|
||||
c.Redirect(http.StatusFound, config2.Config.Prometheus.PrometheusUrl)
|
||||
}
|
||||
|
||||
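The new GetPrometheus handler simply redirects the caller to the prometheusUrl from the configuration, giving clients a stable /third/prometheus entry point. A self-contained Gin sketch of the same redirect; the target URL and listen address are hard-coded placeholders here rather than values read from config:

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	prometheusURL := "https://openim.prometheus" // stands in for config.Config.Prometheus.PrometheusUrl

	r := gin.Default()
	r.GET("/third/prometheus", func(c *gin.Context) {
		// 302 redirect to the Prometheus UI, mirroring GetPrometheus.
		c.Redirect(http.StatusFound, prometheusURL)
	})
	_ = r.Run(":10002") // placeholder listen address
}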
@ -20,6 +20,7 @@ import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
|
||||
@ -70,7 +71,7 @@ type Client struct {
|
||||
IsBackground bool `json:"isBackground"`
|
||||
ctx *UserConnContext
|
||||
longConnServer LongConnServer
|
||||
closed bool
|
||||
closed atomic.Bool
|
||||
closedErr error
|
||||
token string
|
||||
}
|
||||
@ -102,18 +103,14 @@ func (c *Client) ResetClient(
|
||||
c.ctx = ctx
|
||||
c.longConnServer = longConnServer
|
||||
c.IsBackground = false
|
||||
c.closed = false
|
||||
c.closed.Store(false)
|
||||
c.closedErr = nil
|
||||
c.token = token
|
||||
}
|
||||
|
||||
func (c *Client) pingHandler(_ string) error {
|
||||
c.conn.SetReadDeadline(pongWait)
|
||||
err := c.writePongMsg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
_ = c.conn.SetReadDeadline(pongWait)
|
||||
return c.writePongMsg()
|
||||
}
|
||||
|
||||
func (c *Client) readMessage() {
|
||||
@ -124,9 +121,11 @@ func (c *Client) readMessage() {
|
||||
}
|
||||
c.close()
|
||||
}()
|
||||
|
||||
c.conn.SetReadLimit(maxMessageSize)
|
||||
_ = c.conn.SetReadDeadline(pongWait)
|
||||
c.conn.SetPingHandler(c.pingHandler)
|
||||
|
||||
for {
|
||||
messageType, message, returnErr := c.conn.ReadMessage()
|
||||
if returnErr != nil {
|
||||
@ -134,11 +133,13 @@ func (c *Client) readMessage() {
|
||||
c.closedErr = returnErr
|
||||
return
|
||||
}
|
||||
|
||||
log.ZDebug(c.ctx, "readMessage", "messageType", messageType)
|
||||
if c.closed == true { // 连接刚置位已经关闭,但是协程还没退出的场景
|
||||
if c.closed.Load() { // 连接刚置位已经关闭,但是协程还没退出的场景
|
||||
c.closedErr = ErrConnClosed
|
||||
return
|
||||
}
|
||||
|
||||
switch messageType {
|
||||
case MessageBinary:
|
||||
_ = c.conn.SetReadDeadline(pongWait)
|
||||
@ -150,9 +151,11 @@ func (c *Client) readMessage() {
|
||||
case MessageText:
|
||||
c.closedErr = ErrNotSupportMessageProtocol
|
||||
return
|
||||
|
||||
case PingMessage:
|
||||
err := c.writePongMsg()
|
||||
log.ZError(c.ctx, "writePongMsg", err)
|
||||
|
||||
case CloseMessage:
|
||||
c.closedErr = ErrClientClosed
|
||||
return
|
||||
@ -163,29 +166,40 @@ func (c *Client) readMessage() {
|
||||
|
||||
func (c *Client) handleMessage(message []byte) error {
|
||||
if c.IsCompress {
|
||||
var decompressErr error
|
||||
message, decompressErr = c.longConnServer.DeCompress(message)
|
||||
if decompressErr != nil {
|
||||
return utils.Wrap(decompressErr, "")
|
||||
var err error
|
||||
message, err = c.longConnServer.DeCompress(message)
|
||||
if err != nil {
|
||||
return utils.Wrap(err, "")
|
||||
}
|
||||
}
|
||||
var binaryReq Req
|
||||
err := c.longConnServer.Decode(message, &binaryReq)
|
||||
|
||||
var binaryReq = getReq()
|
||||
defer freeReq(binaryReq)
|
||||
|
||||
err := c.longConnServer.Decode(message, binaryReq)
|
||||
if err != nil {
|
||||
return utils.Wrap(err, "")
|
||||
}
|
||||
|
||||
if err := c.longConnServer.Validate(binaryReq); err != nil {
|
||||
return utils.Wrap(err, "")
|
||||
}
|
||||
|
||||
if binaryReq.SendID != c.UserID {
|
||||
return utils.Wrap(errors.New("exception conn userID not same to req userID"), binaryReq.String())
|
||||
}
|
||||
|
||||
ctx := mcontext.WithMustInfoCtx(
|
||||
[]string{binaryReq.OperationID, binaryReq.SendID, constant.PlatformIDToName(c.PlatformID), c.ctx.GetConnID()},
|
||||
)
|
||||
|
||||
log.ZDebug(ctx, "gateway req message", "req", binaryReq.String())
|
||||
var messageErr error
|
||||
var resp []byte
|
||||
|
||||
var (
|
||||
resp []byte
|
||||
messageErr error
|
||||
)
|
||||
|
||||
switch binaryReq.ReqIdentifier {
|
||||
case WSGetNewestSeq:
|
||||
resp, messageErr = c.longConnServer.GetSeq(ctx, binaryReq)
|
||||
@ -208,23 +222,29 @@ func (c *Client) handleMessage(message []byte) error {
|
||||
)
|
||||
}
|
||||
|
||||
return c.replyMessage(ctx, &binaryReq, messageErr, resp)
|
||||
return c.replyMessage(ctx, binaryReq, messageErr, resp)
|
||||
}
|
||||
|
||||
func (c *Client) setAppBackgroundStatus(ctx context.Context, req Req) ([]byte, error) {
|
||||
func (c *Client) setAppBackgroundStatus(ctx context.Context, req *Req) ([]byte, error) {
|
||||
resp, isBackground, messageErr := c.longConnServer.SetUserDeviceBackground(ctx, req)
|
||||
if messageErr != nil {
|
||||
return nil, messageErr
|
||||
}
|
||||
|
||||
c.IsBackground = isBackground
|
||||
// todo callback
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *Client) close() {
|
||||
if c.closed.Load() {
|
||||
return
|
||||
}
|
||||
|
||||
c.w.Lock()
|
||||
defer c.w.Unlock()
|
||||
c.closed = true
|
||||
|
||||
c.closed.Store(true)
|
||||
c.conn.Close()
|
||||
c.longConnServer.UnRegister(c)
|
||||
}
|
||||
@ -244,6 +264,7 @@ func (c *Client) replyMessage(ctx context.Context, binaryReq *Req, err error, re
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "wireBinaryMsg replyMessage", err, "resp", mReply.String())
|
||||
}
|
||||
|
||||
if binaryReq.ReqIdentifier == WsLogoutMsg {
|
||||
return errors.New("user logout")
|
||||
}
|
||||
@ -280,36 +301,42 @@ func (c *Client) KickOnlineMessage() error {
|
||||
}
|
||||
|
||||
func (c *Client) writeBinaryMsg(resp Resp) error {
|
||||
c.w.Lock()
|
||||
defer c.w.Unlock()
|
||||
if c.closed == true {
|
||||
if c.closed.Load() {
|
||||
return nil
|
||||
}
|
||||
encodedBuf := bufferPool.Get().([]byte)
|
||||
resultBuf := bufferPool.Get().([]byte)
|
||||
|
||||
encodedBuf, err := c.longConnServer.Encode(resp)
|
||||
if err != nil {
|
||||
return utils.Wrap(err, "")
|
||||
}
|
||||
|
||||
c.w.Lock()
|
||||
defer c.w.Unlock()
|
||||
|
||||
_ = c.conn.SetWriteDeadline(writeWait)
|
||||
if c.IsCompress {
|
||||
var compressErr error
|
||||
resultBuf, compressErr = c.longConnServer.Compress(encodedBuf)
|
||||
resultBuf, compressErr := c.longConnServer.Compress(encodedBuf)
|
||||
if compressErr != nil {
|
||||
return utils.Wrap(compressErr, "")
|
||||
}
|
||||
return c.conn.WriteMessage(MessageBinary, resultBuf)
|
||||
} else {
|
||||
return c.conn.WriteMessage(MessageBinary, encodedBuf)
|
||||
}
|
||||
|
||||
return c.conn.WriteMessage(MessageBinary, encodedBuf)
|
||||
}
|
||||
|
||||
func (c *Client) writePongMsg() error {
|
||||
c.w.Lock()
|
||||
defer c.w.Unlock()
|
||||
if c.closed == true {
|
||||
if c.closed.Load() {
|
||||
return nil
|
||||
}
|
||||
_ = c.conn.SetWriteDeadline(writeWait)
|
||||
|
||||
c.w.Lock()
|
||||
defer c.w.Unlock()
|
||||
|
||||
err := c.conn.SetWriteDeadline(writeWait)
|
||||
if err != nil {
|
||||
return utils.Wrap(err, "")
|
||||
}
|
||||
|
||||
return c.conn.WriteMessage(PongMessage, nil)
|
||||
}
|
||||
|
||||
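The recurring change in this file replaces the plain closed bool field with sync/atomic's atomic.Bool, so readMessage and the writers can check the flag from any goroutine without holding the write lock and without a data race. A small standalone illustration of the pattern (not the gateway code itself):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type conn struct {
	closed atomic.Bool // safe to Load/Store from any goroutine
}

func (c *conn) close() {
	if c.closed.Load() { // already closed: nothing to do
		return
	}
	c.closed.Store(true)
}

func (c *conn) write(msg string) error {
	if c.closed.Load() {
		return nil // mirrors writeBinaryMsg: drop writes after close
	}
	fmt.Println("write:", msg)
	return nil
}

func main() {
	c := &conn{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) { // concurrent checks of the flag are race-free
			defer wg.Done()
			_ = c.write(fmt.Sprintf("msg %d", i))
			c.close()
		}(i)
	}
	wg.Wait()
}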
@@ -124,7 +124,9 @@ func (c *UserConnContext) GetOperationID() string {
}

func (c *UserConnContext) SetOperationID(operationID string) {
c.Req.URL.Query().Set(OperationID, operationID)
values := c.Req.URL.Query()
values.Set(OperationID, operationID)
c.Req.URL.RawQuery = values.Encode()
}

func (c *UserConnContext) GetToken() string {
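The fix matters because Request.URL.Query() returns a freshly parsed copy of the query string, so calling Set on it never touches the request; the mutated values have to be re-encoded back into RawQuery. A quick demonstration with made-up query values:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example.com/ws?platform=1", nil)

	// Broken: Query() returns a copy, so this Set is silently lost.
	req.URL.Query().Set("operationID", "op-123")
	fmt.Println(req.URL.RawQuery) // platform=1

	// Working: mutate the copy, then write it back to RawQuery.
	values := req.URL.Query()
	values.Set("operationID", "op-123")
	req.URL.RawQuery = values.Encode()
	fmt.Println(req.URL.RawQuery) // operationID=op-123&platform=1
}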
@ -33,7 +33,6 @@ import (
|
||||
"github.com/OpenIMSDK/tools/utils"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
|
||||
)
|
||||
|
||||
@ -69,9 +68,10 @@ func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
|
||||
s.LongConnServer = LongConnServer
|
||||
}
|
||||
|
||||
func NewServer(rpcPort int, longConnServer LongConnServer) *Server {
|
||||
func NewServer(rpcPort int, proPort int, longConnServer LongConnServer) *Server {
|
||||
return &Server{
|
||||
rpcPort: rpcPort,
|
||||
prometheusPort: proPort,
|
||||
LongConnServer: longConnServer,
|
||||
pushTerminal: []int{constant.IOSPlatformID, constant.AndroidPlatformID},
|
||||
}
|
||||
@ -158,7 +158,6 @@ func (s *Server) SuperGroupOnlineBatchPushOneMsg(
|
||||
} else {
|
||||
if utils.IsContainInt(client.PlatformID, s.pushTerminal) {
|
||||
tempT.OnlinePush = true
|
||||
prome.Inc(prome.MsgOnlinePushSuccessCounter)
|
||||
resp = append(resp, temp)
|
||||
}
|
||||
}
|
||||
|
||||
@ -41,7 +41,7 @@ func RunWsAndServer(rpcPort, wsPort, prometheusPort int) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hubServer := NewServer(rpcPort, longServer)
|
||||
hubServer := NewServer(rpcPort, prometheusPort, longServer)
|
||||
go func() {
|
||||
err := hubServer.Start()
|
||||
if err != nil {
|
||||
|
||||
@ -16,6 +16,7 @@ package msggateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/OpenIMSDK/protocol/push"
|
||||
"github.com/OpenIMSDK/tools/discoveryregistry"
|
||||
@ -49,6 +50,27 @@ func (r *Req) String() string {
|
||||
return utils.StructToJsonString(tReq)
|
||||
}
|
||||
|
||||
var reqPool = sync.Pool{
|
||||
New: func() any {
|
||||
return new(Req)
|
||||
},
|
||||
}
|
||||
|
||||
func getReq() *Req {
|
||||
req := reqPool.Get().(*Req)
|
||||
req.Data = nil
|
||||
req.MsgIncr = ""
|
||||
req.OperationID = ""
|
||||
req.ReqIdentifier = 0
|
||||
req.SendID = ""
|
||||
req.Token = ""
|
||||
return req
|
||||
}
|
||||
|
||||
func freeReq(req *Req) {
|
||||
reqPool.Put(req)
|
||||
}
|
||||
|
||||
type Resp struct {
|
||||
ReqIdentifier int32 `json:"reqIdentifier"`
|
||||
MsgIncr string `json:"msgIncr"`
|
||||
@ -69,12 +91,12 @@ func (r *Resp) String() string {
|
||||
}
|
||||
|
||||
type MessageHandler interface {
|
||||
GetSeq(context context.Context, data Req) ([]byte, error)
|
||||
SendMessage(context context.Context, data Req) ([]byte, error)
|
||||
SendSignalMessage(context context.Context, data Req) ([]byte, error)
|
||||
PullMessageBySeqList(context context.Context, data Req) ([]byte, error)
|
||||
UserLogout(context context.Context, data Req) ([]byte, error)
|
||||
SetUserDeviceBackground(context context.Context, data Req) ([]byte, bool, error)
|
||||
GetSeq(context context.Context, data *Req) ([]byte, error)
|
||||
SendMessage(context context.Context, data *Req) ([]byte, error)
|
||||
SendSignalMessage(context context.Context, data *Req) ([]byte, error)
|
||||
PullMessageBySeqList(context context.Context, data *Req) ([]byte, error)
|
||||
UserLogout(context context.Context, data *Req) ([]byte, error)
|
||||
SetUserDeviceBackground(context context.Context, data *Req) ([]byte, bool, error)
|
||||
}
|
||||
|
||||
var _ MessageHandler = (*GrpcHandler)(nil)
|
||||
@ -94,7 +116,7 @@ func NewGrpcHandler(validate *validator.Validate, client discoveryregistry.SvcDi
|
||||
}
|
||||
}
|
||||
|
||||
func (g GrpcHandler) GetSeq(context context.Context, data Req) ([]byte, error) {
|
||||
func (g GrpcHandler) GetSeq(context context.Context, data *Req) ([]byte, error) {
|
||||
req := sdkws.GetMaxSeqReq{}
|
||||
if err := proto.Unmarshal(data.Data, &req); err != nil {
|
||||
return nil, err
|
||||
@ -113,7 +135,7 @@ func (g GrpcHandler) GetSeq(context context.Context, data Req) ([]byte, error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (g GrpcHandler) SendMessage(context context.Context, data Req) ([]byte, error) {
|
||||
func (g GrpcHandler) SendMessage(context context.Context, data *Req) ([]byte, error) {
|
||||
msgData := sdkws.MsgData{}
|
||||
if err := proto.Unmarshal(data.Data, &msgData); err != nil {
|
||||
return nil, err
|
||||
@ -133,7 +155,7 @@ func (g GrpcHandler) SendMessage(context context.Context, data Req) ([]byte, err
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (g GrpcHandler) SendSignalMessage(context context.Context, data Req) ([]byte, error) {
|
||||
func (g GrpcHandler) SendSignalMessage(context context.Context, data *Req) ([]byte, error) {
|
||||
resp, err := g.msgRpcClient.SendMsg(context, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -145,7 +167,7 @@ func (g GrpcHandler) SendSignalMessage(context context.Context, data Req) ([]byt
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (g GrpcHandler) PullMessageBySeqList(context context.Context, data Req) ([]byte, error) {
|
||||
func (g GrpcHandler) PullMessageBySeqList(context context.Context, data *Req) ([]byte, error) {
|
||||
req := sdkws.PullMessageBySeqsReq{}
|
||||
if err := proto.Unmarshal(data.Data, &req); err != nil {
|
||||
return nil, err
|
||||
@ -164,7 +186,7 @@ func (g GrpcHandler) PullMessageBySeqList(context context.Context, data Req) ([]
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (g GrpcHandler) UserLogout(context context.Context, data Req) ([]byte, error) {
|
||||
func (g GrpcHandler) UserLogout(context context.Context, data *Req) ([]byte, error) {
|
||||
req := push.DelUserPushTokenReq{}
|
||||
if err := proto.Unmarshal(data.Data, &req); err != nil {
|
||||
return nil, err
|
||||
@ -180,7 +202,7 @@ func (g GrpcHandler) UserLogout(context context.Context, data Req) ([]byte, erro
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (g GrpcHandler) SetUserDeviceBackground(_ context.Context, data Req) ([]byte, bool, error) {
|
||||
func (g GrpcHandler) SetUserDeviceBackground(_ context.Context, data *Req) ([]byte, bool, error) {
|
||||
req := sdkws.SetAppBackgroundStatusReq{}
|
||||
if err := proto.Unmarshal(data.Data, &req); err != nil {
|
||||
return nil, false, err
|
||||
|
||||
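Two related changes run through this file: the MessageHandler methods now take *Req instead of Req, and each incoming request is taken from a sync.Pool (getReq/freeReq) with its fields cleared before reuse, which cuts per-message allocations on a busy gateway. A generic sketch of that pooling pattern, using a simplified request type rather than the real Req:

package main

import (
	"fmt"
	"sync"
)

// request stands in for the gateway's Req; only the reset logic matters here.
type request struct {
	ReqIdentifier int32
	SendID        string
	Data          []byte
}

var reqPool = sync.Pool{
	New: func() any { return new(request) },
}

// getReq returns a pooled request with every field zeroed, so state from a
// previous message can never leak into the next one.
func getReq() *request {
	r := reqPool.Get().(*request)
	r.ReqIdentifier = 0
	r.SendID = ""
	r.Data = nil
	return r
}

func freeReq(r *request) { reqPool.Put(r) }

func handle(payload []byte) {
	r := getReq()
	defer freeReq(r)
	r.Data = payload
	fmt.Printf("handled %d bytes\n", len(r.Data))
}

func main() {
	handle([]byte("hello"))
	handle([]byte("world"))
}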
@ -17,6 +17,7 @@ package msggateway
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
@ -220,6 +221,7 @@ func (ws *WsServer) registerClient(client *Client) {
|
||||
if !userOK {
|
||||
ws.clients.Set(client.UserID, client)
|
||||
log.ZDebug(client.ctx, "user not exist", "userID", client.UserID, "platformID", client.PlatformID)
|
||||
prom_metrics.OnlineUserGauge.Add(1)
|
||||
ws.onlineUserNum.Add(1)
|
||||
ws.onlineUserConnNum.Add(1)
|
||||
} else {
|
||||
@ -364,6 +366,7 @@ func (ws *WsServer) unregisterClient(client *Client) {
|
||||
isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr())
|
||||
if isDeleteUser {
|
||||
ws.onlineUserNum.Add(-1)
|
||||
prom_metrics.OnlineUserGauge.Dec()
|
||||
}
|
||||
ws.onlineUserConnNum.Add(-1)
|
||||
ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
|
||||
|
||||
@ -15,13 +15,18 @@
|
||||
package msgtransfer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery_register"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/OpenIMSDK/tools/mw"
|
||||
|
||||
@ -31,7 +36,6 @@ import (
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/relation"
|
||||
relationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/relation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/unrelation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
|
||||
)
|
||||
|
||||
@ -81,7 +85,6 @@ func StartTransfer(prometheusPort int) error {
|
||||
conversationRpcClient := rpcclient.NewConversationRpcClient(client)
|
||||
groupRpcClient := rpcclient.NewGroupRpcClient(client)
|
||||
msgTransfer := NewMsgTransfer(chatLogDatabase, msgDatabase, &conversationRpcClient, &groupRpcClient)
|
||||
msgTransfer.initPrometheus()
|
||||
return msgTransfer.Start(prometheusPort)
|
||||
}
|
||||
|
||||
@ -95,21 +98,13 @@ func NewMsgTransfer(chatLogDatabase controller.ChatLogDatabase,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MsgTransfer) initPrometheus() {
|
||||
prome.NewSeqGetSuccessCounter()
|
||||
prome.NewSeqGetFailedCounter()
|
||||
prome.NewSeqSetSuccessCounter()
|
||||
prome.NewSeqSetFailedCounter()
|
||||
prome.NewMsgInsertRedisSuccessCounter()
|
||||
prome.NewMsgInsertRedisFailedCounter()
|
||||
prome.NewMsgInsertMongoSuccessCounter()
|
||||
prome.NewMsgInsertMongoFailedCounter()
|
||||
}
|
||||
|
||||
func (m *MsgTransfer) Start(prometheusPort int) error {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
fmt.Println("start msg transfer", "prometheusPort:", prometheusPort)
|
||||
if prometheusPort <= 0 {
|
||||
return errors.New("prometheusPort not correct")
|
||||
}
|
||||
if config.Config.ChatPersistenceMysql {
|
||||
// go m.persistentCH.persistentConsumerGroup.RegisterHandleAndConsumer(m.persistentCH)
|
||||
} else {
|
||||
@ -118,10 +113,21 @@ func (m *MsgTransfer) Start(prometheusPort int) error {
|
||||
go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyCH)
|
||||
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.historyMongoCH)
|
||||
// go m.modifyCH.modifyMsgConsumerGroup.RegisterHandleAndConsumer(m.modifyCH)
|
||||
err := prome.StartPrometheusSrv(prometheusPort)
|
||||
/*err := prome.StartPrometheusSrv(prometheusPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}*/
|
||||
////////////////////////////
|
||||
if config.Config.Prometheus.Enable {
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(
|
||||
collectors.NewGoCollector(),
|
||||
)
|
||||
reg.MustRegister(prom_metrics.GetGrpcCusMetrics("Transfer")...)
|
||||
http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
|
||||
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil))
|
||||
}
|
||||
////////////////////////////////////////
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
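Instead of the removed prome.StartPrometheusSrv helper, the transfer service now builds its own registry, adds the Go runtime collector plus its custom metrics, and serves them with promhttp on the configured port. A trimmed-down sketch of that wiring; the counter and port below are stand-ins, while the real service registers the metrics returned by prom_metrics.GetGrpcCusMetrics("Transfer"):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// msgInsertMongoSuccess is an illustrative counter only.
var msgInsertMongoSuccess = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "msg_insert_mongo_success_total",
	Help: "Messages successfully persisted to MongoDB.",
})

func main() {
	prometheusPort := 21400 // placeholder port

	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector()) // Go runtime metrics
	reg.MustRegister(msgInsertMongoSuccess)       // service metrics

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil))
}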
@ -16,6 +16,7 @@ package msgtransfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
|
||||
"github.com/IBM/sarama"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@ -74,6 +75,9 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(
|
||||
"conversationID",
|
||||
msgFromMQ.ConversationID,
|
||||
)
|
||||
prom_metrics.MsgInsertMongoFailedCounter.Inc()
|
||||
} else {
|
||||
prom_metrics.MsgInsertMongoSuccessCounter.Inc()
|
||||
}
|
||||
var seqs []int64
|
||||
for _, msg := range msgFromMQ.MsgData {
|
||||
|
||||
@ -14,10 +14,6 @@
|
||||
|
||||
package push
|
||||
|
||||
import (
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
)
|
||||
|
||||
type Consumer struct {
|
||||
pushCh ConsumerHandler
|
||||
successCount uint64
|
||||
@ -29,11 +25,6 @@ func NewConsumer(pusher *Pusher) *Consumer {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Consumer) initPrometheus() {
|
||||
prome.NewMsgOfflinePushSuccessCounter()
|
||||
prome.NewMsgOfflinePushFailedCounter()
|
||||
}
|
||||
|
||||
func (c *Consumer) Start() {
|
||||
// statistics.NewStatistics(&c.successCount, config.Config.ModuleName.PushName, fmt.Sprintf("%d second push to
|
||||
// msg_gateway count", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
|
||||
|
||||
internal/push/offlinepush/dummy/push.go (17 lines, new file)

@@ -0,0 +1,17 @@
package dummy

import (
"context"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
)

func NewClient() *Dummy {
return &Dummy{}
}

type Dummy struct {
}

func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *offlinepush.Opts) error {
return nil
}
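This new dummy client gives NewOfflinePusher a safe default: when no push vendor is configured, the default branch now returns a no-op implementation instead of leaving the interface nil. A minimal version of the same null-object idea, using a stand-in interface rather than the real offlinepush.OfflinePusher:

package main

import (
	"context"
	"fmt"
)

// pusher is a stand-in for offlinepush.OfflinePusher.
type pusher interface {
	Push(ctx context.Context, userIDs []string, title, content string) error
}

// dummy satisfies the interface but does nothing, so callers never
// have to nil-check the configured pusher.
type dummy struct{}

func (dummy) Push(ctx context.Context, userIDs []string, title, content string) error { return nil }

func newOfflinePusher(vendor string) pusher {
	switch vendor {
	// case "getui", "fcm", "jpush": the real clients would be returned here.
	default:
		return dummy{}
	}
}

func main() {
	p := newOfflinePusher("") // nothing configured
	fmt.Println(p.Push(context.Background(), []string{"u1"}, "title", "content")) // <nil>
}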
@ -67,7 +67,6 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
consumer := NewConsumer(pusher)
|
||||
consumer.initPrometheus()
|
||||
consumer.Start()
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
@ -18,6 +18,8 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/dummy"
|
||||
|
||||
"github.com/OpenIMSDK/protocol/conversation"
|
||||
|
||||
@ -39,7 +41,6 @@ import (
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/cache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
|
||||
)
|
||||
|
||||
@ -82,6 +83,8 @@ func NewOfflinePusher(cache cache.MsgModel) offlinepush.OfflinePusher {
|
||||
offlinePusher = fcm.NewClient(cache)
|
||||
case "jpush":
|
||||
offlinePusher = jpush.NewClient()
|
||||
default:
|
||||
offlinePusher = dummy.NewClient()
|
||||
}
|
||||
return offlinePusher
|
||||
}
|
||||
@ -285,10 +288,9 @@ func (p *Pusher) offlinePushMsg(ctx context.Context, conversationID string, msg
|
||||
}
|
||||
err = p.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
|
||||
if err != nil {
|
||||
prome.Inc(prome.MsgOfflinePushFailedCounter)
|
||||
prom_metrics.MsgOfflinePushFailedCounter.Inc()
|
||||
return err
|
||||
}
|
||||
prome.Inc(prome.MsgOfflinePushSuccessCounter)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -16,6 +16,7 @@ package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
|
||||
|
||||
@ -73,6 +74,7 @@ func (s *authServer) UserToken(ctx context.Context, req *pbauth.UserTokenReq) (*
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prom_metrics.UserLoginCounter.Inc()
|
||||
resp.Token = token
|
||||
resp.ExpireTimeSeconds = config.Config.TokenPolicy.Expire * 24 * 60 * 60
|
||||
return &resp, nil
|
||||
|
||||
@ -475,11 +475,13 @@ func (s *groupServer) GetGroupAllMember(ctx context.Context, req *pbgroup.GetGro
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = userInfo.Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = userInfo.FaceURL
|
||||
}
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -626,11 +628,13 @@ func (s *groupServer) GetGroupMembersInfo(ctx context.Context, req *pbgroup.GetG
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = userInfo.Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = userInfo.FaceURL
|
||||
}
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -1073,18 +1077,20 @@ func (s *groupServer) GetGroupMembersCMS(ctx context.Context, req *pbgroup.GetGr
|
||||
return nil, err
|
||||
}
|
||||
resp.Total = total
|
||||
nameMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == "" || e.FaceURL == ""
|
||||
}), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = nameMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = nameMap[e.UserID].FaceURL
|
||||
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = userInfo.Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = userInfo.FaceURL
|
||||
}
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -1471,17 +1477,19 @@ func (s *groupServer) GetUserInGroupMembers(ctx context.Context, req *pbgroup.Ge
|
||||
return nil, err
|
||||
}
|
||||
publicUserInfoMap, err := s.GetPublicUserInfoMap(ctx, utils.Filter(members, func(e *relationtb.GroupMemberModel) (string, bool) {
|
||||
return e.UserID, e.Nickname == ""
|
||||
return e.UserID, e.Nickname == "" || e.FaceURL == ""
|
||||
}), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = userInfo.Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = userInfo.FaceURL
|
||||
}
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
@ -1513,11 +1521,13 @@ func (s *groupServer) GetGroupMemberRoleLevel(ctx context.Context, req *pbgroup.
|
||||
return nil, err
|
||||
}
|
||||
resp.Members = utils.Slice(members, func(e *relationtb.GroupMemberModel) *sdkws.GroupMemberFullInfo {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = publicUserInfoMap[e.UserID].Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = publicUserInfoMap[e.UserID].FaceURL
|
||||
if userInfo, ok := publicUserInfoMap[e.UserID]; ok {
|
||||
if e.Nickname == "" {
|
||||
e.Nickname = userInfo.Nickname
|
||||
}
|
||||
if e.FaceURL == "" {
|
||||
e.FaceURL = userInfo.FaceURL
|
||||
}
|
||||
}
|
||||
return convert.Db2PbGroupMember(e)
|
||||
})
|
||||
|
||||
@ -30,17 +30,15 @@ type MessageInterceptorFunc func(ctx context.Context, req *msg.SendMsgReq) (*sdk
|
||||
func MessageHasReadEnabled(_ context.Context, req *msg.SendMsgReq) (*sdkws.MsgData, error) {
|
||||
switch {
|
||||
case req.MsgData.ContentType == constant.HasReadReceipt && req.MsgData.SessionType == constant.SingleChatType:
|
||||
if config.Config.SingleMessageHasReadReceiptEnable {
|
||||
return req.MsgData, nil
|
||||
} else {
|
||||
if !config.Config.SingleMessageHasReadReceiptEnable {
|
||||
return nil, errs.ErrMessageHasReadDisable.Wrap()
|
||||
}
|
||||
return req.MsgData, nil
|
||||
case req.MsgData.ContentType == constant.HasReadReceipt && req.MsgData.SessionType == constant.SuperGroupChatType:
|
||||
if config.Config.GroupMessageHasReadReceiptEnable {
|
||||
return req.MsgData, nil
|
||||
} else {
|
||||
if !config.Config.GroupMessageHasReadReceiptEnable {
|
||||
return nil, errs.ErrMessageHasReadDisable.Wrap()
|
||||
}
|
||||
return req.MsgData, nil
|
||||
}
|
||||
return req.MsgData, nil
|
||||
}
|
||||
|
||||
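The change above replaces the if/else bodies with early returns: when the relevant read-receipt switch is off, the interceptor rejects the message; otherwise it falls through and passes the message on. A reduced, self-contained sketch of that guard-clause shape (plain booleans stand in for the config switches, and a local error stands in for errs.ErrMessageHasReadDisable):

package main

import (
	"errors"
	"fmt"
)

var errHasReadDisabled = errors.New("read receipt disabled")

// checkReadReceipt mirrors the refactored control flow: reject early, otherwise let the message through.
func checkReadReceipt(isSingle, singleEnabled, groupEnabled bool) error {
	if isSingle {
		if !singleEnabled {
			return errHasReadDisabled
		}
		return nil
	}
	if !groupEnabled {
		return errHasReadDisabled
	}
	return nil
}

func main() {
	fmt.Println(checkReadReceipt(true, false, true)) // read receipt disabled
}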
@ -16,6 +16,7 @@ package msg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
|
||||
|
||||
@ -28,8 +29,6 @@ import (
|
||||
"github.com/OpenIMSDK/tools/log"
|
||||
"github.com/OpenIMSDK/tools/mcontext"
|
||||
"github.com/OpenIMSDK/tools/utils"
|
||||
|
||||
promepkg "github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
)
|
||||
|
||||
func (m *msgServer) SendMsg(ctx context.Context, req *pbmsg.SendMsgReq) (resp *pbmsg.SendMsgResp, error error) {
|
||||
@ -59,9 +58,8 @@ func (m *msgServer) sendMsgSuperGroupChat(
|
||||
ctx context.Context,
|
||||
req *pbmsg.SendMsgReq,
|
||||
) (resp *pbmsg.SendMsgResp, err error) {
|
||||
promepkg.Inc(promepkg.WorkSuperGroupChatMsgRecvSuccessCounter)
|
||||
if err = m.messageVerification(ctx, req); err != nil {
|
||||
promepkg.Inc(promepkg.WorkSuperGroupChatMsgProcessFailedCounter)
|
||||
prom_metrics.GroupChatMsgProcessFailedCounter.Inc()
|
||||
return nil, err
|
||||
}
|
||||
if err = callbackBeforeSendGroupMsg(ctx, req); err != nil {
|
||||
@ -80,7 +78,7 @@ func (m *msgServer) sendMsgSuperGroupChat(
|
||||
if err = callbackAfterSendGroupMsg(ctx, req); err != nil {
|
||||
log.ZWarn(ctx, "CallbackAfterSendGroupMsg", err)
|
||||
}
|
||||
promepkg.Inc(promepkg.WorkSuperGroupChatMsgProcessSuccessCounter)
|
||||
prom_metrics.GroupChatMsgProcessSuccessCounter.Inc()
|
||||
resp = &pbmsg.SendMsgResp{}
|
||||
resp.SendTime = req.MsgData.SendTime
|
||||
resp.ServerMsgID = req.MsgData.ServerMsgID
|
||||
@ -133,9 +131,7 @@ func (m *msgServer) sendMsgNotification(
|
||||
ctx context.Context,
|
||||
req *pbmsg.SendMsgReq,
|
||||
) (resp *pbmsg.SendMsgResp, err error) {
|
||||
promepkg.Inc(promepkg.SingleChatMsgRecvSuccessCounter)
|
||||
if err := m.MsgDatabase.MsgToMQ(ctx, utils.GenConversationUniqueKeyForSingle(req.MsgData.SendID, req.MsgData.RecvID), req.MsgData); err != nil {
|
||||
promepkg.Inc(promepkg.SingleChatMsgProcessFailedCounter)
|
||||
return nil, err
|
||||
}
|
||||
resp = &pbmsg.SendMsgResp{
|
||||
@ -147,7 +143,6 @@ func (m *msgServer) sendMsgNotification(
|
||||
}
|
||||
|
||||
func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq) (resp *pbmsg.SendMsgResp, err error) {
|
||||
promepkg.Inc(promepkg.SingleChatMsgRecvSuccessCounter)
|
||||
if err := m.messageVerification(ctx, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -166,7 +161,7 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq
|
||||
}
|
||||
}
|
||||
if !isSend {
|
||||
promepkg.Inc(promepkg.SingleChatMsgProcessFailedCounter)
|
||||
prom_metrics.SingleChatMsgProcessFailedCounter.Inc()
|
||||
return nil, nil
|
||||
} else {
|
||||
if err = callbackBeforeSendSingleMsg(ctx, req); err != nil {
|
||||
@ -176,7 +171,7 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq
|
||||
return nil, err
|
||||
}
|
||||
if err := m.MsgDatabase.MsgToMQ(ctx, utils.GenConversationUniqueKeyForSingle(req.MsgData.SendID, req.MsgData.RecvID), req.MsgData); err != nil {
|
||||
promepkg.Inc(promepkg.SingleChatMsgProcessFailedCounter)
|
||||
prom_metrics.SingleChatMsgProcessFailedCounter.Inc()
|
||||
return nil, err
|
||||
}
|
||||
err = callbackAfterSendSingleMsg(ctx, req)
|
||||
@ -188,7 +183,7 @@ func (m *msgServer) sendMsgSingleChat(ctx context.Context, req *pbmsg.SendMsgReq
|
||||
ClientMsgID: req.MsgData.ClientMsgID,
|
||||
SendTime: req.MsgData.SendTime,
|
||||
}
|
||||
promepkg.Inc(promepkg.SingleChatMsgProcessSuccessCounter)
|
||||
prom_metrics.SingleChatMsgProcessSuccessCounter.Inc()
|
||||
return resp, nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,7 +28,6 @@ import (
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/controller"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/localcache"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/unrelation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
|
||||
)
|
||||
|
||||
@ -94,27 +93,10 @@ func Start(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) e
|
||||
}
|
||||
s.notificationSender = rpcclient.NewNotificationSender(rpcclient.WithLocalSendMsg(s.SendMsg))
|
||||
s.addInterceptorHandler(MessageHasReadEnabled)
|
||||
s.initPrometheus()
|
||||
msg.RegisterMsgServer(server, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *msgServer) initPrometheus() {
|
||||
prome.NewMsgPullFromRedisSuccessCounter()
|
||||
prome.NewMsgPullFromRedisFailedCounter()
|
||||
prome.NewMsgPullFromMongoSuccessCounter()
|
||||
prome.NewMsgPullFromMongoFailedCounter()
|
||||
prome.NewSingleChatMsgRecvSuccessCounter()
|
||||
prome.NewGroupChatMsgRecvSuccessCounter()
|
||||
prome.NewWorkSuperGroupChatMsgRecvSuccessCounter()
|
||||
prome.NewSingleChatMsgProcessSuccessCounter()
|
||||
prome.NewSingleChatMsgProcessFailedCounter()
|
||||
prome.NewGroupChatMsgProcessSuccessCounter()
|
||||
prome.NewGroupChatMsgProcessFailedCounter()
|
||||
prome.NewWorkSuperGroupChatMsgProcessSuccessCounter()
|
||||
prome.NewWorkSuperGroupChatMsgProcessFailedCounter()
|
||||
}
|
||||
|
||||
func (m *msgServer) conversationAndGetRecvID(conversation *conversation.Conversation, userID string) (recvID string) {
|
||||
if conversation.ConversationType == constant.SingleChatType ||
|
||||
conversation.ConversationType == constant.NotificationChatType {
|
||||
|
||||
@ -34,9 +34,9 @@ func NewApiCmd() *ApiCmd {
|
||||
return ret
|
||||
}
|
||||
|
||||
func (a *ApiCmd) AddApi(f func(port int) error) {
|
||||
func (a *ApiCmd) AddApi(f func(port int, promPort int) error) {
|
||||
a.Command.RunE = func(cmd *cobra.Command, args []string) error {
|
||||
return f(a.getPortFlag(cmd))
|
||||
return f(a.getPortFlag(cmd), a.getPrometheusPortFlag(cmd))
|
||||
}
|
||||
}
|
||||
|
||||
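For orientation, a hedged sketch of how a caller adapts to the new two-parameter callback. Only ApiCmd.AddApi's signature is taken from the hunk above; the cmd package path, the api.Start name, and the Execute call are assumptions for illustration:

// Sketch only: wiring a run function that now receives both the API port and the Prometheus port.
apiCmd := cmd.NewApiCmd()
apiCmd.AddApi(func(port int, promPort int) error {
	// start the HTTP API on port and expose /metrics on promPort
	return api.Start(port, promPort) // hypothetical starter, named for illustration
})
if err := apiCmd.Execute(); err != nil { // Execute assumed from the embedded root command
	panic(err)
}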
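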
@ -44,8 +44,8 @@ func (a *ApiCmd) GetPortFromConfig(portType string) int {
|
||||
fmt.Println("GetPortFromConfig:", portType)
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.Api.OpenImApiPort[0]
|
||||
} else {
|
||||
|
||||
return 0
|
||||
} else if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.ApiPrometheusPort[0]
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
@ -66,7 +66,7 @@ func (m *MsgGatewayCmd) GetPortFromConfig(portType string) int {
|
||||
} else if portType == constant.FlagPort {
|
||||
return v3config.Config.LongConnSvr.OpenImMessageGatewayPort[0]
|
||||
} else if portType == constant.FlagPrometheusPort {
|
||||
return 0
|
||||
return v3config.Config.Prometheus.MessageGatewayPrometheusPort[0]
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
|
||||
@ -15,6 +15,9 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/OpenIMSDK/protocol/constant"
|
||||
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/internal/msgtransfer"
|
||||
@ -40,3 +43,25 @@ func (m *MsgTransferCmd) Exec() error {
|
||||
m.addRunE()
|
||||
return m.Execute()
|
||||
}
|
||||
|
||||
func (m *MsgTransferCmd) GetPortFromConfig(portType string) int {
|
||||
fmt.Println("GetPortFromConfig:", portType)
|
||||
if portType == constant.FlagPort {
|
||||
return 0
|
||||
} else if portType == constant.FlagPrometheusPort {
|
||||
n := m.getTransferProgressFlagValue()
|
||||
return config2.Config.Prometheus.MessageTransferPrometheusPort[n]
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (m *MsgTransferCmd) AddTransferProgressFlag() {
|
||||
m.Command.Flags().IntP(constant.FlagTransferProgressIndex, "n", 0, "transfer progress index")
|
||||
}
|
||||
func (m *MsgTransferCmd) getTransferProgressFlagValue() int {
|
||||
nindex, err := m.Command.Flags().GetInt(constant.FlagTransferProgressIndex)
|
||||
if err != nil {
|
||||
fmt.Println("get transfercmd error,make sure it is k8s env or not")
|
||||
return 0
|
||||
}
|
||||
return nindex
|
||||
}
|
||||
|
||||
@ -61,34 +61,58 @@ func (a *RpcCmd) GetPortFromConfig(portType string) int {
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImPushPort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.PushPrometheusPort[0]
|
||||
}
|
||||
case RpcAuthServer:
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImAuthPort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.AuthPrometheusPort[0]
|
||||
}
|
||||
case RpcConversationServer:
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImConversationPort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.ConversationPrometheusPort[0]
|
||||
}
|
||||
case RpcFriendServer:
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImFriendPort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.FriendPrometheusPort[0]
|
||||
}
|
||||
case RpcGroupServer:
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImGroupPort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.GroupPrometheusPort[0]
|
||||
}
|
||||
case RpcMsgServer:
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImMessagePort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.MessagePrometheusPort[0]
|
||||
}
|
||||
case RpcThirdServer:
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImThirdPort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.ThirdPrometheusPort[0]
|
||||
}
|
||||
case RpcUserServer:
|
||||
if portType == constant.FlagPort {
|
||||
return config2.Config.RpcPort.OpenImUserPort[0]
|
||||
}
|
||||
if portType == constant.FlagPrometheusPort {
|
||||
return config2.Config.Prometheus.UserPrometheusPort[0]
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
@ -262,18 +262,20 @@ type configStruct struct {
|
||||
} `yaml:"callback"`
|
||||
|
||||
Prometheus struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
UserPrometheusPort []int `yaml:"userPrometheusPort"`
|
||||
FriendPrometheusPort []int `yaml:"friendPrometheusPort"`
|
||||
MessagePrometheusPort []int `yaml:"messagePrometheusPort"`
|
||||
MessageGatewayPrometheusPort []int `yaml:"messageGatewayPrometheusPort"`
|
||||
GroupPrometheusPort []int `yaml:"groupPrometheusPort"`
|
||||
AuthPrometheusPort []int `yaml:"authPrometheusPort"`
|
||||
PushPrometheusPort []int `yaml:"pushPrometheusPort"`
|
||||
ConversationPrometheusPort []int `yaml:"conversationPrometheusPort"`
|
||||
RtcPrometheusPort []int `yaml:"rtcPrometheusPort"`
|
||||
MessageTransferPrometheusPort []int `yaml:"messageTransferPrometheusPort"`
|
||||
ThirdPrometheusPort []int `yaml:"thirdPrometheusPort"`
|
||||
Enable bool `yaml:"enable"`
|
||||
PrometheusUrl string `yaml:"prometheusUrl"`
|
||||
ApiPrometheusPort []int `yaml:"apiPrometheusPort"`
|
||||
UserPrometheusPort []int `yaml:"userPrometheusPort"`
|
||||
FriendPrometheusPort []int `yaml:"friendPrometheusPort"`
|
||||
MessagePrometheusPort []int `yaml:"messagePrometheusPort"`
|
||||
MessageGatewayPrometheusPort []int `yaml:"messageGatewayPrometheusPort"`
|
||||
GroupPrometheusPort []int `yaml:"groupPrometheusPort"`
|
||||
AuthPrometheusPort []int `yaml:"authPrometheusPort"`
|
||||
PushPrometheusPort []int `yaml:"pushPrometheusPort"`
|
||||
ConversationPrometheusPort []int `yaml:"conversationPrometheusPort"`
|
||||
RtcPrometheusPort []int `yaml:"rtcPrometheusPort"`
|
||||
MessageTransferPrometheusPort []int `yaml:"messageTransferPrometheusPort"`
|
||||
ThirdPrometheusPort []int `yaml:"thirdPrometheusPort"`
|
||||
} `yaml:"prometheus"`
|
||||
Notification notification `yaml:"notification"`
|
||||
}
|
||||
|
||||
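The reordered struct above adds prometheusUrl and apiPrometheusPort alongside the existing per-service port slices. A small stand-alone sketch, using gopkg.in/yaml.v3 purely for illustration, of how such a section unmarshals (only the new fields are shown here, not the full configStruct):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type prometheusConf struct {
	Enable            bool   `yaml:"enable"`
	PrometheusUrl     string `yaml:"prometheusUrl"`
	ApiPrometheusPort []int  `yaml:"apiPrometheusPort"`
}

func main() {
	raw := []byte("enable: true\nprometheusUrl: http://127.0.0.1:9090\napiPrometheusPort: [20100]\n")
	var c prometheusConf
	if err := yaml.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Enable:true PrometheusUrl:http://127.0.0.1:9090 ApiPrometheusPort:[20100]}
}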
@ -35,6 +35,16 @@ const (
|
||||
DefaultFolderPath = "../config/"
|
||||
)
|
||||
|
||||
// GetDefaultConfigPath returns the absolute path joined with ../config/; this is the config path inside the k8s container
|
||||
func GetDefaultConfigPath() string {
|
||||
b, err := filepath.Abs(os.Args[0])
|
||||
if err != nil {
|
||||
fmt.Println("filepath.Abs error,err=", err)
|
||||
return ""
|
||||
}
|
||||
return filepath.Join(filepath.Dir(b), "../config/")
|
||||
}
|
||||
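As a quick illustration of what the function above resolves to (filepath.Join cleans the ".." segment), assuming the binary sits in a bin/ directory next to config/ in the image; the concrete path below is only an example:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// If os.Args[0] resolves to /openim-server/_output/bin/openim-api, then joining its
	// directory with ../config/ folds the ".." away and yields the sibling config directory.
	fmt.Println(filepath.Join("/openim-server/_output/bin", "../config/")) // /openim-server/_output/config
}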
|
||||
// getProjectRoot returns the absolute path of the project root directory
|
||||
func GetProjectRoot() string {
|
||||
b, _ := filepath.Abs(os.Args[0])
|
||||
@ -65,9 +75,11 @@ func initConfig(config interface{}, configName, configFolderPath string) error {
|
||||
_, err := os.Stat(configFolderPath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
fmt.Println("stat config path error:", err.Error())
|
||||
return fmt.Errorf("stat config path error: %w", err)
|
||||
}
|
||||
configFolderPath = filepath.Join(GetProjectRoot(), "config", configName)
|
||||
fmt.Println("flag's path,enviment's path,default path all is not exist,using project path:", configFolderPath)
|
||||
}
|
||||
data, err := os.ReadFile(configFolderPath)
|
||||
if err != nil {
|
||||
@ -86,7 +98,7 @@ func InitConfig(configFolderPath string) error {
|
||||
if envConfigPath != "" {
|
||||
configFolderPath = envConfigPath
|
||||
} else {
|
||||
configFolderPath = DefaultFolderPath
|
||||
configFolderPath = GetDefaultConfigPath()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -74,6 +74,9 @@ type conversationDatabase struct {
|
||||
|
||||
func (c *conversationDatabase) SetUsersConversationFiledTx(ctx context.Context, userIDs []string, conversation *relationtb.ConversationModel, filedMap map[string]interface{}) (err error) {
|
||||
cache := c.cache.NewCache()
|
||||
if conversation.GroupID != "" {
|
||||
cache = cache.DelSuperGroupRecvMsgNotNotifyUserIDs(conversation.GroupID).DelSuperGroupRecvMsgNotNotifyUserIDsHash(conversation.GroupID)
|
||||
}
|
||||
if err := c.tx.Transaction(func(tx any) error {
|
||||
conversationTx := c.conversationDB.NewTx(tx)
|
||||
haveUserIDs, err := conversationTx.FindUserID(ctx, userIDs, []string{conversation.ConversationID})
|
||||
@ -201,6 +204,13 @@ func (c *conversationDatabase) GetUserAllConversation(ctx context.Context, owner
|
||||
|
||||
func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.ConversationModel) error {
|
||||
cache := c.cache.NewCache()
|
||||
|
||||
groupIDs := utils.Distinct(utils.Filter(conversations, func(e *relationtb.ConversationModel) (string, bool) {
|
||||
return e.GroupID, e.GroupID != ""
|
||||
}))
|
||||
for _, groupID := range groupIDs {
|
||||
cache = cache.DelSuperGroupRecvMsgNotNotifyUserIDs(groupID).DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID)
|
||||
}
|
||||
if err := c.tx.Transaction(func(tx any) error {
|
||||
var conversationIDs []string
|
||||
for _, conversation := range conversations {
|
||||
|
||||
@ -17,6 +17,7 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
@ -30,8 +31,6 @@ import (
|
||||
unrelationtb "github.com/openimsdk/open-im-server/v3/pkg/common/db/table/unrelation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/db/unrelation"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/kafka"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
|
||||
pbmsg "github.com/OpenIMSDK/protocol/msg"
|
||||
@ -355,10 +354,9 @@ func (db *commonMsgDatabase) DelUserDeleteMsgsList(ctx context.Context, conversa
|
||||
func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
|
||||
currentMaxSeq, err := db.cache.GetMaxSeq(ctx, conversationID)
|
||||
if err != nil && errs.Unwrap(err) != redis.Nil {
|
||||
prome.Inc(prome.SeqGetFailedCounter)
|
||||
log.ZError(ctx, "db.cache.GetMaxSeq", err)
|
||||
return 0, false, err
|
||||
}
|
||||
prome.Inc(prome.SeqGetSuccessCounter)
|
||||
lenList := len(msgs)
|
||||
if int64(lenList) > db.msg.GetSingleGocMsgNum() {
|
||||
return 0, false, errors.New("too large")
|
||||
@ -378,23 +376,20 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
|
||||
}
|
||||
failedNum, err := db.cache.SetMessageToCache(ctx, conversationID, msgs)
|
||||
if err != nil {
|
||||
prome.Add(prome.MsgInsertRedisFailedCounter, failedNum)
|
||||
prom_metrics.MsgInsertRedisFailedCounter.Add(float64(failedNum))
|
||||
log.ZError(ctx, "setMessageToCache error", err, "len", len(msgs), "conversationID", conversationID)
|
||||
} else {
|
||||
prome.Inc(prome.MsgInsertRedisSuccessCounter)
|
||||
prom_metrics.MsgInsertRedisSuccessCounter.Inc()
|
||||
}
|
||||
err = db.cache.SetMaxSeq(ctx, conversationID, currentMaxSeq)
|
||||
if err != nil {
|
||||
prome.Inc(prome.SeqSetFailedCounter)
|
||||
} else {
|
||||
prome.Inc(prome.SeqSetSuccessCounter)
|
||||
log.ZError(ctx, "db.cache.SetMaxSeq error", err, "conversationID", conversationID)
|
||||
prom_metrics.SeqSetFailedCounter.Inc()
|
||||
}
|
||||
err2 := db.cache.SetHasReadSeqs(ctx, conversationID, userSeqMap)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "SetHasReadSeqs error", err2, "userSeqMap", userSeqMap, "conversationID", conversationID)
|
||||
prome.Inc(prome.SeqSetFailedCounter)
|
||||
} else {
|
||||
prome.Inc(prome.SeqSetSuccessCounter)
|
||||
prom_metrics.SeqSetFailedCounter.Inc()
|
||||
}
|
||||
return lastMaxSeq, isNew, utils.Wrap(err, "")
|
||||
}
|
||||
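The hunk above swaps the lazily initialised prome counters for the package-level prometheus collectors defined in pkg/common/prom_metrics (introduced later in this diff). A brief sketch of the new call pattern, using only the counters and methods that appear in this commit:

// Counters are plain prometheus.Counter values now, so callers increment them directly:
prom_metrics.MsgInsertRedisSuccessCounter.Inc()
prom_metrics.MsgInsertRedisFailedCounter.Add(float64(failedNum)) // failedNum as returned by SetMessageToCache
prom_metrics.SeqSetFailedCounter.Inc()
// Registration happens once via NewGrpcPromObj / GetGrpcCusMetrics instead of per-counter New* constructors.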
@ -493,7 +488,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
|
||||
cachedMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, conversationID, seqs)
|
||||
if err != nil {
|
||||
if err != redis.Nil {
|
||||
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
|
||||
|
||||
log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
|
||||
}
|
||||
}
|
||||
@ -530,7 +525,7 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
|
||||
cachedMsgs, failedSeqs2, err := db.cache.GetMessagesBySeq(ctx, conversationID, reGetSeqsCache)
|
||||
if err != nil {
|
||||
if err != redis.Nil {
|
||||
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs2))
|
||||
|
||||
log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", reGetSeqsCache)
|
||||
}
|
||||
}
|
||||
@ -543,14 +538,14 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
|
||||
log.ZDebug(ctx, "msgs not exist in redis", "seqs", failedSeqs)
|
||||
}
|
||||
// get from cache or db
|
||||
prome.Add(prome.MsgPullFromRedisSuccessCounter, len(successMsgs))
|
||||
|
||||
if len(failedSeqs) > 0 {
|
||||
mongoMsgs, err := db.getMsgBySeqsRange(ctx, userID, conversationID, failedSeqs, begin, end)
|
||||
if err != nil {
|
||||
prome.Add(prome.MsgPullFromMongoFailedCounter, len(failedSeqs))
|
||||
|
||||
return 0, 0, nil, err
|
||||
}
|
||||
prome.Add(prome.MsgPullFromMongoSuccessCounter, len(mongoMsgs))
|
||||
|
||||
successMsgs = append(successMsgs, mongoMsgs...)
|
||||
}
|
||||
|
||||
@ -582,7 +577,6 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co
|
||||
successMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, conversationID, newSeqs)
|
||||
if err != nil {
|
||||
if err != redis.Nil {
|
||||
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
|
||||
log.ZError(ctx, "get message from redis exception", err, "failedSeqs", failedSeqs, "conversationID", conversationID)
|
||||
}
|
||||
}
|
||||
@ -602,14 +596,14 @@ func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, co
|
||||
"conversationID",
|
||||
conversationID,
|
||||
)
|
||||
prome.Add(prome.MsgPullFromRedisSuccessCounter, len(successMsgs))
|
||||
|
||||
if len(failedSeqs) > 0 {
|
||||
mongoMsgs, err := db.getMsgBySeqs(ctx, userID, conversationID, failedSeqs)
|
||||
if err != nil {
|
||||
prome.Add(prome.MsgPullFromMongoFailedCounter, len(failedSeqs))
|
||||
|
||||
return 0, 0, nil, err
|
||||
}
|
||||
prome.Add(prome.MsgPullFromMongoSuccessCounter, len(mongoMsgs))
|
||||
|
||||
successMsgs = append(successMsgs, mongoMsgs...)
|
||||
}
|
||||
return minSeq, maxSeq, successMsgs, nil
|
||||
|
||||
@ -143,7 +143,7 @@ func (c *ConversationGorm) FindRecvMsgNotNotifyUserIDs(
|
||||
return userIDs, utils.Wrap(
|
||||
c.db(ctx).
|
||||
Where("group_id = ? and recv_msg_opt = ?", groupID, constant.ReceiveNotNotifyMessage).
|
||||
Pluck("user_id", &userIDs).
|
||||
Pluck("owner_user_id", &userIDs).
|
||||
Error,
|
||||
"",
|
||||
)
|
||||
@ -156,7 +156,7 @@ func (c *ConversationGorm) FindSuperGroupRecvMsgNotNotifyUserIDs(
|
||||
return userIDs, utils.Wrap(
|
||||
c.db(ctx).
|
||||
Where("group_id = ? and recv_msg_opt = ? and conversation_type = ?", groupID, constant.ReceiveNotNotifyMessage, constant.SuperGroupChatType).
|
||||
Pluck("user_id", &userIDs).
|
||||
Pluck("owner_user_id", &userIDs).
|
||||
Error,
|
||||
"",
|
||||
)
|
||||
|
||||
417
pkg/common/ginPrometheus/ginPrometheus.go
Normal file
@ -0,0 +1,417 @@
|
||||
package ginPrometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
var defaultMetricPath = "/metrics"
|
||||
|
||||
// counter, counter_vec, gauge, gauge_vec,
|
||||
// histogram, histogram_vec, summary, summary_vec
|
||||
var reqCnt = &Metric{
|
||||
ID: "reqCnt",
|
||||
Name: "requests_total",
|
||||
Description: "How many HTTP requests processed, partitioned by status code and HTTP method.",
|
||||
Type: "counter_vec",
|
||||
Args: []string{"code", "method", "handler", "host", "url"}}
|
||||
|
||||
var reqDur = &Metric{
|
||||
ID: "reqDur",
|
||||
Name: "request_duration_seconds",
|
||||
Description: "The HTTP request latencies in seconds.",
|
||||
Type: "histogram_vec",
|
||||
Args: []string{"code", "method", "url"},
|
||||
}
|
||||
|
||||
var resSz = &Metric{
|
||||
ID: "resSz",
|
||||
Name: "response_size_bytes",
|
||||
Description: "The HTTP response sizes in bytes.",
|
||||
Type: "summary"}
|
||||
|
||||
var reqSz = &Metric{
|
||||
ID: "reqSz",
|
||||
Name: "request_size_bytes",
|
||||
Description: "The HTTP request sizes in bytes.",
|
||||
Type: "summary"}
|
||||
|
||||
var standardMetrics = []*Metric{
|
||||
reqCnt,
|
||||
reqDur,
|
||||
resSz,
|
||||
reqSz,
|
||||
}
|
||||
|
||||
/*
|
||||
RequestCounterURLLabelMappingFn is a function which can be supplied to the middleware to control
|
||||
the cardinality of the request counter's "url" label, which might be required in some contexts.
|
||||
For instance, if for a "/customer/:name" route you don't want to generate a time series for every
|
||||
possible customer name, you could use this function:
|
||||
|
||||
func(c *gin.Context) string {
|
||||
url := c.Request.URL.Path
|
||||
for _, p := range c.Params {
|
||||
if p.Key == "name" {
|
||||
url = strings.Replace(url, p.Value, ":name", 1)
|
||||
break
|
||||
}
|
||||
}
|
||||
return url
|
||||
}
|
||||
|
||||
which would map "/customer/alice" and "/customer/bob" to their template "/customer/:name".
|
||||
*/
|
||||
type RequestCounterURLLabelMappingFn func(c *gin.Context) string
|
||||
|
||||
// Metric is a definition for the name, description, type, ID, and
|
||||
// prometheus.Collector type (i.e. CounterVec, Summary, etc) of each metric
|
||||
type Metric struct {
|
||||
MetricCollector prometheus.Collector
|
||||
ID string
|
||||
Name string
|
||||
Description string
|
||||
Type string
|
||||
Args []string
|
||||
}
|
||||
|
||||
// Prometheus contains the metrics gathered by the instance and its path
|
||||
type Prometheus struct {
|
||||
reqCnt *prometheus.CounterVec
|
||||
reqDur *prometheus.HistogramVec
|
||||
reqSz, resSz prometheus.Summary
|
||||
router *gin.Engine
|
||||
listenAddress string
|
||||
Ppg PrometheusPushGateway
|
||||
|
||||
MetricsList []*Metric
|
||||
MetricsPath string
|
||||
|
||||
ReqCntURLLabelMappingFn RequestCounterURLLabelMappingFn
|
||||
|
||||
// gin.Context string to use as a prometheus URL label
|
||||
URLLabelFromContext string
|
||||
}
|
||||
|
||||
// PrometheusPushGateway contains the configuration for pushing to a Prometheus pushgateway (optional)
|
||||
type PrometheusPushGateway struct {
|
||||
|
||||
// Push interval in seconds
|
||||
PushIntervalSeconds time.Duration
|
||||
|
||||
// Push Gateway URL in format http://domain:port
|
||||
// where JOBNAME can be any string of your choice
|
||||
PushGatewayURL string
|
||||
|
||||
// Local metrics URL where metrics are fetched from; this could be omitted in the future
|
||||
// if implemented using prometheus common/expfmt instead
|
||||
MetricsURL string
|
||||
|
||||
// pushgateway job name, defaults to "gin"
|
||||
Job string
|
||||
}
|
||||
|
||||
// NewPrometheus generates a new set of metrics with a certain subsystem name
|
||||
func NewPrometheus(subsystem string, customMetricsList ...[]*Metric) *Prometheus {
|
||||
subsystem = "app"
|
||||
|
||||
var metricsList []*Metric
|
||||
|
||||
if len(customMetricsList) > 1 {
|
||||
panic("Too many args. NewPrometheus( string, <optional []*Metric> ).")
|
||||
} else if len(customMetricsList) == 1 {
|
||||
metricsList = customMetricsList[0]
|
||||
}
|
||||
|
||||
for _, metric := range standardMetrics {
|
||||
metricsList = append(metricsList, metric)
|
||||
}
|
||||
|
||||
p := &Prometheus{
|
||||
MetricsList: metricsList,
|
||||
MetricsPath: defaultMetricPath,
|
||||
ReqCntURLLabelMappingFn: func(c *gin.Context) string {
|
||||
return c.Request.URL.Path
|
||||
},
|
||||
}
|
||||
|
||||
p.registerMetrics(subsystem)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// SetPushGateway sends metrics to a remote pushgateway exposed on pushGatewayURL
|
||||
// every pushIntervalSeconds. Metrics are fetched from metricsURL
|
||||
func (p *Prometheus) SetPushGateway(pushGatewayURL, metricsURL string, pushIntervalSeconds time.Duration) {
|
||||
p.Ppg.PushGatewayURL = pushGatewayURL
|
||||
p.Ppg.MetricsURL = metricsURL
|
||||
p.Ppg.PushIntervalSeconds = pushIntervalSeconds
|
||||
p.startPushTicker()
|
||||
}
|
||||
|
||||
// SetPushGatewayJob job name, defaults to "gin"
|
||||
func (p *Prometheus) SetPushGatewayJob(j string) {
|
||||
p.Ppg.Job = j
|
||||
}
|
||||
|
||||
// SetListenAddress for exposing metrics on address. If not set, it will be exposed at the
|
||||
// same address of the gin engine that is being used
|
||||
func (p *Prometheus) SetListenAddress(address string) {
|
||||
p.listenAddress = address
|
||||
if p.listenAddress != "" {
|
||||
p.router = gin.Default()
|
||||
}
|
||||
}
|
||||
|
||||
// SetListenAddressWithRouter for using a separate router to expose metrics. (this keeps things like GET /metrics out of
|
||||
// your content's access log).
|
||||
func (p *Prometheus) SetListenAddressWithRouter(listenAddress string, r *gin.Engine) {
|
||||
p.listenAddress = listenAddress
|
||||
if len(p.listenAddress) > 0 {
|
||||
p.router = r
|
||||
}
|
||||
}
|
||||
|
||||
// SetMetricsPath sets the metrics path
|
||||
func (p *Prometheus) SetMetricsPath(e *gin.Engine) {
|
||||
|
||||
if p.listenAddress != "" {
|
||||
p.router.GET(p.MetricsPath, prometheusHandler())
|
||||
p.runServer()
|
||||
} else {
|
||||
e.GET(p.MetricsPath, prometheusHandler())
|
||||
}
|
||||
}
|
||||
|
||||
// SetMetricsPathWithAuth sets the metrics path with basic authentication
|
||||
func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) {
|
||||
|
||||
if p.listenAddress != "" {
|
||||
p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
|
||||
p.runServer()
|
||||
} else {
|
||||
e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (p *Prometheus) runServer() {
|
||||
if p.listenAddress != "" {
|
||||
go p.router.Run(p.listenAddress)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Prometheus) getMetrics() []byte {
|
||||
response, _ := http.Get(p.Ppg.MetricsURL)
|
||||
|
||||
defer response.Body.Close()
|
||||
body, _ := ioutil.ReadAll(response.Body)
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
func (p *Prometheus) getPushGatewayURL() string {
|
||||
h, _ := os.Hostname()
|
||||
if p.Ppg.Job == "" {
|
||||
p.Ppg.Job = "gin"
|
||||
}
|
||||
return p.Ppg.PushGatewayURL + "/metrics/job/" + p.Ppg.Job + "/instance/" + h
|
||||
}
|
||||
|
||||
func (p *Prometheus) sendMetricsToPushGateway(metrics []byte) {
|
||||
req, err := http.NewRequest("POST", p.getPushGatewayURL(), bytes.NewBuffer(metrics))
|
||||
client := &http.Client{}
|
||||
if _, err = client.Do(req); err != nil {
|
||||
fmt.Println("Error sending to push gateway error:", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Prometheus) startPushTicker() {
|
||||
ticker := time.NewTicker(time.Second * p.Ppg.PushIntervalSeconds)
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
p.sendMetricsToPushGateway(p.getMetrics())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// NewMetric associates prometheus.Collector based on Metric.Type
|
||||
func NewMetric(m *Metric, subsystem string) prometheus.Collector {
|
||||
var metric prometheus.Collector
|
||||
switch m.Type {
|
||||
case "counter_vec":
|
||||
metric = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
m.Args,
|
||||
)
|
||||
case "counter":
|
||||
metric = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
)
|
||||
case "gauge_vec":
|
||||
metric = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
m.Args,
|
||||
)
|
||||
case "gauge":
|
||||
metric = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
)
|
||||
case "histogram_vec":
|
||||
metric = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
m.Args,
|
||||
)
|
||||
case "histogram":
|
||||
metric = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
)
|
||||
case "summary_vec":
|
||||
metric = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
m.Args,
|
||||
)
|
||||
case "summary":
|
||||
metric = prometheus.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Subsystem: subsystem,
|
||||
Name: m.Name,
|
||||
Help: m.Description,
|
||||
},
|
||||
)
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
func (p *Prometheus) registerMetrics(subsystem string) {
|
||||
|
||||
for _, metricDef := range p.MetricsList {
|
||||
metric := NewMetric(metricDef, subsystem)
|
||||
if err := prometheus.Register(metric); err != nil {
|
||||
fmt.Println("could not be registered in Prometheus,metricDef.Name:", metricDef.Name, " error:", err.Error())
|
||||
}
|
||||
switch metricDef {
|
||||
case reqCnt:
|
||||
p.reqCnt = metric.(*prometheus.CounterVec)
|
||||
case reqDur:
|
||||
p.reqDur = metric.(*prometheus.HistogramVec)
|
||||
case resSz:
|
||||
p.resSz = metric.(prometheus.Summary)
|
||||
case reqSz:
|
||||
p.reqSz = metric.(prometheus.Summary)
|
||||
}
|
||||
metricDef.MetricCollector = metric
|
||||
}
|
||||
}
|
||||
|
||||
// Use adds the middleware to a gin engine.
|
||||
func (p *Prometheus) Use(e *gin.Engine) {
|
||||
e.Use(p.HandlerFunc())
|
||||
p.SetMetricsPath(e)
|
||||
}
|
||||
|
||||
// UseWithAuth adds the middleware to a gin engine with BasicAuth.
|
||||
func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) {
|
||||
e.Use(p.HandlerFunc())
|
||||
p.SetMetricsPathWithAuth(e, accounts)
|
||||
}
|
||||
|
||||
// HandlerFunc defines handler function for middleware
|
||||
func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
if c.Request.URL.Path == p.MetricsPath {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
reqSz := computeApproximateRequestSize(c.Request)
|
||||
|
||||
c.Next()
|
||||
|
||||
status := strconv.Itoa(c.Writer.Status())
|
||||
elapsed := float64(time.Since(start)) / float64(time.Second)
|
||||
resSz := float64(c.Writer.Size())
|
||||
|
||||
url := p.ReqCntURLLabelMappingFn(c)
|
||||
if len(p.URLLabelFromContext) > 0 {
|
||||
u, found := c.Get(p.URLLabelFromContext)
|
||||
if !found {
|
||||
u = "unknown"
|
||||
}
|
||||
url = u.(string)
|
||||
}
|
||||
p.reqDur.WithLabelValues(status, c.Request.Method, url).Observe(elapsed)
|
||||
p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
|
||||
p.reqSz.Observe(float64(reqSz))
|
||||
p.resSz.Observe(resSz)
|
||||
}
|
||||
}
|
||||
|
||||
func prometheusHandler() gin.HandlerFunc {
|
||||
h := promhttp.Handler()
|
||||
return func(c *gin.Context) {
|
||||
h.ServeHTTP(c.Writer, c.Request)
|
||||
}
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request) int {
|
||||
s := 0
|
||||
if r.URL != nil {
|
||||
s = len(r.URL.Path)
|
||||
}
|
||||
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
// r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
return s
|
||||
}
|
||||
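For reference, a minimal program wiring the middleware defined in this new file into a gin engine; everything used here (NewPrometheus, Use, the default /metrics path) comes from the file above:

package main

import (
	"github.com/gin-gonic/gin"

	"github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
)

func main() {
	r := gin.New()
	p := ginPrometheus.NewPrometheus("gin") // note: the constructor currently overrides the subsystem to "app"
	p.Use(r)                                // installs the handler middleware and registers GET /metrics

	r.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
	_ = r.Run(":8080")
}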
@ -31,14 +31,25 @@ import (
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
)
|
||||
|
||||
var client http.Client
|
||||
var (
|
||||
// shared HTTP client with a 15-second timeout.
|
||||
client = &http.Client{
|
||||
Timeout: 15 * time.Second, // max timeout is 15s
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
// reset http default transport
|
||||
http.DefaultTransport.(*http.Transport).MaxConnsPerHost = 100 // default: 2
|
||||
}
|
||||
|
||||
func Get(url string) (response []byte, err error) {
|
||||
client := http.Client{Timeout: 5 * time.Second}
|
||||
resp, err := client.Get(url)
|
||||
hclient := http.Client{Timeout: 5 * time.Second}
|
||||
resp, err := hclient.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
@ -47,26 +58,23 @@ func Get(url string) (response []byte, err error) {
|
||||
return body, nil
|
||||
}
|
||||
|
||||
func Post(
|
||||
ctx context.Context,
|
||||
url string,
|
||||
header map[string]string,
|
||||
data interface{},
|
||||
timeout int,
|
||||
) (content []byte, err error) {
|
||||
func Post(ctx context.Context, url string, header map[string]string, data interface{}, timeout int) (content []byte, err error) {
|
||||
if timeout > 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, time.Second*time.Duration(timeout))
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
jsonStr, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonStr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if operationID, _ := ctx.Value(constant.OperationID).(string); operationID != "" {
|
||||
req.Header.Set(constant.OperationID, operationID)
|
||||
}
|
||||
@ -74,25 +82,22 @@ func Post(
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
req.Header.Add("content-type", "application/json; charset=utf-8")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
result, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func PostReturn(
|
||||
ctx context.Context,
|
||||
url string,
|
||||
header map[string]string,
|
||||
input, output interface{},
|
||||
timeOutSecond int,
|
||||
) error {
|
||||
func PostReturn(ctx context.Context, url string, header map[string]string, input, output interface{}, timeOutSecond int) error {
|
||||
b, err := Post(ctx, url, header, input, timeOutSecond)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -101,17 +106,13 @@ func PostReturn(
|
||||
return err
|
||||
}
|
||||
|
||||
func callBackPostReturn(
|
||||
ctx context.Context,
|
||||
url, command string,
|
||||
input interface{},
|
||||
output callbackstruct.CallbackResp,
|
||||
callbackConfig config.CallBackConfig,
|
||||
) error {
|
||||
func callBackPostReturn(ctx context.Context, url, command string, input interface{}, output callbackstruct.CallbackResp, callbackConfig config.CallBackConfig) error {
|
||||
defer log.ZDebug(ctx, "callback", "url", url, "command", command, "input", input, "callbackConfig", callbackConfig)
|
||||
|
||||
v := urllib.Values{}
|
||||
v.Set(constant.CallbackCommand, command)
|
||||
url = url + "?" + v.Encode()
|
||||
|
||||
b, err := Post(ctx, url, nil, input, callbackConfig.CallbackTimeOut)
|
||||
if err != nil {
|
||||
if callbackConfig.CallbackFailedContinue != nil && *callbackConfig.CallbackFailedContinue {
|
||||
@ -120,6 +121,7 @@ func callBackPostReturn(
|
||||
}
|
||||
return errs.ErrNetwork.Wrap(err.Error())
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(b, output); err != nil {
|
||||
if callbackConfig.CallbackFailedContinue != nil && *callbackConfig.CallbackFailedContinue {
|
||||
log.ZWarn(ctx, "callback failed but continue", err, "url", url)
|
||||
@ -127,15 +129,10 @@ func callBackPostReturn(
|
||||
}
|
||||
return errs.ErrData.Wrap(err.Error())
|
||||
}
|
||||
|
||||
return output.Parse()
|
||||
}
|
||||
|
||||
func CallBackPostReturn(
|
||||
ctx context.Context,
|
||||
url string,
|
||||
req callbackstruct.CallbackReq,
|
||||
resp callbackstruct.CallbackResp,
|
||||
callbackConfig config.CallBackConfig,
|
||||
) error {
|
||||
func CallBackPostReturn(ctx context.Context, url string, req callbackstruct.CallbackReq, resp callbackstruct.CallbackResp, callbackConfig config.CallBackConfig) error {
|
||||
return callBackPostReturn(ctx, url, req.GetCallbackCommand(), req, resp, callbackConfig)
|
||||
}
|
||||
|
||||
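A short usage sketch for the flattened helpers above; callbackURL, input, req, resp and callbackConfig are placeholders, while the signatures are the ones shown in this hunk:

// Post marshals input to JSON, applies the per-call timeout (in seconds) via context,
// and returns the raw response body; a nil header map is fine, the operationID header
// is filled from the context automatically.
b, err := Post(ctx, callbackURL, nil, input, 5)
if err != nil {
	return err
}
// CallBackPostReturn adds the command query parameter and parses the CallbackResp on top of Post:
if err := CallBackPostReturn(ctx, callbackURL, req, resp, callbackConfig); err != nil {
	return err
}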
@ -28,8 +28,6 @@ import (
|
||||
|
||||
"github.com/IBM/sarama"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
prome "github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -131,8 +129,8 @@ func (p *Producer) SendMessage(ctx context.Context, key string, msg proto.Messag
|
||||
kMsg.Headers = header
|
||||
partition, offset, err := p.producer.SendMessage(kMsg)
|
||||
log.ZDebug(ctx, "ByteEncoder SendMessage end", "key ", kMsg.Key, "key length", kMsg.Value.Length())
|
||||
if err == nil {
|
||||
prome.Inc(prome.SendMsgCounter)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "p.producer.SendMessage error", err)
|
||||
}
|
||||
return partition, offset, utils.Wrap(err, "")
|
||||
}
|
||||
|
||||
45
pkg/common/prom_metrics/func.go
Normal file
@ -0,0 +1,45 @@
|
||||
package prom_metrics
|
||||
|
||||
import (
|
||||
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
)
|
||||
|
||||
func NewGrpcPromObj(cusMetrics []prometheus.Collector) (*prometheus.Registry, *grpc_prometheus.ServerMetrics, error) {
|
||||
////////////////////////////////////////////////////////
|
||||
reg := prometheus.NewRegistry()
|
||||
grpcMetrics := grpc_prometheus.NewServerMetrics()
|
||||
grpcMetrics.EnableHandlingTimeHistogram()
|
||||
cusMetrics = append(cusMetrics, grpcMetrics, collectors.NewGoCollector())
|
||||
reg.MustRegister(cusMetrics...)
|
||||
return reg, grpcMetrics, nil
|
||||
}
|
||||
|
||||
func GetGrpcCusMetrics(registerName string) []prometheus.Collector {
|
||||
switch registerName {
|
||||
case config2.Config.RpcRegisterName.OpenImMessageGatewayName:
|
||||
return []prometheus.Collector{OnlineUserGauge}
|
||||
case config2.Config.RpcRegisterName.OpenImMsgName:
|
||||
return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
|
||||
case "Transfer":
|
||||
return []prometheus.Collector{MsgInsertRedisSuccessCounter, MsgInsertRedisFailedCounter, MsgInsertMongoSuccessCounter, MsgInsertMongoFailedCounter, SeqSetFailedCounter}
|
||||
case config2.Config.RpcRegisterName.OpenImPushName:
|
||||
return []prometheus.Collector{MsgOfflinePushFailedCounter}
|
||||
case config2.Config.RpcRegisterName.OpenImAuthName:
|
||||
return []prometheus.Collector{UserLoginCounter}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func GetGinCusMetrics(name string) []*ginPrometheus.Metric {
|
||||
switch name {
|
||||
case "Api":
|
||||
return []*ginPrometheus.Metric{ApiCustomCnt}
|
||||
default:
|
||||
return []*ginPrometheus.Metric{ApiCustomCnt}
|
||||
}
|
||||
}
|
||||
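A hedged sketch of how the registry and server metrics returned by NewGrpcPromObj can be wired into a gRPC server and a /metrics endpoint. The package name, function name, and listen address below are placeholders; only NewGrpcPromObj and GetGrpcCusMetrics come from the file above:

package metricsdemo

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"

	"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
)

func serveWithMetrics(registerName, metricsAddr string) (*grpc.Server, error) {
	cus := prom_metrics.GetGrpcCusMetrics(registerName)
	reg, grpcMetrics, err := prom_metrics.NewGrpcPromObj(cus)
	if err != nil {
		return nil, err
	}
	srv := grpc.NewServer(
		grpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),
		grpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),
	)
	// Serve everything registered on reg (custom collectors plus the gRPC handling metrics).
	go func() {
		mux := http.NewServeMux()
		mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
		_ = http.ListenAndServe(metricsAddr, mux)
	}()
	return srv, nil
}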
16
pkg/common/prom_metrics/gin-api.go
Normal file
@ -0,0 +1,16 @@
|
||||
package prom_metrics
|
||||
|
||||
import ginProm "github.com/openimsdk/open-im-server/v3/pkg/common/ginPrometheus"
|
||||
|
||||
/*
|
||||
labels := prometheus.Labels{"label_one": "any", "label_two": "value"}
|
||||
ApiCustomCnt.MetricCollector.(*prometheus.CounterVec).With(labels).Inc()
|
||||
*/
|
||||
var (
|
||||
ApiCustomCnt = &ginProm.Metric{
|
||||
Name: "custom_total",
|
||||
Description: "Custom counter events.",
|
||||
Type: "counter_vec",
|
||||
Args: []string{"label_one", "label_two"},
|
||||
}
|
||||
)
|
||||
12
pkg/common/prom_metrics/grpc-auth.go
Normal file
@ -0,0 +1,12 @@
|
||||
package prom_metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
UserLoginCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "user_login_total",
|
||||
Help: "The number of user login",
|
||||
})
|
||||
)
|
||||
24
pkg/common/prom_metrics/grpc-msg.go
Normal file
@ -0,0 +1,24 @@
|
||||
package prom_metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
SingleChatMsgProcessSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "single_chat_msg_process_success_total",
|
||||
Help: "The number of single chat msg successful processed",
|
||||
})
|
||||
SingleChatMsgProcessFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "single_chat_msg_process_failed_total",
|
||||
Help: "The number of single chat msg failed processed",
|
||||
})
|
||||
GroupChatMsgProcessSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "group_chat_msg_process_success_total",
|
||||
Help: "The number of group chat msg successful processed",
|
||||
})
|
||||
GroupChatMsgProcessFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "group_chat_msg_process_failed_total",
|
||||
Help: "The number of group chat msg failed processed",
|
||||
})
|
||||
)
|
||||
12
pkg/common/prom_metrics/grpc-msggateway.go
Normal file
@ -0,0 +1,12 @@
|
||||
package prom_metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
OnlineUserGauge = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "online_user_num",
|
||||
Help: "The number of online user num",
|
||||
})
|
||||
)
|
||||
12
pkg/common/prom_metrics/grpc_push.go
Normal file
@ -0,0 +1,12 @@
|
||||
package prom_metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
MsgOfflinePushFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_offline_push_failed_total",
|
||||
Help: "The number of msg failed offline pushed",
|
||||
})
|
||||
)
|
||||
28
pkg/common/prom_metrics/transfer.go
Normal file
@ -0,0 +1,28 @@
|
||||
package prom_metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
MsgInsertRedisSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_redis_success_total",
|
||||
Help: "The number of successful insert msg to redis",
|
||||
})
|
||||
MsgInsertRedisFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_redis_failed_total",
|
||||
Help: "The number of failed insert msg to redis",
|
||||
})
|
||||
MsgInsertMongoSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_mongo_success_total",
|
||||
Help: "The number of successful insert msg to mongo",
|
||||
})
|
||||
MsgInsertMongoFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_mongo_failed_total",
|
||||
Help: "The number of failed insert msg to mongo",
|
||||
})
|
||||
SeqSetFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "seq_set_failed_total",
|
||||
Help: "The number of failed set seq",
|
||||
})
|
||||
)
|
||||
@ -1,15 +0,0 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prome // import "github.com/openimsdk/open-im-server/v3/pkg/common/prome"
|
||||
@ -1,470 +0,0 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prome
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
// auth rpc.
|
||||
UserLoginCounter prometheus.Counter
|
||||
UserRegisterCounter prometheus.Counter
|
||||
|
||||
// seg.
|
||||
SeqGetSuccessCounter prometheus.Counter
|
||||
SeqGetFailedCounter prometheus.Counter
|
||||
SeqSetSuccessCounter prometheus.Counter
|
||||
SeqSetFailedCounter prometheus.Counter
|
||||
|
||||
// msg-db.
|
||||
MsgInsertRedisSuccessCounter prometheus.Counter
|
||||
MsgInsertRedisFailedCounter prometheus.Counter
|
||||
MsgInsertMongoSuccessCounter prometheus.Counter
|
||||
MsgInsertMongoFailedCounter prometheus.Counter
|
||||
MsgPullFromRedisSuccessCounter prometheus.Counter
|
||||
MsgPullFromRedisFailedCounter prometheus.Counter
|
||||
MsgPullFromMongoSuccessCounter prometheus.Counter
|
||||
MsgPullFromMongoFailedCounter prometheus.Counter
|
||||
|
||||
// msg-ws.
|
||||
MsgRecvTotalCounter prometheus.Counter
|
||||
GetNewestSeqTotalCounter prometheus.Counter
|
||||
PullMsgBySeqListTotalCounter prometheus.Counter
|
||||
|
||||
SingleChatMsgRecvSuccessCounter prometheus.Counter
|
||||
GroupChatMsgRecvSuccessCounter prometheus.Counter
|
||||
WorkSuperGroupChatMsgRecvSuccessCounter prometheus.Counter
|
||||
OnlineUserGauge prometheus.Gauge
|
||||
|
||||
// msg-msg.
|
||||
SingleChatMsgProcessSuccessCounter prometheus.Counter
|
||||
SingleChatMsgProcessFailedCounter prometheus.Counter
|
||||
GroupChatMsgProcessSuccessCounter prometheus.Counter
|
||||
GroupChatMsgProcessFailedCounter prometheus.Counter
|
||||
WorkSuperGroupChatMsgProcessSuccessCounter prometheus.Counter
|
||||
WorkSuperGroupChatMsgProcessFailedCounter prometheus.Counter
|
||||
|
||||
// msg-push.
|
||||
MsgOnlinePushSuccessCounter prometheus.Counter
|
||||
MsgOfflinePushSuccessCounter prometheus.Counter
|
||||
MsgOfflinePushFailedCounter prometheus.Counter
|
||||
// api.
|
||||
ApiRequestCounter prometheus.Counter
|
||||
ApiRequestSuccessCounter prometheus.Counter
|
||||
ApiRequestFailedCounter prometheus.Counter
|
||||
|
||||
// grpc.
|
||||
GrpcRequestCounter prometheus.Counter
|
||||
GrpcRequestSuccessCounter prometheus.Counter
|
||||
GrpcRequestFailedCounter prometheus.Counter
|
||||
|
||||
SendMsgCounter prometheus.Counter
|
||||
|
||||
// conversation.
|
||||
ConversationCreateSuccessCounter prometheus.Counter
|
||||
ConversationCreateFailedCounter prometheus.Counter
|
||||
)
|
||||
|
||||
func NewUserLoginCounter() {
|
||||
if UserLoginCounter != nil {
|
||||
return
|
||||
}
|
||||
UserLoginCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "user_login",
|
||||
Help: "The number of user login",
|
||||
})
|
||||
}
|
||||
|
||||
func NewUserRegisterCounter() {
|
||||
if UserRegisterCounter != nil {
|
||||
return
|
||||
}
|
||||
UserRegisterCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "user_register",
|
||||
Help: "The number of user register",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSeqGetSuccessCounter() {
|
||||
if SeqGetSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
SeqGetSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "seq_get_success",
|
||||
Help: "The number of successful get seq",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSeqGetFailedCounter() {
|
||||
if SeqGetFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
SeqGetFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "seq_get_failed",
|
||||
Help: "The number of failed get seq",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSeqSetSuccessCounter() {
|
||||
if SeqSetSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
SeqSetSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "seq_set_success",
|
||||
Help: "The number of successful set seq",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSeqSetFailedCounter() {
|
||||
if SeqSetFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
SeqSetFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "seq_set_failed",
|
||||
Help: "The number of failed set seq",
|
||||
})
|
||||
}
|
||||
|
||||
func NewApiRequestCounter() {
|
||||
if ApiRequestCounter != nil {
|
||||
return
|
||||
}
|
||||
ApiRequestCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_request",
|
||||
Help: "The number of api request",
|
||||
})
|
||||
}
|
||||
|
||||
func NewApiRequestSuccessCounter() {
|
||||
if ApiRequestSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
ApiRequestSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_request_success",
|
||||
Help: "The number of api request success",
|
||||
})
|
||||
}
|
||||
|
||||
func NewApiRequestFailedCounter() {
|
||||
if ApiRequestFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
ApiRequestFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "api_request_failed",
|
||||
Help: "The number of api request failed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewGrpcRequestCounter() {
|
||||
if GrpcRequestCounter != nil {
|
||||
return
|
||||
}
|
||||
GrpcRequestCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "grpc_request",
|
||||
Help: "The number of api request",
|
||||
})
|
||||
}
|
||||
|
||||
func NewGrpcRequestSuccessCounter() {
|
||||
if GrpcRequestSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
GrpcRequestSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "grpc_request_success",
|
||||
Help: "The number of grpc request success",
|
||||
})
|
||||
}
|
||||
|
||||
func NewGrpcRequestFailedCounter() {
|
||||
if GrpcRequestFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
GrpcRequestFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "grpc_request_failed",
|
||||
Help: "The number of grpc request failed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSendMsgCount() {
|
||||
if SendMsgCounter != nil {
|
||||
return
|
||||
}
|
||||
SendMsgCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "send_msg",
|
||||
Help: "The number of send msg",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgInsertRedisSuccessCounter() {
|
||||
if MsgInsertRedisSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgInsertRedisSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_redis_success",
|
||||
Help: "The number of successful insert msg to redis",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgInsertRedisFailedCounter() {
|
||||
if MsgInsertRedisFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgInsertRedisFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_redis_failed",
|
||||
Help: "The number of failed insert msg to redis",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgInsertMongoSuccessCounter() {
|
||||
if MsgInsertMongoSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgInsertMongoSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_mongo_success",
|
||||
Help: "The number of successful insert msg to mongo",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgInsertMongoFailedCounter() {
|
||||
if MsgInsertMongoFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgInsertMongoFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_mongo_failed",
|
||||
Help: "The number of failed insert msg to mongo",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgPullFromRedisSuccessCounter() {
|
||||
if MsgPullFromRedisSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgPullFromRedisSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_pull_from_redis_success",
|
||||
Help: "The number of successful pull msg from redis",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgPullFromRedisFailedCounter() {
|
||||
if MsgPullFromRedisFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgPullFromRedisFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_pull_from_redis_failed",
|
||||
Help: "The number of failed pull msg from redis",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgPullFromMongoSuccessCounter() {
|
||||
if MsgPullFromMongoSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgPullFromMongoSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_pull_from_mongo_success",
|
||||
Help: "The number of successful pull msg from mongo",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgPullFromMongoFailedCounter() {
|
||||
if MsgPullFromMongoFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgPullFromMongoFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_pull_from_mongo_failed",
|
||||
Help: "The number of failed pull msg from mongo",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgRecvTotalCounter() {
|
||||
if MsgRecvTotalCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgRecvTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_recv_total",
|
||||
Help: "The number of msg received",
|
||||
})
|
||||
}
|
||||
|
||||
func NewGetNewestSeqTotalCounter() {
|
||||
if GetNewestSeqTotalCounter != nil {
|
||||
return
|
||||
}
|
||||
GetNewestSeqTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "get_newest_seq_total",
|
||||
Help: "the number of get newest seq",
|
||||
})
|
||||
}
|
||||
|
||||
func NewPullMsgBySeqListTotalCounter() {
|
||||
if PullMsgBySeqListTotalCounter != nil {
|
||||
return
|
||||
}
|
||||
PullMsgBySeqListTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "pull_msg_by_seq_list_total",
|
||||
Help: "The number of pull msg by seq list",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSingleChatMsgRecvSuccessCounter() {
|
||||
if SingleChatMsgRecvSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
SingleChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "single_chat_msg_recv_success",
|
||||
Help: "The number of single chat msg successful received ",
|
||||
})
|
||||
}
|
||||
|
||||
func NewGroupChatMsgRecvSuccessCounter() {
|
||||
if GroupChatMsgRecvSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
GroupChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "group_chat_msg_recv_success",
|
||||
Help: "The number of group chat msg successful received",
|
||||
})
|
||||
}
|
||||
|
||||
func NewWorkSuperGroupChatMsgRecvSuccessCounter() {
|
||||
if WorkSuperGroupChatMsgRecvSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
WorkSuperGroupChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "work_super_group_chat_msg_recv_success",
|
||||
Help: "The number of work/super group chat msg successful received",
|
||||
})
|
||||
}
|
||||
|
||||
func NewOnlineUserGauges() {
|
||||
if OnlineUserGauge != nil {
|
||||
return
|
||||
}
|
||||
OnlineUserGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "online_user_num",
|
||||
Help: "The number of online user num",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSingleChatMsgProcessSuccessCounter() {
|
||||
if SingleChatMsgProcessSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
SingleChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "single_chat_msg_process_success",
|
||||
Help: "The number of single chat msg successful processed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewSingleChatMsgProcessFailedCounter() {
|
||||
if SingleChatMsgProcessFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
SingleChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "single_chat_msg_process_failed",
|
||||
Help: "The number of single chat msg failed processed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewGroupChatMsgProcessSuccessCounter() {
|
||||
if GroupChatMsgProcessSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
GroupChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "group_chat_msg_process_success",
|
||||
Help: "The number of group chat msg successful processed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewGroupChatMsgProcessFailedCounter() {
|
||||
if GroupChatMsgProcessFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
GroupChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "group_chat_msg_process_failed",
|
||||
Help: "The number of group chat msg failed processed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewWorkSuperGroupChatMsgProcessSuccessCounter() {
|
||||
if WorkSuperGroupChatMsgProcessSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
WorkSuperGroupChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "work_super_group_chat_msg_process_success",
|
||||
Help: "The number of work/super group chat msg successful processed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewWorkSuperGroupChatMsgProcessFailedCounter() {
|
||||
if WorkSuperGroupChatMsgProcessFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
WorkSuperGroupChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "work_super_group_chat_msg_process_failed",
|
||||
Help: "The number of work/super group chat msg failed processed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgOnlinePushSuccessCounter() {
|
||||
if MsgOnlinePushSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgOnlinePushSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_online_push_success",
|
||||
Help: "The number of msg successful online pushed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgOfflinePushSuccessCounter() {
|
||||
if MsgOfflinePushSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgOfflinePushSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_offline_push_success",
|
||||
Help: "The number of msg successful offline pushed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewMsgOfflinePushFailedCounter() {
|
||||
if MsgOfflinePushFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
MsgOfflinePushFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_offline_push_failed",
|
||||
Help: "The number of msg failed offline pushed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewConversationCreateSuccessCounter() {
|
||||
if ConversationCreateSuccessCounter != nil {
|
||||
return
|
||||
}
|
||||
ConversationCreateSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "conversation_push_success",
|
||||
Help: "The number of conversation successful pushed",
|
||||
})
|
||||
}
|
||||
|
||||
func NewConversationCreateFailedCounter() {
|
||||
if ConversationCreateFailedCounter != nil {
|
||||
return
|
||||
}
|
||||
ConversationCreateFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "conversation_push_failed",
|
||||
Help: "The number of conversation failed pushed",
|
||||
})
|
||||
}
|
||||
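All of the NewXxxCounter constructors above share the same shape: bail out if the package-level counter is already set, otherwise register it once with promauto. As a minimal sketch of how that guard could be factored out (newCounterOnce is a hypothetical helper, not part of this change):

package prome

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var SendMsgCounter prometheus.Counter

// newCounterOnce keeps an already-initialized counter and otherwise registers a
// new one with the default registry, mirroring the nil-guard used by every
// constructor in this file.
func newCounterOnce(existing prometheus.Counter, name, help string) prometheus.Counter {
	if existing != nil {
		return existing
	}
	return promauto.NewCounter(prometheus.CounterOpts{Name: name, Help: help})
}

// NewSendMsgCount expressed in terms of the helper; the other constructors
// would follow the same one-liner pattern.
func NewSendMsgCount() {
	SendMsgCounter = newCounterOnce(SendMsgCounter, "send_msg", "The number of send msg")
}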
@ -1,97 +0,0 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prome
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
func StartPrometheusSrv(prometheusPort int) error {
|
||||
if config.Config.Prometheus.Enable {
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
err := http.ListenAndServe(":"+strconv.Itoa(prometheusPort), nil)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func PrometheusHandler() gin.HandlerFunc {
|
||||
h := promhttp.Handler()
|
||||
return func(c *gin.Context) {
|
||||
h.ServeHTTP(c.Writer, c.Request)
|
||||
}
|
||||
}
|
||||
|
||||
type responseBodyWriter struct {
|
||||
gin.ResponseWriter
|
||||
body *bytes.Buffer
|
||||
}
|
||||
|
||||
func (r responseBodyWriter) Write(b []byte) (int, error) {
|
||||
r.body.Write(b)
|
||||
return r.ResponseWriter.Write(b)
|
||||
}
|
||||
|
||||
func PrometheusMiddleware(c *gin.Context) {
|
||||
Inc(ApiRequestCounter)
|
||||
w := &responseBodyWriter{body: &bytes.Buffer{}, ResponseWriter: c.Writer}
|
||||
c.Writer = w
|
||||
c.Next()
|
||||
if c.Writer.Status() == http.StatusOK {
|
||||
Inc(ApiRequestSuccessCounter)
|
||||
} else {
|
||||
Inc(ApiRequestFailedCounter)
|
||||
}
|
||||
}
|
||||
|
||||
func Inc(counter prometheus.Counter) {
|
||||
if config.Config.Prometheus.Enable {
|
||||
if counter != nil {
|
||||
counter.Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Add(counter prometheus.Counter, add int) {
|
||||
if config.Config.Prometheus.Enable {
|
||||
if counter != nil {
|
||||
counter.Add(float64(add))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GaugeInc(gauges prometheus.Gauge) {
|
||||
if config.Config.Prometheus.Enable {
|
||||
if gauges != nil {
|
||||
gauges.Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GaugeDec(gauges prometheus.Gauge) {
|
||||
if config.Config.Prometheus.Enable {
|
||||
if gauges != nil {
|
||||
gauges.Dec()
|
||||
}
|
||||
}
|
||||
}
|
||||
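The file deleted in the hunk above exposed /metrics, wrapped Gin requests in PrometheusMiddleware, and bumped the api_request counters based on the response status. For reference, a self-contained sketch of that counting pattern (the middleware name, routes and port below are made up for illustration; the metric names are taken from the deleted code):

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	apiRequests  = promauto.NewCounter(prometheus.CounterOpts{Name: "api_request", Help: "The number of api request"})
	apiSuccesses = promauto.NewCounter(prometheus.CounterOpts{Name: "api_request_success", Help: "The number of api request success"})
	apiFailures  = promauto.NewCounter(prometheus.CounterOpts{Name: "api_request_failed", Help: "The number of api request failed"})
)

// countRequests mirrors the removed PrometheusMiddleware: every request is counted,
// a 200 response counts as success, anything else as failure.
func countRequests(c *gin.Context) {
	apiRequests.Inc()
	c.Next()
	if c.Writer.Status() == http.StatusOK {
		apiSuccesses.Inc()
	} else {
		apiFailures.Inc()
	}
}

func main() {
	r := gin.New()
	r.Use(countRequests)
	r.GET("/metrics", gin.WrapH(promhttp.Handler()))
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	_ = r.Run(":8080")
}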
@ -16,7 +16,12 @@ package startrpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prom_metrics"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
|
||||
@ -29,7 +34,6 @@ import (
|
||||
"github.com/OpenIMSDK/tools/discoveryregistry"
|
||||
"github.com/OpenIMSDK/tools/mw"
|
||||
"github.com/OpenIMSDK/tools/network"
|
||||
"github.com/OpenIMSDK/tools/prome"
|
||||
"github.com/OpenIMSDK/tools/utils"
|
||||
)
|
||||
|
||||
@ -41,7 +45,7 @@ func Start(
|
||||
rpcFn func(client discoveryregistry.SvcDiscoveryRegistry, server *grpc.Server) error,
|
||||
options ...grpc.ServerOption,
|
||||
) error {
|
||||
fmt.Printf("start %s server, port: %d, prometheusPort: %d, OpenIM version: %s",
|
||||
fmt.Printf("start %s server, port: %d, prometheusPort: %d, OpenIM version: %s\n",
|
||||
rpcRegisterName, rpcPort, prometheusPort, config.Version)
|
||||
listener, err := net.Listen(
|
||||
"tcp",
|
||||
@ -61,16 +65,15 @@ func Start(
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var reg *prometheus.Registry
|
||||
var metric *grpcprometheus.ServerMetrics
|
||||
// ctx middleware
|
||||
if config.Config.Prometheus.Enable {
|
||||
prome.NewGrpcRequestCounter()
|
||||
prome.NewGrpcRequestFailedCounter()
|
||||
prome.NewGrpcRequestSuccessCounter()
|
||||
unaryInterceptor := mw.InterceptChain(grpcprometheus.UnaryServerInterceptor, mw.RpcServerInterceptor)
|
||||
options = append(options, []grpc.ServerOption{
|
||||
grpc.StreamInterceptor(grpcprometheus.StreamServerInterceptor),
|
||||
grpc.UnaryInterceptor(unaryInterceptor),
|
||||
}...)
|
||||
//////////////////////////
|
||||
cusMetrics := prom_metrics.GetGrpcCusMetrics(rpcRegisterName)
|
||||
reg, metric, err = prom_metrics.NewGrpcPromObj(cusMetrics)
|
||||
options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
|
||||
grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
|
||||
} else {
|
||||
options = append(options, mw.GrpcServer())
|
||||
}
|
||||
@ -91,8 +94,11 @@ func Start(
|
||||
}
|
||||
go func() {
|
||||
if config.Config.Prometheus.Enable && prometheusPort != 0 {
|
||||
if err := prome.StartPrometheusSrv(prometheusPort); err != nil {
|
||||
panic(err.Error())
|
||||
metric.InitializeMetrics(srv)
|
||||
// Create a HTTP server for prometheus.
|
||||
httpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
|
||||
if err := httpServer.ListenAndServe(); err != nil {
|
||||
log.Fatal("Unable to start a http server.")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
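The new Start path above builds a dedicated Prometheus registry plus gRPC server metrics via prom_metrics.NewGrpcPromObj and serves them with promhttp.HandlerFor. A stripped-down sketch of that wiring, assuming the upstream go-grpc-prometheus package (service registration, listener address and ports are placeholders, not the project's actual values):

package main

import (
	"log"
	"net"
	"net/http"

	grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	// One registry per process, so the /metrics handler only exposes what is registered here.
	reg := prometheus.NewRegistry()
	metrics := grpcprometheus.NewServerMetrics()
	reg.MustRegister(metrics)

	srv := grpc.NewServer(
		grpc.StreamInterceptor(metrics.StreamServerInterceptor()),
		grpc.UnaryInterceptor(metrics.UnaryServerInterceptor()),
	)
	// Register gRPC services on srv here, then pre-populate per-method metrics.
	metrics.InitializeMetrics(srv)

	// Serve Prometheus metrics on a separate HTTP port, as Start does with prometheusPort.
	go func() {
		httpSrv := &http.Server{
			Addr:    "0.0.0.0:20100", // placeholder port
			Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
		}
		if err := httpSrv.ListenAndServe(); err != nil {
			log.Fatal(err)
		}
	}()

	lis, err := net.Listen("tcp", ":10110") // placeholder rpc port
	if err != nil {
		log.Fatal(err)
	}
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}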
@ -118,10 +118,9 @@ func GetNotificationConversationIDByConversationID(conversationID string) string
l := strings.Split(conversationID, "_")
if len(l) > 1 {
l[0] = "n"
return strings.Join(l, "_")
} else {
return ""
return conversationID
}
return ""
}

func GetNotificationConversationID(sessionType int, ids ...string) string {

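If I read the rewritten helper correctly, a conversation ID such as si_123_456 still maps to n_123_456, while an ID with no "_" segments is now returned unchanged instead of being collapsed to an empty string. A small standalone sketch of that behavior (the function here is a local copy for illustration, not the project's exported symbol):

package main

import (
	"fmt"
	"strings"
)

// getNotificationConversationID mirrors the patched helper: replace the prefix with
// "n" when the ID has an underscore-separated prefix, otherwise return it as-is.
func getNotificationConversationID(conversationID string) string {
	l := strings.Split(conversationID, "_")
	if len(l) > 1 {
		l[0] = "n"
		return strings.Join(l, "_")
	}
	return conversationID
}

func main() {
	fmt.Println(getNotificationConversationID("si_123_456")) // n_123_456
	fmt.Println(getNotificationConversationID("abc"))        // abc (previously "")
}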
@ -24,11 +24,9 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
||||
|
||||
source "${OPENIM_ROOT}/scripts/lib/init.sh"
|
||||
|
||||
# 定义一个配置文件数组,其中包含需要生成的配置文件的名称路径
|
||||
# (en: Define a profile array that contains the name path of the profile to be generated.)
|
||||
readonly ENV_FILE=${ENV_FILE:-"${OPENIM_ROOT}/scripts/install/environment.sh"}
|
||||
|
||||
# 定义关联数组,其中键是模板文件,值是对应的输出文件
|
||||
# (en: Defines an associative array where the keys are the template files and the values are the corresponding output files.)
|
||||
declare -A TEMPLATES=(
|
||||
["${OPENIM_ROOT}/deployments/templates/env_template.yaml"]="${OPENIM_ROOT}/.env"
|
||||
|
||||
@ -25,10 +25,10 @@ OPENIM_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
|
||||
LOCAL_OUTPUT_ROOT=""${OPENIM_ROOT}"/${OUT_DIR:-_output}"
|
||||
source "${OPENIM_ROOT}/scripts/lib/init.sh"
|
||||
|
||||
#TODO: Access to the IP networks outside, or you want to use the IP network
|
||||
# IP=127.0.0.1
|
||||
if [ -z "${IP}" ]; then
|
||||
IP=$(openim::util::get_server_ip)
|
||||
#TODO: Access to the OPENIM_IP networks outside, or you want to use the OPENIM_IP network
|
||||
# OPENIM_IP=127.0.0.1
|
||||
if [ -z "${OPENIM_IP}" ]; then
|
||||
OPENIM_IP=$(openim::util::get_server_ip)
|
||||
fi
|
||||
|
||||
# config.gateway custom bridge modes
|
||||
@ -49,14 +49,14 @@ def "DATA_DIR" "${OPENIM_ROOT}"
def "USER" "root"

# Use one unified password so it is easy to remember
def "PASSWORD" "openIM123"
readonly PASSWORD=${PASSWORD:-'openIM123'}

# Use one unified database name so it is easy to manage
def "DATABASE_NAME" "openIM_v3"

# The openim user on the Linux system
def "LINUX_USERNAME" "openim"
def "LINUX_PASSWORD" "${PASSWORD}"
readonly LINUX_PASSWORD=${LINUX_PASSWORD:-"${PASSWORD}"}

# Installation directory
def "INSTALL_DIR" "${LOCAL_OUTPUT_ROOT}/installs"
@ -167,7 +167,8 @@ def "ZOOKEEPER_PASSWORD" "" # Zookeeper password
def "MYSQL_PORT" "13306" # MySQL port
def "MYSQL_ADDRESS" "${DOCKER_BRIDGE_GATEWAY}" # MySQL address
def "MYSQL_USERNAME" "${USER}" # MySQL username
def "MYSQL_PASSWORD" "${PASSWORD}" # MySQL password
# MySQL password
readonly MYSQL_PASSWORD=${MYSQL_PASSWORD:-"${PASSWORD}"}
def "MYSQL_DATABASE" "${DATABASE_NAME}" # MySQL database name
def "MYSQL_MAX_OPEN_CONN" "1000" # Maximum number of open connections
def "MYSQL_MAX_IDLE_CONN" "100" # Maximum number of idle connections
@ -181,12 +182,13 @@ def "MONGO_PORT" "37017" # MongoDB port
def "MONGO_ADDRESS" "${DOCKER_BRIDGE_GATEWAY}" # MongoDB address
def "MONGO_DATABASE" "${DATABASE_NAME}" # MongoDB database name
def "MONGO_USERNAME" "${USER}" # MongoDB username
def "MONGO_PASSWORD" "${PASSWORD}" # MongoDB password
# MongoDB password
readonly MONGO_PASSWORD=${MONGO_PASSWORD:-"${PASSWORD}"}
def "MONGO_MAX_POOL_SIZE" "100" # Maximum connection pool size

###################### Object configuration ######################
# The app must be able to reach this IP and port, or a domain name
readonly API_URL=${API_URL:-"http://${IP}:${API_OPENIM_PORT}"}
readonly API_URL=${API_URL:-"http://${OPENIM_IP}:${API_OPENIM_PORT}"}

def "OBJECT_ENABLE" "minio" # Whether object storage is enabled
# Object storage API address
@ -197,9 +199,9 @@ def "MINIO_PORT" "10005" # MinIO port
def MINIO_ADDRESS "${DOCKER_BRIDGE_GATEWAY}"
readonly MINIO_ENDPOINT=${MINIO_ENDPOINT:-"http://${MINIO_ADDRESS}:${MINIO_PORT}"}
def "MINIO_ACCESS_KEY" "${USER}" # MinIO access key ID
def "MINIO_SECRET_KEY" "${PASSWORD}" # MinIO secret key
readonly MINIO_SECRET_KEY=${MINIO_SECRET_KEY:-"${PASSWORD}"}
def "MINIO_SESSION_TOKEN" # MinIO session token
readonly MINIO_SIGN_ENDPOINT=${MINIO_SIGN_ENDPOINT:-"http://${IP}:${MINIO_PORT}"} # signEndpoint is the public MinIO address
readonly MINIO_SIGN_ENDPOINT=${MINIO_SIGN_ENDPOINT:-"http://${OPENIM_IP}:${MINIO_PORT}"} # signEndpoint is the public MinIO address
def "MINIO_PUBLIC_READ" "false" # Public read

# Tencent Cloud COS bucket URL
@ -220,7 +222,7 @@ def "OSS_PUBLIC_READ" "false" # Public read
def "REDIS_PORT" "16379" # Redis port
def "REDIS_ADDRESS" "${DOCKER_BRIDGE_GATEWAY}" # Redis address
def "REDIS_USERNAME" # Redis username
def "REDIS_PASSWORD" "${PASSWORD}" # Redis password
readonly REDIS_PASSWORD=${REDIS_PASSWORD:-"${PASSWORD}"}

###################### Kafka configuration ######################
def "KAFKA_USERNAME" # `Kafka` username
@ -329,7 +331,8 @@ def "RETAIN_CHAT_RECORDS" "365" # Retain chat records
readonly CHAT_RECORDS_CLEAR_TIME=${CHAT_RECORDS_CLEAR_TIME:-'0 2 * * 3'}
# Message destruct time
readonly MSG_DESTRUCT_TIME=${MSG_DESTRUCT_TIME:-'0 2 * * *'}
def "SECRET" "${PASSWORD}" # Secret key
# Secret key
readonly SECRET=${SECRET:-"${PASSWORD}"}
def "TOKEN_EXPIRE" "90" # Token expiry
def "FRIEND_VERIFY" "false" # Friend verification
def "IOS_PUSH_SOUND" "xxx" # iOS push sound
@ -338,6 +341,9 @@ def "IOS_PRODUCTION" "false" # iOS production

###################### Prometheus configuration ######################
def "PROMETHEUS_ENABLE" "false" # Whether Prometheus is enabled
def "PROMETHEUS_URL" "/prometheus"
# Prometheus port for the API service
readonly API_PROM_PORT=${API_PROM_PORT:-'20100'}
# Prometheus port for the User service
readonly USER_PROM_PORT=${USER_PROM_PORT:-'20110'}
# Prometheus port for the Friend service

@ -49,13 +49,13 @@ function openim::msgtransfer::start()
|
||||
openim::log::error_exit "OPENIM_MSGGATEWAY_NUM must be equal to the number of MSG_TRANSFER_PROM_PORTS"
|
||||
fi
|
||||
|
||||
for (( i=1; i<=$OPENIM_MSGGATEWAY_NUM; i++ )) do
|
||||
for (( i=0; i<$OPENIM_MSGGATEWAY_NUM; i++ )) do
|
||||
openim::log::info "prometheus port: ${MSG_TRANSFER_PROM_PORTS[$i]}"
|
||||
PROMETHEUS_PORT_OPTION=""
|
||||
if [[ -n "${OPENIM_PROMETHEUS_PORTS[$i]}" ]]; then
|
||||
PROMETHEUS_PORT_OPTION="--prometheus_port ${OPENIM_PROMETHEUS_PORTS[$i]}"
|
||||
fi
|
||||
nohup ${OPENIM_MSGTRANSFER_BINARY} ${PROMETHEUS_PORT_OPTION} -c ${OPENIM_MSGTRANSFER_CONFIG} >> ${LOG_FILE} 2>&1 &
|
||||
nohup ${OPENIM_MSGTRANSFER_BINARY} ${PROMETHEUS_PORT_OPTION} -c ${OPENIM_MSGTRANSFER_CONFIG} -n ${i}>> ${LOG_FILE} 2>&1 &
|
||||
done
|
||||
|
||||
openim::util::check_process_names "${OPENIM_OUTPUT_HOSTBIN}/${SERVER_NAME}"
|
||||
|
||||
@ -1,561 +1,332 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright © 2023 OpenIM. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# The root of the build/dist directory
|
||||
IAM_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
|
||||
[[ -z ${COMMON_SOURCED} ]] && source ${IAM_ROOT}/scripts/install/common.sh
|
||||
|
||||
# A set of helpers for tests
|
||||
# API Server API Address:Port
|
||||
INSECURE_OPENIMAPI=${IAM_APISERVER_HOST}:${API_OPENIM_PORT}
|
||||
INSECURE_OPENIMAUTO=${OPENIM_RPC_AUTH_HOST}:${OPENIM_AUTH_PORT}
|
||||
|
||||
openim::test::clear_all() {
|
||||
if openim::test::if_supports_resource "rc" ; then
|
||||
# shellcheck disable=SC2154
|
||||
# Disabling because "kube_flags" is set in a parent script
|
||||
kubectl delete "${kube_flags[@]}" rc --all --grace-period=0 --force
|
||||
fi
|
||||
if openim::test::if_supports_resource "pods" ; then
|
||||
kubectl delete "${kube_flags[@]}" pods --all --grace-period=0 --force
|
||||
fi
|
||||
Header="-HContent-Type: application/json"
|
||||
CCURL="curl -f -s -XPOST" # Create
|
||||
UCURL="curl -f -s -XPUT" # Update
|
||||
RCURL="curl -f -s -XGET" # Retrieve
|
||||
DCURL="curl -f -s -XDELETE" # Delete
|
||||
|
||||
openim::test::user()
|
||||
{
|
||||
token="-HAuthorization: Bearer $(openim::test::login)"
|
||||
|
||||
# 1. If colin, mark, john users exist, clear them first
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin; echo
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/mark; echo
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/john; echo
|
||||
|
||||
# 2. Create colin, mark, john users
|
||||
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/users \
|
||||
-d'{"password":"User@2021","metadata":{"name":"colin"},"nickname":"colin","email":"colin@foxmail.com","phone":"1812884xxxx"}'; echo
|
||||
|
||||
# 3. List all users
|
||||
${RCURL} "${token}" "http://${INSECURE_OPENIMAPI}/v1/users?offset=0&limit=10"; echo
|
||||
|
||||
# 4. Get detailed information of colin user
|
||||
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin; echo
|
||||
|
||||
# 5. Modify colin user
|
||||
${UCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin \
|
||||
-d'{"nickname":"colin","email":"colin_modified@foxmail.com","phone":"1812884xxxx"}'; echo
|
||||
|
||||
# 6. Delete colin user
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/users/colin; echo
|
||||
|
||||
# 7. Batch delete users
|
||||
${DCURL} "${token}" "http://${INSECURE_OPENIMAPI}/v1/users?name=mark&name=john"; echo
|
||||
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/user test passed!\033[0m')"
|
||||
}
|
||||
|
||||
# Prints the calling file and line number $1 levels deep
|
||||
# Defaults to 2 levels so you can call this to find your own caller
|
||||
openim::test::get_caller() {
|
||||
local levels=${1:-2}
|
||||
local caller_file="${BASH_SOURCE[${levels}]}"
|
||||
local caller_line="${BASH_LINENO[${levels}-1]}"
|
||||
echo "$(basename "${caller_file}"):${caller_line}"
|
||||
# userRouterGroup := r.Group("/user")
|
||||
# {
|
||||
# userRouterGroup.POST("/user_register", u.UserRegister)
|
||||
# userRouterGroup.POST("/update_user_info", ParseToken, u.UpdateUserInfo)
|
||||
# userRouterGroup.POST("/set_global_msg_recv_opt", ParseToken, u.SetGlobalRecvMessageOpt)
|
||||
# userRouterGroup.POST("/get_users_info", ParseToken, u.GetUsersPublicInfo)
|
||||
# userRouterGroup.POST("/get_all_users_uid", ParseToken, u.GetAllUsersID)
|
||||
# userRouterGroup.POST("/account_check", ParseToken, u.AccountCheck)
|
||||
# userRouterGroup.POST("/get_users", ParseToken, u.GetUsers)
|
||||
# userRouterGroup.POST("/get_users_online_status", ParseToken, u.GetUsersOnlineStatus)
|
||||
# userRouterGroup.POST("/get_users_online_token_detail", ParseToken, u.GetUsersOnlineTokenDetail)
|
||||
# userRouterGroup.POST("/subscribe_users_status", ParseToken, u.SubscriberStatus)
|
||||
# userRouterGroup.POST("/get_users_status", ParseToken, u.GetUserStatus)
|
||||
# userRouterGroup.POST("/get_subscribe_users_status", ParseToken, u.GetSubscribeUsersStatus)
|
||||
# }
|
||||
openim::test::group()
|
||||
{
|
||||
token="-HAuthorization: Bearer $(openim::test::login)"
|
||||
}
|
||||
|
||||
# Force exact match of a returned result for a object query. Wrap this with || to support multiple
|
||||
# valid return types.
|
||||
# This runs `kubectl get` once and asserts that the result is as expected.
|
||||
# $1: Object on which get should be run
|
||||
# $2: The go-template to run on the result
|
||||
# $3: The expected output
|
||||
# $4: Additional args to be passed to kubectl
|
||||
openim::test::get_object_assert() {
|
||||
openim::test::object_assert 1 "$@"
|
||||
# Define a function to register a user
|
||||
openim::register_user()
|
||||
{
|
||||
user_register_response=$(${CCURL} "${Header}" http://localhost:10002/user/user_register \
|
||||
-d'{
|
||||
"secret": "openIM123",
|
||||
"users": [{"userID": "11111112","nickname": "yourNickname","faceURL": "yourFaceURL"}]
|
||||
}')
|
||||
|
||||
echo "$user_register_response"
|
||||
}
|
||||
|
||||
# Asserts that the output of a given get query is as expected.
|
||||
# Runs the query multiple times before failing it.
|
||||
# $1: Object on which get should be run
|
||||
# $2: The go-template to run on the result
|
||||
# $3: The expected output
|
||||
# $4: Additional args to be passed to kubectl
|
||||
openim::test::wait_object_assert() {
|
||||
openim::test::object_assert 10 "$@"
|
||||
# Define a function to get a token
|
||||
openim::get_token()
|
||||
{
|
||||
token_response=$(${CCURL} "${Header}" http://localhost:10002/auth/user_token \
|
||||
-d'{
|
||||
"secret": "openIM123",
|
||||
"platformID": 1,
|
||||
"userID": "11111112"
|
||||
}')
|
||||
|
||||
token=$(echo $token_response | grep -Po 'token[" :]+\K[^"]+')
|
||||
echo "$token"
|
||||
}
|
||||
|
||||
# Asserts that the output of a given get query is as expected.
|
||||
# Can run the query multiple times before failing it.
|
||||
# $1: Number of times the query should be run before failing it.
|
||||
# $2: Object on which get should be run
|
||||
# $3: The go-template to run on the result
|
||||
# $4: The expected output
|
||||
# $5: Additional args to be passed to kubectl
|
||||
openim::test::object_assert() {
|
||||
local tries=$1
|
||||
local object=$2
|
||||
local request=$3
|
||||
local expected=$4
|
||||
local args=${5:-}
|
||||
|
||||
for j in $(seq 1 "${tries}"); do
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling because to allow for expansion here
|
||||
res=$(kubectl get "${kube_flags[@]}" ${args} ${object} -o go-template="${request}")
|
||||
if [[ "${res}" =~ ^$expected$ ]]; then
|
||||
echo -n "${green}"
|
||||
echo "$(openim::test::get_caller 3): Successful get ${object} ${request}: ${res}"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
fi
|
||||
echo "Waiting for Get ${object} ${request} ${args}: expected: ${expected}, got: ${res}"
|
||||
sleep $((j-1))
|
||||
done
|
||||
|
||||
echo "${bold}${red}"
|
||||
echo "$(openim::test::get_caller 3): FAIL!"
|
||||
echo "Get ${object} ${request}"
|
||||
echo " Expected: ${expected}"
|
||||
echo " Got: ${res}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
# Define a function to check the account
|
||||
openim::check_account()
|
||||
{
|
||||
local token=$1
|
||||
account_check_response=$(${CCURL} "${Header}" -H"operationID: 1646445464564" -H"token: ${token}" http://localhost:10002/user/account_check \
|
||||
-d'{
|
||||
"checkUserIDs": ["11111111","11111112"]
|
||||
}')
|
||||
|
||||
echo "$account_check_response"
|
||||
}
|
||||
|
||||
openim::test::get_object_jsonpath_assert() {
|
||||
local object=$1
|
||||
local request=$2
|
||||
local expected=$3
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling to allow for expansion here
|
||||
res=$(kubectl get "${kube_flags[@]}" ${object} -o jsonpath=${request})
|
||||
|
||||
if [[ "${res}" =~ ^$expected$ ]]; then
|
||||
echo -n "${green}"
|
||||
echo "$(openim::test::get_caller): Successful get ${object} ${request}: ${res}"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "$(openim::test::get_caller): FAIL!"
|
||||
echo "Get ${object} ${request}"
|
||||
echo " Expected: ${expected}"
|
||||
echo " Got: ${res}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
openim::test::describe_object_assert() {
|
||||
local resource=$1
|
||||
local object=$2
|
||||
local matches=( "${@:3}" )
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling to allow for expansion here
|
||||
result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})
|
||||
|
||||
for match in "${matches[@]}"; do
|
||||
if grep -q "${match}" <<< "${result}"; then
|
||||
echo "matched ${match}"
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "$(openim::test::get_caller): FAIL!"
|
||||
echo "Describe ${resource} ${object}"
|
||||
echo " Expected Match: ${match}"
|
||||
echo " Not found in:"
|
||||
echo "${result}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo -n "${green}"
|
||||
echo "$(openim::test::get_caller): Successful describe ${resource} ${object}:"
|
||||
echo "${result}"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
}
|
||||
|
||||
openim::test::describe_object_events_assert() {
|
||||
local resource=$1
|
||||
local object=$2
|
||||
local showevents=${3:-"true"}
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling to allow for expansion here
|
||||
if [[ -z "${3:-}" ]]; then
|
||||
result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})
|
||||
else
|
||||
result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource} ${object})
|
||||
fi
|
||||
|
||||
if grep -q "No events.\|Events:" <<< "${result}"; then
|
||||
local has_events="true"
|
||||
else
|
||||
local has_events="false"
|
||||
fi
|
||||
if [[ "${showevents}" == "${has_events}" ]]; then
|
||||
echo -n "${green}"
|
||||
echo "$(openim::test::get_caller): Successful describe"
|
||||
echo "${result}"
|
||||
echo "${reset}"
|
||||
return 0
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "$(openim::test::get_caller): FAIL"
|
||||
if [[ "${showevents}" == "false" ]]; then
|
||||
echo " Events information should not be described in:"
|
||||
else
|
||||
echo " Events information not found in:"
|
||||
fi
|
||||
echo "${result}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
openim::test::describe_resource_assert() {
|
||||
local resource=$1
|
||||
local matches=( "${@:2}" )
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling to allow for expansion here
|
||||
result=$(kubectl describe "${kube_flags[@]}" ${resource})
|
||||
|
||||
for match in "${matches[@]}"; do
|
||||
if grep -q "${match}" <<< "${result}"; then
|
||||
echo "matched ${match}"
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL!"
|
||||
echo "Describe ${resource}"
|
||||
echo " Expected Match: ${match}"
|
||||
echo " Not found in:"
|
||||
echo "${result}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo -n "${green}"
|
||||
echo "Successful describe ${resource}:"
|
||||
echo "${result}"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
}
|
||||
|
||||
openim::test::describe_resource_events_assert() {
|
||||
local resource=$1
|
||||
local showevents=${2:-"true"}
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling to allow for expansion here
|
||||
result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource})
|
||||
|
||||
if grep -q "No events.\|Events:" <<< "${result}"; then
|
||||
local has_events="true"
|
||||
else
|
||||
local has_events="false"
|
||||
fi
|
||||
if [[ "${showevents}" == "${has_events}" ]]; then
|
||||
echo -n "${green}"
|
||||
echo "Successful describe"
|
||||
echo "${result}"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL"
|
||||
if [[ "${showevents}" == "false" ]]; then
|
||||
echo " Events information should not be described in:"
|
||||
else
|
||||
echo " Events information not found in:"
|
||||
fi
|
||||
echo "${result}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
openim::test::describe_resource_chunk_size_assert() {
|
||||
# $1: the target resource
|
||||
local resource=$1
|
||||
# $2: comma-separated list of additional resources that will be listed
|
||||
local additionalResources=${2:-}
|
||||
# Remaining args are flags to pass to kubectl
|
||||
local args=${3:-}
|
||||
|
||||
# Expect list requests for the target resource and the additional resources
|
||||
local expectLists
|
||||
IFS="," read -r -a expectLists <<< "${resource},${additionalResources}"
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling to allow for expansion here
|
||||
defaultResult=$(kubectl describe ${resource} --show-events=true -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
|
||||
for r in "${expectLists[@]}"; do
|
||||
if grep -q "${r}?.*limit=500" <<< "${defaultResult}"; then
|
||||
echo "query for ${r} had limit param"
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL!"
|
||||
echo "Describe ${resource}"
|
||||
echo " Expected limit param on request for: ${r}"
|
||||
echo " Not found in:"
|
||||
echo "${defaultResult}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling to allow for expansion here
|
||||
# Try a non-default chunk size
|
||||
customResult=$(kubectl describe ${resource} --show-events=false --chunk-size=10 -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
|
||||
if grep -q "${resource}?limit=10" <<< "${customResult}"; then
|
||||
echo "query for ${resource} had user-specified limit param"
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL!"
|
||||
echo "Describe ${resource}"
|
||||
echo " Expected limit param on request for: ${r}"
|
||||
echo " Not found in:"
|
||||
echo "${customResult}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -n "${green}"
|
||||
echo "Successful describe ${resource} verbose logs:"
|
||||
echo "${defaultResult}"
|
||||
echo -n "${reset}"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Compare sort-by resource name output (first column, skipping first line) with expected order specify in the last parameter
|
||||
openim::test::if_sort_by_has_correct_order() {
|
||||
local var
|
||||
var="$(echo "$1" | awk '{if(NR!=1) print $1}' | tr '\n' ':')"
|
||||
openim::test::if_has_string "${var}" "${@:$#}"
|
||||
}
|
||||
|
||||
openim::test::if_has_string() {
|
||||
local message=$1
|
||||
local match=$2
|
||||
|
||||
if grep -q "${match}" <<< "${message}"; then
|
||||
echo -n "${green}"
|
||||
echo "Successful"
|
||||
echo -n "${reset}"
|
||||
echo "message:${message}"
|
||||
echo "has:${match}"
|
||||
return 0
|
||||
else
|
||||
echo -n "${bold}${red}"
|
||||
echo "FAIL!"
|
||||
echo -n "${reset}"
|
||||
echo "message:${message}"
|
||||
echo "has not:${match}"
|
||||
caller
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
openim::test::if_has_not_string() {
|
||||
local message=$1
|
||||
local match=$2
|
||||
|
||||
if grep -q "${match}" <<< "${message}"; then
|
||||
echo -n "${bold}${red}"
|
||||
echo "FAIL!"
|
||||
echo -n "${reset}"
|
||||
echo "message:${message}"
|
||||
echo "has:${match}"
|
||||
caller
|
||||
return 1
|
||||
else
|
||||
echo -n "${green}"
|
||||
echo "Successful"
|
||||
echo -n "${reset}"
|
||||
echo "message:${message}"
|
||||
echo "has not:${match}"
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
openim::test::if_empty_string() {
|
||||
local match=$1
|
||||
if [ -n "${match}" ]; then
|
||||
echo -n "${bold}${red}"
|
||||
echo "FAIL!"
|
||||
echo "${match} is not empty"
|
||||
echo -n "${reset}"
|
||||
caller
|
||||
return 1
|
||||
else
|
||||
echo -n "${green}"
|
||||
echo "Successful"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
# Returns true if the required resource is part of supported resources.
|
||||
# Expects env vars:
|
||||
# SUPPORTED_RESOURCES: Array of all resources supported by the apiserver. "*"
|
||||
# means it supports all resources. For ex: ("*") or ("rc" "*") both mean that
|
||||
# all resources are supported.
|
||||
# $1: Name of the resource to be tested.
|
||||
openim::test::if_supports_resource() {
|
||||
SUPPORTED_RESOURCES=${SUPPORTED_RESOURCES:-""}
|
||||
REQUIRED_RESOURCE=${1:-""}
|
||||
|
||||
for r in "${SUPPORTED_RESOURCES[@]}"; do
|
||||
if [[ "${r}" == "*" || "${r}" == "${REQUIRED_RESOURCE}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
openim::test::version::object_to_file() {
|
||||
name=$1
|
||||
flags=${2:-""}
|
||||
file=$3
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling because "flags" needs to allow for expansion here
|
||||
kubectl version ${flags} | grep "${name} Version:" | sed -e s/"${name} Version: "/""/g > "${file}"
|
||||
}
|
||||
|
||||
openim::test::version::json_object_to_file() {
|
||||
flags=$1
|
||||
file=$2
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling because "flags" needs to allow for expansion here
|
||||
kubectl version ${flags} --output json | sed -e s/' '/''/g -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e s/'clientVersion:'/'clientVersion:,'/ -e s/'serverVersion:'/'serverVersion:,'/ | tr , '\n' > "${file}"
|
||||
}
|
||||
|
||||
openim::test::version::json_client_server_object_to_file() {
|
||||
flags=$1
|
||||
name=$2
|
||||
file=$3
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling because "flags" needs to allow for expansion here
|
||||
kubectl version ${flags} --output json | jq -r ".${name}" | sed -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e /^$/d -e s/','/''/g -e s/':'/'='/g > "${file}"
|
||||
}
|
||||
|
||||
openim::test::version::yaml_object_to_file() {
|
||||
flags=$1
|
||||
file=$2
|
||||
# shellcheck disable=SC2086
|
||||
# Disabling because "flags" needs to allow for expansion here
|
||||
kubectl version ${flags} --output yaml | sed -e s/' '/''/g -e s/'\"'/''/g -e /^$/d > "${file}"
|
||||
}
|
||||
|
||||
openim::test::version::diff_assert() {
|
||||
local original=$1
|
||||
local comparator=${2:-"eq"}
|
||||
local latest=$3
|
||||
local diff_msg=${4:-""}
|
||||
local res=""
|
||||
|
||||
if [ ! -f "${original}" ]; then
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL! ${diff_msg}"
|
||||
echo "the file '${original}' does not exit"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ ! -f "${latest}" ]; then
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL! ${diff_msg}"
|
||||
echo "the file '${latest}' does not exit"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "${comparator}" == "exact" ]; then
|
||||
# Skip sorting of file content for exact comparison.
|
||||
cp "${original}" "${original}.sorted"
|
||||
cp "${latest}" "${latest}.sorted"
|
||||
else
|
||||
sort "${original}" > "${original}.sorted"
|
||||
sort "${latest}" > "${latest}.sorted"
|
||||
fi
|
||||
|
||||
if [ "${comparator}" == "eq" ] || [ "${comparator}" == "exact" ]; then
|
||||
if [ "$(diff -iwB "${original}".sorted "${latest}".sorted)" == "" ] ; then
|
||||
echo -n "${green}"
|
||||
echo "Successful: ${diff_msg}"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL! ${diff_msg}"
|
||||
echo " Expected: "
|
||||
cat "${original}"
|
||||
echo " Got: "
|
||||
cat "${latest}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
if [ -n "$(diff -iwB "${original}".sorted "${latest}".sorted)" ] ; then
|
||||
echo -n "${green}"
|
||||
echo "Successful: ${diff_msg}"
|
||||
echo -n "${reset}"
|
||||
return 0
|
||||
else
|
||||
echo "${bold}${red}"
|
||||
echo "FAIL! ${diff_msg}"
|
||||
echo " Expected: "
|
||||
cat "${original}"
|
||||
echo " Got: "
|
||||
cat "${latest}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
return 1
|
||||
# Define a function to register, get a token and check the account
|
||||
openim::register_and_check()
|
||||
{
|
||||
# Register a user
|
||||
user_register_response=$(openim::register_user)
|
||||
|
||||
if [[ $user_register_response == *"errCode": 0* ]]; then
|
||||
echo "User registration successful."
|
||||
|
||||
# Get token
|
||||
token=$(openim::get_token)
|
||||
|
||||
if [[ -n $token ]]; then
|
||||
echo "Token acquired: $token"
|
||||
|
||||
# Check account
|
||||
account_check_response=$(openim::check_account $token)
|
||||
|
||||
if [[ $account_check_response == *"errCode": 0* ]]; then
|
||||
echo "Account check successful."
|
||||
else
|
||||
echo "Account check failed."
|
||||
fi
|
||||
else
|
||||
echo "Failed to acquire token."
|
||||
fi
|
||||
else
|
||||
echo "User registration failed."
|
||||
fi
|
||||
}
|
||||
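The helper functions above drive the user_register, auth/user_token and account_check endpoints with curl. As a hedged Go equivalent of the token step (the endpoint, secret and userID are the same placeholder values the script uses; the exact response shape, with the token nested under data, is an assumption based on the errCode/token fields the script greps for):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// getUserToken mirrors openim::get_token: POST /auth/user_token and pull the
// token out of the JSON response.
func getUserToken(apiURL, secret, userID string, platformID int) (string, error) {
	body, _ := json.Marshal(map[string]any{
		"secret":     secret,
		"platformID": platformID,
		"userID":     userID,
	})
	resp, err := http.Post(apiURL+"/auth/user_token", "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var out struct {
		ErrCode int    `json:"errCode"`
		ErrMsg  string `json:"errMsg"`
		Data    struct {
			Token string `json:"token"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return "", err
	}
	if out.ErrCode != 0 {
		return "", fmt.Errorf("user_token failed: %d %s", out.ErrCode, out.ErrMsg)
	}
	return out.Data.Token, nil
}

func main() {
	token, err := getUserToken("http://localhost:10002", "openIM123", "11111112", 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token:", token)
}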
|
||||
# Force exact match of kubectl stdout, stderr, and return code.
|
||||
# $1: file with actual stdout
|
||||
# $2: file with actual stderr
|
||||
# $3: the actual return code
|
||||
# $4: file with expected stdout
|
||||
# $5: file with expected stderr
|
||||
# $6: expected return code
|
||||
# $7: additional message describing the invocation
|
||||
openim::test::results::diff() {
|
||||
local actualstdout=$1
|
||||
local actualstderr=$2
|
||||
local actualcode=$3
|
||||
local expectedstdout=$4
|
||||
local expectedstderr=$5
|
||||
local expectedcode=$6
|
||||
local message=$7
|
||||
local result=0
|
||||
|
||||
if ! openim::test::version::diff_assert "${expectedstdout}" "exact" "${actualstdout}" "stdout for ${message}"; then
|
||||
result=1
|
||||
fi
|
||||
if ! openim::test::version::diff_assert "${expectedstderr}" "exact" "${actualstderr}" "stderr for ${message}"; then
|
||||
result=1
|
||||
fi
|
||||
if [ "${actualcode}" -ne "${expectedcode}" ]; then
|
||||
echo "${bold}${red}"
|
||||
echo "$(openim::test::get_caller): FAIL!"
|
||||
echo "Return code for ${message}"
|
||||
echo " Expected: ${expectedcode}"
|
||||
echo " Got: ${actualcode}"
|
||||
echo "${reset}${red}"
|
||||
caller
|
||||
echo "${reset}"
|
||||
result=1
|
||||
openim::test::secret()
|
||||
{
|
||||
token="-HAuthorization: Bearer $(openim::test::login)"
|
||||
|
||||
# 1. If the secret0 secret already exists, delete it first
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0; echo
|
||||
|
||||
# 2. Create the secret0 secret
|
||||
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets \
|
||||
-d'{"metadata":{"name":"secret0"},"expires":0,"description":"admin secret"}'; echo
|
||||
|
||||
# 3. List all secrets
|
||||
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets; echo
|
||||
|
||||
# 4. Get the details of the secret0 secret
|
||||
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0; echo
|
||||
|
||||
# 5. Modify the secret0 secret
|
||||
${UCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0 \
|
||||
-d'{"expires":0,"description":"admin secret(modified)"}'; echo
|
||||
|
||||
# 6. Delete the secret0 secret
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/secret0; echo
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/secret test passed!\033[0m')"
|
||||
}
|
||||
|
||||
openim::test::policy()
|
||||
{
|
||||
token="-HAuthorization: Bearer $(openim::test::login)"
|
||||
|
||||
# 1. If the policy0 policy already exists, delete it first
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0; echo
|
||||
|
||||
# 2. Create the policy0 policy
|
||||
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/policies \
|
||||
-d'{"metadata":{"name":"policy0"},"policy":{"description":"One policy to rule them all.","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}'; echo
|
||||
|
||||
# 3. List all policies
|
||||
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies; echo
|
||||
|
||||
# 4. Get the details of the policy0 policy
|
||||
${RCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0; echo
|
||||
|
||||
# 5. Modify the policy0 policy
|
||||
${UCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0 \
|
||||
-d'{"policy":{"description":"One policy to rule them all(modified).","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}'; echo
|
||||
|
||||
# 6. Delete the policy0 policy
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/policy0; echo
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/policy test passed!\033[0m')"
|
||||
}
|
||||
|
||||
openim::test::apiserver()
|
||||
{
|
||||
openim::test::user
|
||||
openim::test::secret
|
||||
openim::test::policy
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, openim-apiserver test passed!\033[0m')"
|
||||
}
|
||||
|
||||
openim::test::authz()
|
||||
{
|
||||
token="-HAuthorization: Bearer $(openim::test::login)"
|
||||
|
||||
# 1. If the authzpolicy policy already exists, delete it first
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/policies/authzpolicy; echo
|
||||
|
||||
# 2. Create the authzpolicy policy
|
||||
${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/policies \
|
||||
-d'{"metadata":{"name":"authzpolicy"},"policy":{"description":"One policy to rule them all.","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}'; echo
|
||||
|
||||
# 3. If the authzsecret secret already exists, delete it first
|
||||
${DCURL} "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets/authzsecret; echo
|
||||
|
||||
# 4. Create the authzsecret secret
|
||||
secret=$(${CCURL} "${Header}" "${token}" http://${INSECURE_OPENIMAPI}/v1/secrets -d'{"metadata":{"name":"authzsecret"},"expires":0,"description":"admin secret"}')
|
||||
secretID=$(echo ${secret} | grep -Po 'secretID[" :]+\K[^"]+')
|
||||
secretKey=$(echo ${secret} | grep -Po 'secretKey[" :]+\K[^"]+')
|
||||
|
||||
# 5. Generate a token
|
||||
token=$(iamctl jwt sign ${secretID} ${secretKey})
|
||||
|
||||
# 6. Call /v1/authz to perform resource authorization.
|
||||
# Note: sleep 3s here so that openim-authz-server can sync the newly created secret into its memory
|
||||
echo "wait 3s to allow openim-authz-server to sync information into its memory ..."
|
||||
sleep 3
|
||||
ret=`$CCURL "${Header}" -H"Authorization: Bearer ${token}" http://${INSECURE_OPENIMAUTO}/v1/authz \
|
||||
-d'{"subject":"users:maria","action":"delete","resource":"resources:articles:ladon-introduction","context":{"remoteIPAddress":"192.168.0.5"}}' | grep -Po 'allowed[" :]+\K\w+'`
|
||||
|
||||
if [ "$ret" != "true" ];then
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "${result}" -eq 0 ]; then
|
||||
echo -n "${green}"
|
||||
echo "$(openim::test::get_caller): Successful: ${message}"
|
||||
echo -n "${reset}"
|
||||
fi
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, /v1/authz test passed!\033[0m')"
|
||||
}
|
||||
|
||||
return "$result"
|
||||
}
|
||||
openim::test::authzserver()
|
||||
{
|
||||
openim::test::authz
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, openim-authz-server test passed!\033[0m')"
|
||||
}
|
||||
|
||||
openim::test::pump()
|
||||
{
|
||||
${RCURL} http://${IAM_PUMP_HOST}:7070/healthz | egrep -q 'status.*ok' || {
|
||||
openim::log::error "cannot access openim-pump healthz api, openim-pump maybe down"
|
||||
return 1
|
||||
}
|
||||
|
||||
openim::test::real_pump_test
|
||||
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, openim-pump test passed!\033[0m')"
|
||||
}
|
||||
|
||||
# Use real data to test whether openim-pump works correctly
|
||||
openim::test::real_pump_test()
|
||||
{
|
||||
# 1. Create the secret pair needed to access openim-authz-server
|
||||
iamctl secret create pumptest &>/dev/null
|
||||
|
||||
# 2. Generate a JWT token with the secret pair created in step 1
|
||||
authzAccessToken=`iamctl jwt sign njcho8gJQArsq7zr5v1YpG5NcvL0aeuZ38Ti if70HgRgp021iq5ex2l7pfy5XvgtZM3q` # iamctl jwt sign $secretID $secretKey
|
||||
|
||||
# 3. Create an authorization policy
|
||||
iamctl policy create pumptest '{"metadata":{"name":"policy0"},"policy":{"description":"One policy to rule them all.","subjects":["users:<peter|ken>","users:maria","groups:admins"],"actions":["delete","<create|update>"],"effect":"allow","resources":["resources:articles:<.*>","resources:printer"],"conditions":{"remoteIPAddress":{"type":"CIDRCondition","options":{"cidr":"192.168.0.1/16"}}}}}' &>/dev/null
|
||||
|
||||
# Note: sleep 3s here so that openim-authz-server can sync the newly created secret and authorization policy into its memory
|
||||
echo "wait 3s to allow openim-authz-server to sync information into its memory ..."
|
||||
sleep 3
|
||||
|
||||
# 4. Call the /v1/authz endpoint to perform resource authorization
|
||||
$CCURL "${Header}" -H"Authorization: Bearer ${token}" http://${INSECURE_OPENIMAUTO}/v1/authz \
|
||||
-d'{"subject":"users:maria","action":"delete","resource":"resources:articles:ladon-introduction","context":{"remoteIPAddress":"192.168.0.5"}}' &>/dev/null
|
||||
|
||||
# Sleep here so that openim-pump can analyze the logs in Redis and dump them into MongoDB
|
||||
echo "wait 10s to allow openim-pump analyze and dump authorization log into MongoDB ..."
|
||||
sleep 10
|
||||
|
||||
# 5. Check that the parsed authorization log is present in MongoDB.
|
||||
echo "db.iam_analytics.find()" | mongosh --quiet "${IAM_PUMP_MONGO_URL}" | grep -q "allow access" || {
|
||||
openim::log::error "cannot find analyzed authorization log in MongoDB"
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
openim::test::watcher()
|
||||
{
|
||||
${RCURL} http://${IAM_WATCHER_HOST}:5050/healthz | egrep -q 'status.*ok' || {
|
||||
openim::log::error "cannot access openim-watcher healthz api, openim-watcher maybe down"
|
||||
return 1
|
||||
}
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, openim-watcher test passed!\033[0m')"
|
||||
}
|
||||
|
||||
openim::test::iamctl()
|
||||
{
|
||||
iamctl user list | egrep -q admin || {
|
||||
openim::log::error "iamctl cannot list users from openim-apiserver"
|
||||
return 1
|
||||
}
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, iamctl test passed!\033[0m')"
|
||||
}
|
||||
|
||||
openim::test::man()
|
||||
{
|
||||
man openim-apiserver | grep -q 'OPENIM API Server' || {
|
||||
openim::log::error "openim man page not installed or may not installed properly"
|
||||
return 1
|
||||
}
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, man test passed!\033[0m')"
|
||||
}
|
||||
|
||||
# OpenIM Smoke Test
|
||||
openim::test::smoke()
|
||||
{
|
||||
openim::test::apiserver
|
||||
openim::test::authzserver
|
||||
openim::test::pump
|
||||
openim::test::watcher
|
||||
openim::test::iamctl
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, smoke test passed!\033[0m')"
|
||||
}
|
||||
|
||||
# OpenIM Test
|
||||
openim::test::test()
|
||||
{
|
||||
openim::test::smoke
|
||||
openim::test::man
|
||||
|
||||
openim::log::info "$(echo -e '\033[32mcongratulations, all test passed!\033[0m')"
|
||||
}
|
||||
|
||||
if [[ "$*" =~ openim::test:: ]];then
|
||||
eval $*
|
||||
fi
|
||||
|
||||
@ -102,27 +102,24 @@ endif
|
||||
# The OS can be linux/windows/darwin when building binaries
|
||||
PLATFORMS ?= linux_s390x linux_mips64 linux_mips64le darwin_amd64 windows_amd64 linux_amd64 linux_arm64 linux_ppc64le # wasip1_wasm
|
||||
|
||||
# only support linux
|
||||
GOOS=linux
|
||||
|
||||
# set a specific PLATFORM, defaults to the host platform
|
||||
ifeq ($(origin PLATFORM), undefined)
|
||||
ifeq ($(origin GOARCH), undefined)
|
||||
GOARCH := $(shell go env GOARCH)
|
||||
endif
|
||||
ifeq ($(origin GOARCH), undefined)
|
||||
GOARCH := $(shell go env GOARCH)
|
||||
endif
|
||||
# Determine the host OS
|
||||
GOOS := $(shell go env GOOS)
|
||||
PLATFORM := $(GOOS)_$(GOARCH)
|
||||
# Use linux as the default OS when building images
|
||||
IMAGE_PLAT := linux_$(GOARCH)
|
||||
# Use the host OS and GOARCH as the default when building images
|
||||
IMAGE_PLAT := $(PLATFORM)
|
||||
else
|
||||
# such as: PLATFORM = linux_amd64
|
||||
# Parse the PLATFORM variable
|
||||
GOOS := $(word 1, $(subst _, ,$(PLATFORM)))
|
||||
GOARCH := $(word 2, $(subst _, ,$(PLATFORM)))
|
||||
IMAGE_PLAT := $(PLATFORM)
|
||||
endif
|
||||
|
||||
|
||||
# Protobuf file storage path
|
||||
APIROOT=$(ROOT_DIR)/pkg/proto
|
||||
|
||||
|
||||
@ -8,11 +8,11 @@ If you encounter any problems during its usage, please create an issue in the [G
|
||||
|
||||
**Here are some ways to get involved with the OpenIM community:**
|
||||
|
||||
📢 **Slack Channel**: Join our Slack channels for discussions, communication, and support. Click [here](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q) to join the Open-IM-Server Slack team channel.
|
||||
📢 **Slack Channel**: Join our Slack channels for discussions, communication, and support. Click [here](https://openimsdk.slack.com) to join the Open-IM-Server Slack team channel.
|
||||
|
||||
📧 **Gmail Contact**: If you have any questions, suggestions, or feedback for our open-source projects, please feel free to [contact us via email](https://mail.google.com/mail/?view=cm&fs=1&tf=1&to=winxu81@gmail.com).
|
||||
📧 **Gmail Contact**: If you have any questions, suggestions, or feedback for our open-source projects, please feel free to [contact us via email](https://mail.google.com/mail/?view=cm&fs=1&tf=1&to=info@openim.io).
|
||||
|
||||
📖 **Blog**: Stay up-to-date with OpenIM-Server projects and trends by reading our [blog](https://doc.rentsoft.cn/). We share the latest developments, tech trends, and other interesting information related to OpenIM.
|
||||
📖 **Blog**: Stay up-to-date with OpenIM-Server projects and trends by reading our [blog](https://openim.io/). We share the latest developments, tech trends, and other interesting information related to OpenIM.
|
||||
|
||||
📱 **WeChat**: Add us on WeChat (QR Code) and indicate that you are a user or developer of Open-IM-Server. We'll process your request as soon as possible.
|
||||
|
||||
|
||||
@ -21,7 +21,7 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
rawJWT := `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJpYW0uYXV0aHoubWFybW90ZWR1LmNvbSIsImV4cCI6MTYwNDEyODQwMywiaWF0IjoxNjA0MTI4NDAyLCJpc3MiOiJpYW1jdGwiLCJraWQiOiJpZDEifQ.Itr5u4C-nTeA01qbjjl7RzuPD-aSQazsJZY_Z25aGnI`
|
||||
rawJWT := `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJVc2VySUQiOiI4MjkzODEzMTgzIiwiUGxhdGZvcm1JRCI6NSwiZXhwIjoxNzA2NTk0MTU0LCJuYmYiOjE2OTg4MTc4NTQsImlhdCI6MTY5ODgxODE1NH0.QCJHzU07SC6iYBoFO6Zsm61TNDor2D89I4E3zg8HHHU`
|
||||
|
||||
// Verify the token
|
||||
claims := &jwt.MapClaims{}
|
||||
|
||||
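The example above swaps the sample token for one carrying OpenIM claims (UserID, PlatformID, exp, nbf, iat). To inspect those claims without knowing the signing secret, a small sketch using ParseUnverified (assuming the golang-jwt/jwt/v4 module; a real check must verify the signature with the server's secret):

package main

import (
	"fmt"
	"log"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	rawJWT := `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.` +
		`eyJVc2VySUQiOiI4MjkzODEzMTgzIiwiUGxhdGZvcm1JRCI6NSwiZXhwIjoxNzA2NTk0MTU0LCJuYmYiOjE2OTg4MTc4NTQsImlhdCI6MTY5ODgxODE1NH0.` +
		`QCJHzU07SC6iYBoFO6Zsm61TNDor2D89I4E3zg8HHHU`

	// ParseUnverified only decodes the claims; it does NOT validate the signature,
	// so it is suitable for debugging but never for authentication decisions.
	claims := jwt.MapClaims{}
	if _, _, err := new(jwt.Parser).ParseUnverified(rawJWT, claims); err != nil {
		log.Fatal(err)
	}
	fmt.Println("UserID:", claims["UserID"], "PlatformID:", claims["PlatformID"], "exp:", claims["exp"])
}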