Merge branch 'openimsdk:main' into main

commit 156227bf46
.env | 3

@@ -5,6 +5,9 @@ ZOOKEEPER_IMAGE=bitnami/zookeeper:3.8
KAFKA_IMAGE=bitnami/kafka:3.5.1
MINIO_IMAGE=minio/minio:RELEASE.2024-01-11T07-46-16Z
ETCD_IMAGE=quay.io/coreos/etcd:v3.5.13
PROMETHEUS_IMAGE=prom/prometheus:v2.45.6
ALERTMANAGER_IMAGE=prom/alertmanager:v0.27.0
GRAFANA_IMAGE=grafana/grafana:11.0.1

OPENIM_WEB_FRONT_IMAGE=openim/openim-web-front:release-v3.5.1
OPENIM_ADMIN_FRONT_IMAGE=openim/openim-admin-front:release-v1.7
.gitignore (vendored) | 4

@@ -34,11 +34,7 @@ deployments/charts/generated-configs/
### OpenIM Config ###
.env
config/config.yaml
config/alertmanager.yml
config/prometheus.yml
config/email.tmpl
config/notification.yaml
config/instance-down-rules.yml

### OpenIM deploy ###
deployments/openim-server/charts
@@ -25,5 +25,4 @@ func main() {
    if err := cmd.NewApiCmd().Exec(); err != nil {
        program.ExitWithError(err)
    }
}
config/alertmanager.yml (new file) | 25

@@ -0,0 +1,25 @@
global:
  resolve_timeout: 5m
  smtp_from: alert@openim.io
  smtp_smarthost: smtp.163.com:465
  smtp_auth_username: alert@openim.io
  smtp_auth_password: YOURAUTHPASSWORD
  smtp_require_tls: false
  smtp_hello: xxx

templates:
  - /etc/alertmanager/email.tmpl

route:
  group_by: ['alertname']
  group_wait: 5s
  group_interval: 5s
  repeat_interval: 5m
  receiver: email
receivers:
  - name: email
    email_configs:
      - to: 'alert@example.com'
        html: '{{ template "email.to.html" . }}'
        headers: { Subject: "[OPENIM-SERVER]Alarm" }
        send_resolved: true
config/email.tmpl (new file) | 16

@@ -0,0 +1,16 @@
{{ define "email.to.html" }}
{{ range .Alerts }}
<!-- Begin of OpenIM Alert -->
<div style="border:1px solid #ccc; padding:10px; margin-bottom:10px;">
  <h3>OpenIM Alert</h3>
  <p><strong>Alert Program:</strong> Prometheus Alert</p>
  <p><strong>Severity Level:</strong> {{ .Labels.severity }}</p>
  <p><strong>Alert Type:</strong> {{ .Labels.alertname }}</p>
  <p><strong>Affected Host:</strong> {{ .Labels.instance }}</p>
  <p><strong>Affected Service:</strong> {{ .Labels.job }}</p>
  <p><strong>Alert Subject:</strong> {{ .Annotations.summary }}</p>
  <p><strong>Trigger Time:</strong> {{ .StartsAt.Format "2006-01-02 15:04:05" }}</p>
</div>
<!-- End of OpenIM Alert -->
{{ end }}
{{ end }}
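This template is rendered by Alertmanager with its notification data (.Alerts, .Labels, .Annotations, .StartsAt). A quick way to sanity-check it locally is to execute it with Go's html/template against stand-in data; the snippet below is illustrative only (the struct shapes and the config/email.tmpl path are assumptions, not part of this commit):

package main

import (
    "html/template"
    "os"
    "time"
)

// Minimal stand-ins for the Alertmanager template data used by email.tmpl.
type alert struct {
    Labels      map[string]string
    Annotations map[string]string
    StartsAt    time.Time
}

type data struct{ Alerts []alert }

func main() {
    t := template.Must(template.ParseFiles("config/email.tmpl"))
    d := data{Alerts: []alert{{
        Labels: map[string]string{
            "severity":  "critical",
            "alertname": "InstanceDown",
            "instance":  "192.168.2.22:20113",
            "job":       "openimserver-openim-api",
        },
        Annotations: map[string]string{"summary": "Instance 192.168.2.22:20113 down"},
        StartsAt:    time.Now(),
    }}}
    // Render the named template defined in email.tmpl to stdout.
    if err := t.ExecuteTemplate(os.Stdout, "email.to.html", d); err != nil {
        panic(err)
    }
}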
config/instance-down-rules.yml (new file) | 22

@@ -0,0 +1,22 @@
groups:
  - name: instance_down
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Instance {{ $labels.instance }} down"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minute."

  - name: database_insert_failure_alerts
    rules:
      - alert: DatabaseInsertFailed
        expr: (increase(msg_insert_redis_failed_total[5m]) > 0) or (increase(msg_insert_mongo_failed_total[5m]) > 0)
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Increase in MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter detected"
          description: "Either MsgInsertRedisFailedCounter or MsgInsertMongoFailedCounter has increased in the last 5 minutes, indicating failed message inserts into Redis or MongoDB; Redis or MongoDB may be down."
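The DatabaseInsertFailed expression assumes the server exports counters named msg_insert_redis_failed_total and msg_insert_mongo_failed_total, surfaced in code as MsgInsertRedisFailedCounter and MsgInsertMongoFailedCounter. A minimal sketch of how such counters are declared with prometheus/client_golang; the names mirror the rule above, but this is not the repo's actual prommetrics code:

package prommetrics

import "github.com/prometheus/client_golang/prometheus"

var (
    // MsgInsertRedisFailedCounter counts failed message inserts into Redis.
    MsgInsertRedisFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "msg_insert_redis_failed_total",
        Help: "The number of failed message inserts into Redis.",
    })
    // MsgInsertMongoFailedCounter counts failed message inserts into MongoDB.
    MsgInsertMongoFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "msg_insert_mongo_failed_total",
        Help: "The number of failed message inserts into MongoDB.",
    })
)

func init() {
    // Registration makes the counters visible on the /metrics endpoint
    // that Prometheus scrapes.
    prometheus.MustRegister(MsgInsertRedisFailedCounter, MsgInsertMongoFailedCounter)
}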
@@ -1,2 +1,3 @@
chatRecordsClearTime: "0 2 * * *"
cronExecuteTime: "0 2 * * *"
retainChatRecords: 365
fileExpireTime: 90
@@ -29,4 +29,12 @@ object:
    accessKeyID: ''
    accessKeySecret: ''
    sessionToken: ''
    publicRead: false
  kodo:
    endpoint: "http://s3.cn-south-1.qiniucs.com"
    bucket: "kodo-bucket-test"
    bucketURL: "http://kodo-bucket-test-oetobfb.qiniudns.com"
    accessKeyID: ''
    accessKeySecret: ''
    sessionToken: ''
    publicRead: false
config/prometheus.yml (new file) | 83

@@ -0,0 +1,83 @@
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['192.168.2.22:19093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "instance-down-rules.yml"
# - "first_rules.yml"
# - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label "job='job_name'" to any timeseries scraped from this config.
  # Monitored information captured by prometheus

  # prometheus fetches application services
  - job_name: 'node_exporter'
    static_configs:
      - targets: [ '192.168.2.22:20114' ]
  - job_name: 'openimserver-openim-api'
    static_configs:
      - targets: [ '192.168.2.22:20113' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-msggateway'
    static_configs:
      - targets: [ '192.168.2.22:20112' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-msgtransfer'
    static_configs:
      - targets: [ 192.168.2.22:20111, 192.168.2.22:20110, 192.168.2.22:20109, 192.168.2.22:20108 ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-push'
    static_configs:
      - targets: [ '192.168.2.22:20107' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-auth'
    static_configs:
      - targets: [ '192.168.2.22:20106' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-conversation'
    static_configs:
      - targets: [ '192.168.2.22:20105' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-friend'
    static_configs:
      - targets: [ '192.168.2.22:20104' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-group'
    static_configs:
      - targets: [ '192.168.2.22:20103' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-msg'
    static_configs:
      - targets: [ '192.168.2.22:20102' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-third'
    static_configs:
      - targets: [ '192.168.2.22:20101' ]
        labels:
          namespace: 'default'
  - job_name: 'openimserver-openim-rpc-user'
    static_configs:
      - targets: [ '192.168.2.22:20100' ]
        labels:
          namespace: 'default'
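Every target above is scraped over HTTP, so each service must expose a /metrics handler on its Prometheus port (20100-20114 here). A minimal sketch of that serving side using promhttp; the port value is taken from the openim-api target above, and the real server wires this through prommetrics.ApiInit rather than a bare ListenAndServe:

package main

import (
    "fmt"
    "net/http"

    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// serveMetrics exposes the default Prometheus registry on /metrics,
// matching a scrape target such as 192.168.2.22:20113.
func serveMetrics(port int) error {
    mux := http.NewServeMux()
    mux.Handle("/metrics", promhttp.Handler())
    return http.ListenAndServe(fmt.Sprintf(":%d", port), mux)
}

func main() {
    if err := serveMetrics(20113); err != nil && err != http.ErrServerClosed {
        panic(err)
    }
}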
@@ -140,5 +140,50 @@ services:
    networks:
      - openim

  prometheus:
    image: ${PROMETHEUS_IMAGE}
    container_name: prometheus
    restart: always
    volumes:
      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
      - ${DATA_DIR}/components/prometheus/data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
    ports:
      - "19091:9090"
    networks:
      - openim

  alertmanager:
    image: ${ALERTMANAGER_IMAGE}
    container_name: alertmanager
    restart: always
    volumes:
      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
    ports:
      - "19093:9093"
    networks:
      - openim

  grafana:
    image: ${GRAFANA_IMAGE}
    container_name: grafana
    user: root
    restart: always
    environment:
      - GF_SECURITY_ALLOW_EMBEDDING=true
      - GF_SESSION_COOKIE_SAMESITE=none
      - GF_SESSION_COOKIE_SECURE=true
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
    ports:
      - "13000:3000"
    volumes:
      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
    networks:
      - openim
go.mod | 25

@@ -12,8 +12,8 @@ require (
    github.com/gorilla/websocket v1.5.1
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
    github.com/mitchellh/mapstructure v1.5.0
    github.com/openimsdk/protocol v0.0.69-alpha.17
    github.com/openimsdk/tools v0.0.49-alpha.45
    github.com/openimsdk/protocol v0.0.69-alpha.30
    github.com/openimsdk/tools v0.0.49-alpha.50
    github.com/pkg/errors v0.9.1 // indirect
    github.com/prometheus/client_golang v1.18.0
    github.com/stretchr/testify v1.9.0
@@ -53,6 +53,24 @@ require (
    cloud.google.com/go/longrunning v0.5.4 // indirect
    cloud.google.com/go/storage v1.36.0 // indirect
    github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect
    github.com/aws/aws-sdk-go-v2 v1.23.1 // indirect
    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.25.4 // indirect
    github.com/aws/aws-sdk-go-v2/credentials v1.16.3 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5 // indirect
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 // indirect
    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 // indirect
    github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
    github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4 // indirect
    github.com/aws/aws-sdk-go-v2/service/s3 v1.43.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/sso v1.17.3 // indirect
    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 // indirect
    github.com/aws/smithy-go v1.17.0 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/bytedance/sonic v1.9.1 // indirect
    github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -118,6 +136,7 @@ require (
    github.com/prometheus/client_model v0.5.0 // indirect
    github.com/prometheus/common v0.45.0 // indirect
    github.com/prometheus/procfs v0.12.0 // indirect
    github.com/qiniu/go-sdk/v7 v7.18.2 // indirect
    github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
    github.com/rs/xid v1.5.0 // indirect
    github.com/sagikazarmark/locafero v0.4.0 // indirect
@@ -175,5 +194,3 @@ require (
    golang.org/x/crypto v0.21.0 // indirect
    gopkg.in/ini.v1 v1.67.0 // indirect
)

//replace github.com/openimsdk/protocol => /Users/chao/Desktop/project/protocol
go.sum | 77

@@ -21,6 +21,42 @@ github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/aws/aws-sdk-go-v2 v1.23.1 h1:qXaFsOOMA+HsZtX8WoCa+gJnbyW7qyFFBlPqvTSzbaI=
github.com/aws/aws-sdk-go-v2 v1.23.1/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 h1:ZY3108YtBNq96jNZTICHxN1gSBSbnvIdYwwqnvCV4Mc=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1/go.mod h1:t8PYl/6LzdAqsU4/9tz28V/kU+asFePvpOMkdul0gEQ=
github.com/aws/aws-sdk-go-v2/config v1.25.4 h1:r+X1x8QI6FEPdJDWCNBDZHyAcyFwSjHN8q8uuus+Axs=
github.com/aws/aws-sdk-go-v2/config v1.25.4/go.mod h1:8GTjImECskr7D88P/Nn9uM4M4rLY9i77hLJZgkZEWV8=
github.com/aws/aws-sdk-go-v2/credentials v1.16.3 h1:8PeI2krzzjDJ5etmgaMiD1JswsrLrWvKKu/uBUtNy1g=
github.com/aws/aws-sdk-go-v2/credentials v1.16.3/go.mod h1:Kdh/okh+//vQ/AjEt81CjvkTo64+/zIE4OewP7RpfXk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5 h1:KehRNiVzIfAcj6gw98zotVbb/K67taJE0fkfgM6vzqU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5/go.mod h1:VhnExhw6uXy9QzetvpXDolo1/hjhx4u9qukBGkuUwjs=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 h1:LAm3Ycm9HJfbSCd5I+wqC2S9Ej7FPrgr5CQoOljJZcE=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4/go.mod h1:xEhvbJcyUf/31yfGSQBe01fukXwXJ0gxDp7rLfymWE0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 h1:4GV0kKZzUxiWxSVpn/9gwR0g21NF1Jsyduzo9rHgC/Q=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4/go.mod h1:dYvTNAggxDZy6y1AF7YDwXsPuHFy/VNEpEI/2dWK9IU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4 h1:40Q4X5ebZruRtknEZH/bg91sT5pR853F7/1X9QRbI54=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4/go.mod h1:u77N7eEECzUv7F0xl2gcfK/vzc8wcjWobpy+DcrLJ5E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4 h1:6DRKQc+9cChgzL5gplRGusI5dBGeiEod4m/pmGbcX48=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4/go.mod h1:s8ORvrW4g4v7IvYKIAoBg17w3GQ+XuwXDXYrQ5SkzU0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4 h1:rdovz3rEu0vZKbzoMYPTehp0E8veoE9AyfzqCr5Eeao=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4/go.mod h1:aYCGNjyUCUelhofxlZyj63srdxWUSsBSGg5l6MCuXuE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4 h1:o3DcfCxGDIT20pTbVKVhp3vWXOj/VvgazNJvumWeYW0=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4/go.mod h1:Uy0KVOxuTK2ne+/PKQ+VvEeWmjMMksE17k/2RK/r5oM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.43.1 h1:1w11lfXOa8HoHoSlNtt4mqv/N3HmDOa+OnUH3Y9DHm8=
github.com/aws/aws-sdk-go-v2/service/s3 v1.43.1/go.mod h1:dqJ5JBL0clzgHriH35Amx3LRFY6wNIPUX7QO/BerSBo=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.3 h1:CdsSOGlFF3Pn+koXOIpTtvX7st0IuGsZ8kJqcWMlX54=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.3/go.mod h1:oA6VjNsLll2eVuUoF2D+CMyORgNzPEW/3PyUdq6WQjI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1 h1:cbRqFTVnJV+KRpwFl76GJdIZJKKCdTPnjUZ7uWh3pIU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1/go.mod h1:hHL974p5auvXlZPIjJTblXJpbkfK4klBczlsEaMCGVY=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 h1:yEvZ4neOQ/KpUqyR+X0ycUTW/kVRNR4nDZ38wStHGAA=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.4/go.mod h1:feTnm2Tk/pJxdX+eooEsxvlvTWBvDm6CasRZ+JOs2IY=
github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI=
github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -49,6 +85,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -95,12 +132,18 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-playground/validator/v10 v10.18.0 h1:BvolUXjp4zuvkZ5YN5t7ebzbhlUtPsPm2S9NAZ5nl9U=
github.com/go-playground/validator/v10 v10.18.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
@@ -214,10 +257,16 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/likexian/gokit v0.25.13 h1:p2Uw3+6fGG53CwdU2Dz0T6bOycdb2+bAFAa3ymwWVkM=
@@ -262,14 +311,15 @@ github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/openimsdk/gomake v0.0.14-alpha.5 h1:VY9c5x515lTfmdhhPjMvR3BBRrRquAUCFsz7t7vbv7Y=
github.com/openimsdk/gomake v0.0.14-alpha.5/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
github.com/openimsdk/protocol v0.0.69-alpha.17 h1:pEag4ZdlovE+AyLsw1VYFU/3sk6ayvGdPzgufQfKf9M=
github.com/openimsdk/protocol v0.0.69-alpha.17/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/tools v0.0.49-alpha.45 h1:XIzCoef4myybOiIlGuRY9FTtGBisZFC4Uy4PhG0ZWQ0=
github.com/openimsdk/tools v0.0.49-alpha.45/go.mod h1:HtSRjPTL8PsuZ+PhR5noqzrYBF0sdwW3/O/sWVucWg8=
github.com/openimsdk/protocol v0.0.69-alpha.30 h1:OXzCIpDpIY/GI6h1SDYWN51OS9Xv/BcHaOwq8whPKqI=
github.com/openimsdk/protocol v0.0.69-alpha.30/go.mod h1:OZQA9FR55lseYoN2Ql1XAHYKHJGu7OMNkUbuekrKCM8=
github.com/openimsdk/tools v0.0.49-alpha.50 h1:7CaYLVtsBU5kyiTetUOuOkO5FFFmMvSzBEfh2tfCn90=
github.com/openimsdk/tools v0.0.49-alpha.50/go.mod h1:HtSRjPTL8PsuZ+PhR5noqzrYBF0sdwW3/O/sWVucWg8=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -286,12 +336,18 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk=
github.com/qiniu/go-sdk/v7 v7.18.2 h1:vk9eo5OO7aqgAOPF0Ytik/gt7CMKuNgzC/IPkhda6rk=
github.com/qiniu/go-sdk/v7 v7.18.2/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w=
github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk=
github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
@@ -324,6 +380,7 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -396,7 +453,9 @@ golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
@@ -423,6 +482,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
@@ -445,20 +505,26 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -515,8 +581,10 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
@@ -527,6 +595,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.8 h1:WAGEZ/aEcznN4D03laj8DKnehe1e9gYQAjW8xyPRdeo=
@@ -50,3 +50,15 @@ func (o *ConversationApi) SetConversations(c *gin.Context) {

func (o *ConversationApi) GetConversationOfflinePushUserIDs(c *gin.Context) {
    a2r.Call(conversation.ConversationClient.GetConversationOfflinePushUserIDs, o.Client, c)
}

func (o *ConversationApi) GetFullOwnerConversationIDs(c *gin.Context) {
    a2r.Call(conversation.ConversationClient.GetFullOwnerConversationIDs, o.Client, c)
}

func (o *ConversationApi) GetIncrementalConversation(c *gin.Context) {
    a2r.Call(conversation.ConversationClient.GetIncrementalConversation, o.Client, c)
}

func (o *ConversationApi) GetOwnerConversation(c *gin.Context) {
    a2r.Call(conversation.ConversationClient.GetOwnerConversation, o.Client, c)
}
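Each new endpoint is a single a2r.Call line: a2r adapts a gRPC client method, passed as a method expression such as conversation.ConversationClient.GetOwnerConversation, into a gin HTTP handler by binding the JSON body, invoking the RPC, and writing the response. A simplified generic sketch of that adapter pattern follows; the signature is hypothetical and omits details (such as grpc.CallOption passing and the project's error-code envelope) of the actual openimsdk/tools/a2r API:

package a2rsketch

import (
    "context"

    "github.com/gin-gonic/gin"
)

// Call binds the HTTP request body into Req, forwards it to one RPC
// method on client, and writes the RPC response back as JSON.
func Call[Client any, Req any, Resp any](
    rpc func(client Client, ctx context.Context, req *Req) (*Resp, error),
    client Client,
    c *gin.Context,
) {
    var req Req
    if err := c.ShouldBindJSON(&req); err != nil {
        c.JSON(400, gin.H{"errCode": 1001, "errMsg": err.Error()})
        return
    }
    resp, err := rpc(client, c.Request.Context(), &req)
    if err != nil {
        c.JSON(500, gin.H{"errCode": 1002, "errMsg": err.Error()})
        return
    }
    c.JSON(200, gin.H{"errCode": 0, "data": resp})
}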
@@ -16,6 +16,7 @@ package api

import (
    "github.com/gin-gonic/gin"

    "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
    "github.com/openimsdk/protocol/relation"
    "github.com/openimsdk/tools/a2r"
@@ -57,7 +58,6 @@ func (o *FriendApi) GetFriendList(c *gin.Context) {

func (o *FriendApi) GetDesignatedFriends(c *gin.Context) {
    a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c)
    //a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c, a2r.NewNilReplaceOption(relation.FriendClient.GetDesignatedFriends))
}

func (o *FriendApi) SetFriendRemark(c *gin.Context) {
@@ -100,6 +100,8 @@ func (o *FriendApi) GetIncrementalFriends(c *gin.Context) {
    a2r.Call(relation.FriendClient.GetIncrementalFriends, o.Client, c)
}

// GetIncrementalBlacks is temporarily unused.
// Deprecated: This function is currently unused and may be removed in future versions.
func (o *FriendApi) GetIncrementalBlacks(c *gin.Context) {
    a2r.Call(relation.FriendClient.GetIncrementalBlacks, o.Client, c)
}
@@ -29,7 +29,6 @@ import (
    "time"

    kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
    ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
    "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
    "github.com/openimsdk/tools/discovery"
    "github.com/openimsdk/tools/errs"
@@ -72,10 +71,8 @@ func Start(ctx context.Context, index int, config *Config) error {
            netDone <- struct{}{}
            return
        }
        p := ginprom.NewPrometheus("app", prommetrics.GetGinCusMetrics("Api"))
        p.SetListenAddress(fmt.Sprintf(":%d", prometheusPort))
        if err = p.Use(router); err != nil && err != http.ErrServerClosed {
            netErr = errs.WrapMsg(err, fmt.Sprintf("prometheus start err: %d", prometheusPort))
        if err := prommetrics.ApiInit(prometheusPort); err != nil && err != http.ErrServerClosed {
            netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort))
            netDone <- struct{}{}
        }
    }()
@@ -2,12 +2,13 @@ package api

import (
    "fmt"
    "net/http"
    "strings"

    "github.com/gin-gonic/gin"
    "github.com/gin-gonic/gin/binding"
    "github.com/go-playground/validator/v10"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
    "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
    "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
    "github.com/openimsdk/protocol/constant"
@@ -15,10 +16,25 @@ import (
    "github.com/openimsdk/tools/discovery"
    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/mw"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
    "net/http"
    "strings"
)

func prommetricsGin() gin.HandlerFunc {
    return func(c *gin.Context) {
        c.Next()
        path := c.FullPath()
        if c.Writer.Status() == http.StatusNotFound {
            prommetrics.HttpCall("<404>", c.Request.Method, c.Writer.Status())
        } else {
            prommetrics.HttpCall(path, c.Request.Method, c.Writer.Status())
        }
        if resp := apiresp.GetGinApiResponse(c); resp != nil {
            prommetrics.APICall(path, c.Request.Method, resp.ErrCode)
        }
    }
}

func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.Engine {
    disCov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
@@ -37,7 +53,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
    authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth)
    thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL)

    r.Use(gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
    r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
    u := NewUserApi(*userRpc)
    m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
    userRouterGroup := r.Group("/user")
@@ -118,9 +134,9 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
    groupRouterGroup.POST("/get_group_abstract_info", g.GetGroupAbstractInfo)
    groupRouterGroup.POST("/get_groups", g.GetGroups)
    groupRouterGroup.POST("/get_group_member_user_id", g.GetGroupMemberUserIDs)
    groupRouterGroup.POST("/get_incremental_join_group", g.GetIncrementalJoinGroup)
    groupRouterGroup.POST("/get_incremental_group_member", g.GetIncrementalGroupMember)
    groupRouterGroup.POST("/get_incremental_group_member_batch", g.GetIncrementalGroupMemberBatch)
    groupRouterGroup.POST("/get_incremental_join_groups", g.GetIncrementalJoinGroup)
    groupRouterGroup.POST("/get_incremental_group_members", g.GetIncrementalGroupMember)
    groupRouterGroup.POST("/get_incremental_group_members_batch", g.GetIncrementalGroupMemberBatch)
    groupRouterGroup.POST("/get_full_group_member_user_ids", g.GetFullGroupMemberUserIDs)
    groupRouterGroup.POST("/get_full_join_group_ids", g.GetFullJoinGroupIDs)
}
@@ -192,6 +208,9 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
    conversationGroup.POST("/get_conversations", c.GetConversations)
    conversationGroup.POST("/set_conversations", c.SetConversations)
    conversationGroup.POST("/get_conversation_offline_push_user_ids", c.GetConversationOfflinePushUserIDs)
    conversationGroup.POST("/get_full_conversation_ids", c.GetFullOwnerConversationIDs)
    conversationGroup.POST("/get_incremental_conversations", c.GetIncrementalConversation)
    conversationGroup.POST("/get_owner_conversation", c.GetOwnerConversation)
}

    statisticsGroup := r.Group("/statistics")
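prommetricsGin runs c.Next() first and records the outcome afterwards, labeling unmatched routes as "<404>" so arbitrary request paths cannot explode label cardinality; registering it first in r.Use also means it observes requests that panic and are recovered later in the chain. A self-contained sketch of the same middleware pattern with an explicit CounterVec (hypothetical metric name, not the repo's prommetrics package):

package main

import (
    "net/http"
    "strconv"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// httpCalls counts requests by route template, method, and status code.
var httpCalls = promauto.NewCounterVec(prometheus.CounterOpts{
    Name: "http_calls_total",
    Help: "HTTP calls by path, method, and status code.",
}, []string{"path", "method", "code"})

func metricsMiddleware() gin.HandlerFunc {
    return func(c *gin.Context) {
        c.Next()             // run the rest of the chain, then record the outcome
        path := c.FullPath() // route template, e.g. /conversation/get_conversations
        if c.Writer.Status() == http.StatusNotFound {
            path = "<404>" // collapse unmatched routes into one label value
        }
        httpCalls.WithLabelValues(path, c.Request.Method, strconv.Itoa(c.Writer.Status())).Inc()
    }
}

func main() {
    r := gin.New()
    r.Use(metricsMiddleware(), gin.Recovery())
    r.GET("/metrics", gin.WrapH(promhttp.Handler()))
    _ = r.Run(":8080")
}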
@@ -20,6 +20,7 @@ import (
    "runtime/debug"
    "sync"
    "sync/atomic"
    "time"

    "github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
    "github.com/openimsdk/protocol/constant"
@@ -72,6 +73,10 @@ type Client struct {
    closed     atomic.Bool
    closedErr  error
    token      string
    hbCtx      context.Context
    hbCancel   context.CancelFunc
    subLock    sync.Mutex
    subUserIDs map[string]struct{}
}

// ResetClient updates the client's state with new connection and context information.
@@ -88,6 +93,7 @@ func (c *Client) ResetClient(ctx *UserConnContext, conn LongConn, longConnServer
    c.closed.Store(false)
    c.closedErr = nil
    c.token = ctx.GetToken()
    c.hbCtx, c.hbCancel = context.WithCancel(c.ctx)
}

func (c *Client) pingHandler(_ string) error {
@@ -98,6 +104,13 @@ func (c *Client) pingHandler(_ string) error {
    return c.writePongMsg()
}

func (c *Client) pongHandler(_ string) error {
    if err := c.conn.SetReadDeadline(pongWait); err != nil {
        return err
    }
    return nil
}

// readMessage continuously reads messages from the connection.
func (c *Client) readMessage() {
    defer func() {
@@ -110,7 +123,9 @@ func (c *Client) readMessage() {

    c.conn.SetReadLimit(maxMessageSize)
    _ = c.conn.SetReadDeadline(pongWait)
    c.conn.SetPongHandler(c.pongHandler)
    c.conn.SetPingHandler(c.pingHandler)
    c.activeHeartbeat(c.hbCtx)

    for {
        log.ZDebug(c.ctx, "readMessage")
@@ -147,6 +162,7 @@ func (c *Client) readMessage() {
        case CloseMessage:
            c.closedErr = ErrClientClosed
            return

        default:
        }
    }
@@ -202,6 +218,8 @@ func (c *Client) handleMessage(message []byte) error {
        resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq)
    case WsSetBackgroundStatus:
        resp, messageErr = c.setAppBackgroundStatus(ctx, binaryReq)
    case WsSubUserOnlineStatus:
        resp, messageErr = c.longConnServer.SubUserOnlineStatus(ctx, c, binaryReq)
    default:
        return fmt.Errorf(
            "ReqIdentifier failed,sendID:%s,msgIncr:%s,reqIdentifier:%d",
@@ -235,6 +253,7 @@ func (c *Client) close() {

    c.closed.Store(true)
    c.conn.Close()
    c.hbCancel() // Close server-initiated heartbeat.
    c.longConnServer.UnRegister(c)
}

@@ -321,6 +340,44 @@ func (c *Client) writeBinaryMsg(resp Resp) error {
    return c.conn.WriteMessage(MessageBinary, encodedBuf)
}

// Actively initiate a heartbeat when the platform is Web.
func (c *Client) activeHeartbeat(ctx context.Context) {
    if c.PlatformID == constant.WebPlatformID {
        go func() {
            log.ZDebug(ctx, "server initiative send heartbeat start.")
            ticker := time.NewTicker(pingPeriod)
            defer ticker.Stop()

            for {
                select {
                case <-ticker.C:
                    if err := c.writePingMsg(); err != nil {
                        log.ZWarn(c.ctx, "send Ping Message error.", err)
                        return
                    }
                case <-c.hbCtx.Done():
                    return
                }
            }
        }()
    }
}

func (c *Client) writePingMsg() error {
    if c.closed.Load() {
        return nil
    }

    c.w.Lock()
    defer c.w.Unlock()

    err := c.conn.SetWriteDeadline(writeWait)
    if err != nil {
        return err
    }

    return c.conn.WriteMessage(PingMessage, nil)
}

func (c *Client) writePongMsg() error {
    if c.closed.Load() {
        return nil
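The heartbeat pieces above work together: every ping or pong from the peer pushes the read deadline forward by pongWait, while activeHeartbeat pings web clients every pingPeriod (27s, i.e. 9/10 of the 30s pongWait), so a healthy connection never hits the deadline. A standalone gorilla/websocket sketch of the same keepalive loop; note the raw library takes an absolute time in SetReadDeadline, whereas the LongConn wrapper above takes a duration:

package main

import (
    "log"
    "time"

    "github.com/gorilla/websocket"
)

const (
    pongWait   = 30 * time.Second
    pingPeriod = (pongWait * 9) / 10 // 27s; must stay below pongWait
)

// keepAlive pings the peer on a ticker and extends the read deadline
// whenever a pong arrives; the read loop fails once the peer goes silent.
func keepAlive(conn *websocket.Conn, done <-chan struct{}) {
    _ = conn.SetReadDeadline(time.Now().Add(pongWait))
    conn.SetPongHandler(func(string) error {
        // Each pong buys the connection another pongWait of life.
        return conn.SetReadDeadline(time.Now().Add(pongWait))
    })
    go func() {
        ticker := time.NewTicker(pingPeriod)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(time.Second)); err != nil {
                    log.Println("ping failed:", err)
                    return
                }
            case <-done:
                return
            }
        }
    }()
}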
@@ -16,10 +16,10 @@ package msggateway

import (
    "crypto/rand"
    "github.com/stretchr/testify/assert"
    "sync"
    "testing"

    "github.com/stretchr/testify/assert"
    "unsafe"
)

func mockRandom() []byte {
@@ -132,3 +132,8 @@ func BenchmarkDecompressWithSyncPool(b *testing.B) {
        assert.Equal(b, nil, err)
    }
}

func TestName(t *testing.T) {
    t.Log(unsafe.Sizeof(Client{}))
}
@@ -43,6 +43,7 @@ const (
    WSKickOnlineMsg       = 2002
    WsLogoutMsg           = 2003
    WsSetBackgroundStatus = 2004
    WsSubUserOnlineStatus = 2005
    WSDataError           = 3001
)

@@ -53,6 +54,9 @@ const (
    // Time allowed to read the next pong message from the peer.
    pongWait = 30 * time.Second

    // Send pings to peer with this period. Must be less than pongWait.
    pingPeriod = (pongWait * 9) / 10

    // Maximum message size allowed from peer.
    maxMessageSize = 51200
)
@@ -19,6 +19,7 @@ import (
    "github.com/openimsdk/open-im-server/v3/pkg/authverify"
    "github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
    "github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
    "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
    "github.com/openimsdk/protocol/constant"
    "github.com/openimsdk/protocol/msggateway"
    "github.com/openimsdk/tools/discovery"
@@ -31,6 +32,10 @@ import (
func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
    s.LongConnServer.SetDiscoveryRegistry(disCov, config)
    msggateway.RegisterMsgGatewayServer(server, s)
    s.userRcp = rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
    if s.ready != nil {
        return s.ready(s)
    }
    return nil
}

@@ -50,18 +55,21 @@ type Server struct {
    LongConnServer LongConnServer
    config         *Config
    pushTerminal   map[int]struct{}
    ready          func(srv *Server) error
    userRcp        rpcclient.UserRpcClient
}

func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
    s.LongConnServer = LongConnServer
}

func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server {
func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server {
    s := &Server{
        rpcPort:        rpcPort,
        LongConnServer: longConnServer,
        pushTerminal:   make(map[int]struct{}),
        config:         conf,
        ready:          ready,
    }
    s.pushTerminal[constant.IOSPlatformID] = struct{}{}
    s.pushTerminal[constant.AndroidPlatformID] = struct{}{}
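NewServer gains a ready callback because the online cache can only be built after InitServer has created s.userRcp, yet the WsServer that needs the cache must be constructed first. The callback defers that wiring until the Server is fully initialized. A stripped-down sketch of the pattern with placeholder types:

package main

import "fmt"

type Server struct {
    userRpc string // stands in for rpcclient.UserRpcClient
    ready   func(*Server) error
}

// NewServer stores the callback; it cannot run yet because userRpc
// is only available once InitServer has built the RPC client.
func NewServer(ready func(*Server) error) *Server {
    return &Server{ready: ready}
}

func (s *Server) InitServer() error {
    s.userRpc = "user-rpc-client" // built from service discovery in the real code
    if s.ready != nil {
        return s.ready(s) // late wiring: the caller receives the finished Server
    }
    return nil
}

func main() {
    srv := NewServer(func(s *Server) error {
        fmt.Println("online cache wired with", s.userRpc)
        return nil
    })
    _ = srv.InitServer()
}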
@@ -17,6 +17,8 @@ package msggateway

import (
    "context"
    "github.com/openimsdk/open-im-server/v3/pkg/common/config"
    "github.com/openimsdk/open-im-server/v3/pkg/rpccache"
    "github.com/openimsdk/tools/db/redisutil"
    "github.com/openimsdk/tools/utils/datautil"
    "time"

@@ -26,6 +28,7 @@ import (
type Config struct {
    MsgGateway     config.MsgGateway
    Share          config.Share
    RedisConfig    config.Redis
    WebhooksConfig config.Webhooks
    Discovery      config.Discovery
}
@@ -42,18 +45,25 @@ func Start(ctx context.Context, index int, conf *Config) error {
    if err != nil {
        return err
    }
    longServer, err := NewWsServer(
    rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
    if err != nil {
        return err
    }
    longServer := NewWsServer(
        conf,
        WithPort(wsPort),
        WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
        WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
        WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
    )
    if err != nil {
        return err
    }

    hubServer := NewServer(rpcPort, longServer, conf)
    hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
        longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
        return nil
    })

    go longServer.ChangeOnlineStatus(4)

    netDone := make(chan error)
    go func() {
        err = hubServer.Start(ctx, index, conf)
@@ -16,10 +16,11 @@ package msggateway

import (
    "encoding/json"
    "github.com/openimsdk/tools/apiresp"
    "net/http"
    "time"

    "github.com/openimsdk/tools/apiresp"

    "github.com/gorilla/websocket"
    "github.com/openimsdk/tools/errs"
)
internal/msggateway/online.go (new file) | 112

@@ -0,0 +1,112 @@
package msggateway

import (
    "context"
    "crypto/md5"
    "encoding/binary"
    "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
    pbuser "github.com/openimsdk/protocol/user"
    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/mcontext"
    "github.com/openimsdk/tools/utils/datautil"
    "math/rand"
    "strconv"
    "time"
)

func (ws *WsServer) ChangeOnlineStatus(concurrent int) {
    if concurrent < 1 {
        concurrent = 1
    }
    const renewalTime = cachekey.OnlineExpire / 3
    //const renewalTime = time.Second * 10
    renewalTicker := time.NewTicker(renewalTime)

    requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent)
    changeStatus := make([][]UserState, concurrent)

    for i := 0; i < concurrent; i++ {
        requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64)
        changeStatus[i] = make([]UserState, 0, 100)
    }

    mergeTicker := time.NewTicker(time.Second)

    local2pb := func(u UserState) *pbuser.UserOnlineStatus {
        return &pbuser.UserOnlineStatus{
            UserID:  u.UserID,
            Online:  u.Online,
            Offline: u.Offline,
        }
    }

    rNum := rand.Uint64()
    pushUserState := func(us ...UserState) {
        for _, u := range us {
            sum := md5.Sum([]byte(u.UserID))
            i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
            changeStatus[i] = append(changeStatus[i], u)
            status := changeStatus[i]
            if len(status) == cap(status) {
                req := &pbuser.SetUserOnlineStatusReq{
                    Status: datautil.Slice(status, local2pb),
                }
                changeStatus[i] = status[:0]
                select {
                case requestChs[i] <- req:
                default:
                    log.ZError(context.Background(), "user online processing is too slow", nil)
                }
            }
        }
    }

    pushAllUserState := func() {
        for i, status := range changeStatus {
            if len(status) == 0 {
                continue
            }
            req := &pbuser.SetUserOnlineStatusReq{
                Status: datautil.Slice(status, local2pb),
            }
            changeStatus[i] = status[:0]
            select {
            case requestChs[i] <- req:
            default:
                log.ZError(context.Background(), "user online processing is too slow", nil)
            }
        }
    }

    opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10))
    doRequest := func(req *pbuser.SetUserOnlineStatusReq) {
        ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5)
        defer cancel()
        if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil {
            log.ZError(ctx, "update user online status", err)
        }
    }

    for i := 0; i < concurrent; i++ {
        go func(ch <-chan *pbuser.SetUserOnlineStatusReq) {
            for req := range ch {
                doRequest(req)
            }
        }(requestChs[i])
    }

    for {
        select {
        case <-mergeTicker.C:
            pushAllUserState()
        case now := <-renewalTicker.C:
            deadline := now.Add(-cachekey.OnlineExpire / 3)
            users := ws.clients.GetAllUserStatus(deadline, now)
            log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users))
            pushUserState(users...)
        case state := <-ws.clients.UserState():
            log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline)
            pushUserState(state)
        }
    }
}
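ChangeOnlineStatus fans status updates out over `concurrent` worker channels by hashing the user ID, so all updates for one user land on the same worker and are applied in order; rNum only re-randomizes the user-to-worker assignment per process. A standalone sketch of just that routing step:

package main

import (
    "crypto/md5"
    "encoding/binary"
    "fmt"
)

// shardFor maps a userID onto one of n workers. The first 8 bytes of
// the md5 digest give a stable, well-distributed 64-bit value, so a
// given user is always handled by the same worker and its online/offline
// updates stay ordered relative to each other.
func shardFor(userID string, n uint64, salt uint64) uint64 {
    sum := md5.Sum([]byte(userID))
    return (binary.BigEndian.Uint64(sum[:]) + salt) % n
}

func main() {
    // "u1" hashes to the same shard both times, demonstrating stability.
    for _, id := range []string{"u1", "u2", "u3", "u1"} {
        fmt.Println(id, "->", shardFor(id, 4, 0))
    }
}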
181
internal/msggateway/subscription.go
Normal file
181
internal/msggateway/subscription.go
Normal file
@ -0,0 +1,181 @@
|
||||
package msggateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"github.com/openimsdk/protocol/constant"
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/openimsdk/tools/utils/idutil"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) {
|
||||
if ws.clients.RecvSubChange(userID, platformIDs) {
|
||||
log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs)
|
||||
} else {
|
||||
log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs)
|
||||
}
|
||||
ws.pushUserIDOnlineStatus(ctx, userID, platformIDs)
|
||||
}
|
||||
|
||||
func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) {
|
||||
var sub sdkws.SubUserOnlineStatus
|
||||
if err := proto.Unmarshal(data.Data, &sub); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID)
|
||||
var resp sdkws.SubUserOnlineStatusTips
|
||||
if len(sub.SubscribeUserID) > 0 {
|
||||
resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID))
|
||||
for _, userID := range sub.SubscribeUserID {
|
||||
platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{
|
||||
UserID: userID,
|
||||
OnlinePlatformIDs: platformIDs,
|
||||
})
|
||||
}
|
||||
}
|
||||
return proto.Marshal(&resp)
|
||||
}
|
||||
|
||||
type subClient struct {
|
||||
clients map[string]*Client
|
||||
}
|
||||
|
||||
func newSubscription() *Subscription {
|
||||
return &Subscription{
|
||||
userIDs: make(map[string]*subClient),
|
||||
}
|
||||
}
|
||||
|
||||
type Subscription struct {
|
||||
lock sync.RWMutex
|
||||
userIDs map[string]*subClient
|
||||
}
|
||||
|
||||
func (s *Subscription) GetClient(userID string) []*Client {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
cs, ok := s.userIDs[userID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
clients := make([]*Client, 0, len(cs.clients))
|
||||
for _, client := range cs.clients {
|
||||
clients = append(clients, client)
|
||||
}
|
||||
return clients
|
||||
}
|
||||
|
||||
func (s *Subscription) DelClient(client *Client) {
|
||||
client.subLock.Lock()
|
||||
userIDs := datautil.Keys(client.subUserIDs)
|
||||
for _, userID := range userIDs {
|
||||
delete(client.subUserIDs, userID)
|
||||
}
|
||||
client.subLock.Unlock()
|
||||
if len(userIDs) == 0 {
|
||||
return
|
||||
}
|
||||
addr := client.ctx.GetRemoteAddr()
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
for _, userID := range userIDs {
|
||||
sub, ok := s.userIDs[userID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
delete(sub.clients, addr)
|
||||
if len(sub.clients) == 0 {
|
||||
delete(s.userIDs, userID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) {
|
||||
if len(addUserIDs)+len(delUserIDs) == 0 {
|
||||
return
|
||||
}
|
||||
var (
|
||||
del = make(map[string]struct{})
|
||||
add = make(map[string]struct{})
|
||||
)
|
||||
client.subLock.Lock()
|
||||
for _, userID := range delUserIDs {
|
||||
if _, ok := client.subUserIDs[userID]; !ok {
|
||||
continue
|
||||
}
|
||||
del[userID] = struct{}{}
|
||||
delete(client.subUserIDs, userID)
|
||||
}
|
||||
for _, userID := range addUserIDs {
|
||||
delete(del, userID)
|
||||
if _, ok := client.subUserIDs[userID]; ok {
|
||||
continue
|
||||
}
|
||||
client.subUserIDs[userID] = struct{}{}
|
||||
}
|
||||
client.subLock.Unlock()
|
||||
if len(del)+len(add) == 0 {
|
||||
return
|
||||
}
|
||||
addr := client.ctx.GetRemoteAddr()
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
for userID := range del {
|
||||
sub, ok := s.userIDs[userID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
delete(sub.clients, addr)
|
||||
if len(sub.clients) == 0 {
|
||||
delete(s.userIDs, userID)
|
||||
}
|
||||
}
|
||||
for userID := range add {
|
||||
sub, ok := s.userIDs[userID]
|
||||
if !ok {
|
||||
sub = &subClient{clients: make(map[string]*Client)}
|
||||
s.userIDs[userID] = sub
|
||||
}
|
||||
sub.clients[addr] = client
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) {
	clients := ws.subscription.GetClient(userID)
	if len(clients) == 0 {
		return
	}
	msgContent, err := json.Marshal(platformIDs)
	if err != nil {
		log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err)
		return
	}
	now := time.Now().UnixMilli()
	msgID := idutil.GetMsgIDByMD5(userID)
	msg := &sdkws.MsgData{
		SendID:           userID,
		ClientMsgID:      msgID,
		ServerMsgID:      msgID,
		SenderPlatformID: constant.AdminPlatformID,
		SessionType:      constant.NotificationChatType,
		ContentType:      constant.UserSubscribeOnlineStatusNotification,
		Content:          msgContent,
		SendTime:         now,
		CreateTime:       now,
	}
	for _, client := range clients {
		msg.RecvID = client.UserID
		if err := client.PushMessage(ctx, msg); err != nil {
			log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "content", msgContent)
		}
	}
}

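Note: the notification body above is nothing more than the JSON-encoded platform ID list. A subscriber decodes it like this (standalone sketch; the payload value is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	content := []byte("[1,5]") // e.g. msg.Content delivered by the gateway
	var platformIDs []int32
	if err := json.Unmarshal(content, &platformIDs); err != nil {
		panic(err)
	}
	online := len(platformIDs) > 0 // empty list means the user went fully offline
	fmt.Println(online, platformIDs) // true [1 5]
}
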
@ -1,135 +1,185 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package msggateway

import (
	"context"
	"sync"

	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/utils/datautil"
	"sync"
	"time"
)

type UserMap struct {
	m sync.Map
type UserMap interface {
	GetAll(userID string) ([]*Client, bool)
	Get(userID string, platformID int) ([]*Client, bool, bool)
	Set(userID string, v *Client)
	DeleteClients(userID string, clients []*Client) (isDeleteUser bool)
	UserState() <-chan UserState
	GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState
	RecvSubChange(userID string, platformIDs []int32) bool
}

func newUserMap() *UserMap {
	return &UserMap{}
type UserState struct {
	UserID  string
	Online  []int32
	Offline []int32
}

func (u *UserMap) GetAll(key string) ([]*Client, bool) {
	allClients, ok := u.m.Load(key)
type UserPlatform struct {
	Time    time.Time
	Clients []*Client
}

func (u *UserPlatform) PlatformIDs() []int32 {
	if len(u.Clients) == 0 {
		return nil
	}
	platformIDs := make([]int32, 0, len(u.Clients))
	for _, client := range u.Clients {
		platformIDs = append(platformIDs, int32(client.PlatformID))
	}
	return platformIDs
}

func (u *UserPlatform) PlatformIDSet() map[int32]struct{} {
	if len(u.Clients) == 0 {
		return nil
	}
	platformIDs := make(map[int32]struct{})
	for _, client := range u.Clients {
		platformIDs[int32(client.PlatformID)] = struct{}{}
	}
	return platformIDs
}

func newUserMap() UserMap {
	return &userMap{
		data: make(map[string]*UserPlatform),
		ch:   make(chan UserState, 10000),
	}
}

type userMap struct {
	lock sync.RWMutex
	data map[string]*UserPlatform
	ch   chan UserState
}

func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result, ok := u.data[userID]
	if !ok {
		return false
	}
	localPlatformIDs := result.PlatformIDSet()
	for _, platformID := range platformIDs {
		delete(localPlatformIDs, platformID)
	}
	if len(localPlatformIDs) == 0 {
		return false
	}
	u.push(userID, result, nil)
	return true
}

func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool {
	select {
	case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}:
		userPlatform.Time = time.Now()
		return true
	default:
		return false
	}
}

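Note: push deliberately uses a non-blocking send, so a full UserState channel (buffer 10000) drops the event instead of stalling whoever holds the map lock, and Time is only stamped when the send succeeds. The select/default shape in isolation:

package main

import "fmt"

func trySend(ch chan int, v int) bool {
	select {
	case ch <- v:
		return true
	default: // buffer full: drop rather than block the caller
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(trySend(ch, 1)) // true
	fmt.Println(trySend(ch, 2)) // false, buffer already full
}
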
func (u *userMap) GetAll(userID string) ([]*Client, bool) {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result, ok := u.data[userID]
	if !ok {
		return nil, false
	}
	return result.Clients, true
}

func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result, ok := u.data[userID]
	if !ok {
		return nil, false, false
	}
	var clients []*Client
	for _, client := range result.Clients {
		if client.PlatformID == platformID {
			clients = append(clients, client)
		}
	}
	return clients, true, len(clients) > 0
}

func (u *userMap) Set(userID string, client *Client) {
	u.lock.Lock()
	defer u.lock.Unlock()
	result, ok := u.data[userID]
	if ok {
		return allClients.([]*Client), ok
	}
	return nil, ok
}

func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) {
	allClients, userExisted := u.m.Load(key)
	if userExisted {
		var clients []*Client
		for _, client := range allClients.([]*Client) {
			if client.PlatformID == platformID {
				clients = append(clients, client)
			}
		}
		if len(clients) > 0 {
			return clients, userExisted, true
		}
		return clients, userExisted, false
	}
	return nil, userExisted, false
}

// Set adds a client to the map.
func (u *UserMap) Set(key string, v *Client) {
	allClients, existed := u.m.Load(key)
	if existed {
		log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID)
		oldClients := allClients.([]*Client)
		oldClients = append(oldClients, v)
		u.m.Store(key, oldClients)
		result.Clients = append(result.Clients, client)
	} else {
		log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID)

		var clients []*Client
		clients = append(clients, v)
		u.m.Store(key, clients)
		result = &UserPlatform{
			Clients: []*Client{client},
		}
		u.data[userID] = result
	}
	u.push(client.UserID, result, nil)
}

func (u *UserMap) delete(key string, connRemoteAddr string) (isDeleteUser bool) {
	// Attempt to load the clients associated with the key.
	allClients, existed := u.m.Load(key)
	if !existed {
		// Return false immediately if the key does not exist.
func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) {
	if len(clients) == 0 {
		return false
	}

	// Convert allClients to a slice of *Client.
	oldClients := allClients.([]*Client)
	var remainingClients []*Client
	for _, client := range oldClients {
		// Keep clients that do not match the connRemoteAddr.
		if client.ctx.GetRemoteAddr() != connRemoteAddr {
			remainingClients = append(remainingClients, client)
		}
	u.lock.Lock()
	defer u.lock.Unlock()
	result, ok := u.data[userID]
	if !ok {
		return false
	}

	// If no clients remain after filtering, delete the key from the map.
	if len(remainingClients) == 0 {
		u.m.Delete(key)
		return true
	}

	// Otherwise, update the key with the remaining clients.
	u.m.Store(key, remainingClients)
	return false
}

func (u *UserMap) deleteClients(key string, clients []*Client) (isDeleteUser bool) {
	m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) {
		return c.ctx.GetRemoteAddr(), struct{}{}
	offline := make([]int32, 0, len(clients))
	deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string {
		return client.ctx.GetRemoteAddr()
	})
	allClients, existed := u.m.Load(key)
	if !existed {
		// If the key doesn't exist, return false.
		return false
	}

	// Filter out clients that are in the deleteMap.
	oldClients := allClients.([]*Client)
	var remainingClients []*Client
	for _, client := range oldClients {
		if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted {
			remainingClients = append(remainingClients, client)
	tmp := result.Clients
	result.Clients = result.Clients[:0]
	for _, client := range tmp {
		if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli {
			offline = append(offline, int32(client.PlatformID))
		} else {
			result.Clients = append(result.Clients, client)
		}
	}

	// Update or delete the key based on the remaining clients.
	if len(remainingClients) == 0 {
		u.m.Delete(key)
		return true
	defer u.push(userID, result, offline)
	if len(result.Clients) > 0 {
		return false
	}

	u.m.Store(key, remainingClients)
	return false
	delete(u.data, userID)
	return true
}

func (u *UserMap) DeleteAll(key string) {
	u.m.Delete(key)
func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState {
	u.lock.RLock()
	defer u.lock.RUnlock()
	result := make([]UserState, 0, len(u.data))
	for userID, userPlatform := range u.data {
		if deadline.Before(userPlatform.Time) {
			continue // refreshed after the deadline: no renewal needed yet
		}
		userPlatform.Time = nowtime
		online := make([]int32, 0, len(userPlatform.Clients))
		for _, client := range userPlatform.Clients {
			online = append(online, int32(client.PlatformID))
		}
		result = append(result, UserState{UserID: userID, Online: online})
	}
	return result
}

func (u *userMap) UserState() <-chan UserState {
	return u.ch
}

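Note: GetAllUserStatus is the renewal sweep: entries refreshed after the deadline are skipped, the rest are stamped with nowtime and re-announced so their cached state does not expire. A standalone model of that sweep:

package main

import (
	"fmt"
	"time"
)

func sweep(last map[string]time.Time, deadline, now time.Time) []string {
	var renew []string
	for id, t := range last {
		if deadline.Before(t) {
			continue // announced recently enough: still fresh
		}
		last[id] = now // stamp, so the next sweep skips it
		renew = append(renew, id)
	}
	return renew
}

func main() {
	now := time.Now()
	last := map[string]time.Time{
		"u1": now.Add(-10 * time.Minute), // stale, needs re-announcing
		"u2": now,                        // fresh
	}
	fmt.Println(sweep(last, now.Add(-5*time.Minute), now)) // [u1]
}
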
@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
	pbAuth "github.com/openimsdk/protocol/auth"
	"github.com/openimsdk/tools/mcontext"
	"net/http"
@ -48,6 +49,7 @@ type LongConnServer interface {
	KickUserConn(client *Client) error
	UnRegister(c *Client)
	SetKickHandlerInfo(i *kickHandler)
	SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error)
	Compressor
	Encoder
	MessageHandler
@ -60,7 +62,9 @@ type WsServer struct {
	registerChan    chan *Client
	unregisterChan  chan *Client
	kickHandlerChan chan *kickHandler
	clients         *UserMap
	clients           UserMap
	online            *rpccache.OnlineCache
	subscription      *Subscription
	clientPool        sync.Pool
	onlineUserNum     atomic.Int64
	onlineUserConnNum atomic.Int64
@ -90,18 +94,18 @@ func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry,
	ws.disCov = disCov
}

func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
	err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
	if err != nil {
		log.ZWarn(ctx, "SetUserStatus err", err)
	}
	switch status {
	case constant.Online:
		ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
	case constant.Offline:
		ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
	}
}
//func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
//	err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
//	if err != nil {
//		log.ZWarn(ctx, "SetUserStatus err", err)
//	}
//	switch status {
//	case constant.Online:
//		ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
//	case constant.Offline:
//		ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
//	}
//}

func (ws *WsServer) UnRegister(c *Client) {
	ws.unregisterChan <- c
@ -119,11 +123,13 @@ func (ws *WsServer) GetUserPlatformCons(userID string, platform int) ([]*Client,
	return ws.clients.Get(userID, platform)
}

func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
	var config configs
	for _, o := range opts {
		o(&config)
	}
	//userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)

	v := validator.New()
	return &WsServer{
		msgGatewayConfig: msgGatewayConfig,
@ -141,10 +147,11 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
		kickHandlerChan: make(chan *kickHandler, 1000),
		validate:        v,
		clients:         newUserMap(),
		subscription:    newSubscription(),
		Compressor:      NewGzipCompressor(),
		Encoder:         NewGobEncoder(),
		webhookClient:   webhook.NewWebhookClient(msgGatewayConfig.WebhooksConfig.URL),
	}, nil
	}
}

func (ws *WsServer) Run(done chan error) error {
@ -278,11 +285,11 @@ func (ws *WsServer) registerClient(client *Client) {
	}()
}

	wg.Add(1)
	go func() {
		defer wg.Done()
		ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
	}()
	//wg.Add(1)
	//go func() {
	//	defer wg.Done()
	//	ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
	//}()

	wg.Wait()

@ -309,7 +316,7 @@ func getRemoteAdders(client []*Client) string {
}

func (ws *WsServer) KickUserConn(client *Client) error {
	ws.clients.deleteClients(client.UserID, []*Client{client})
	ws.clients.DeleteClients(client.UserID, []*Client{client})
	return client.KickOnlineMessage()
}

@ -325,7 +332,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
	if !clientOK {
		return
	}
	ws.clients.deleteClients(newClient.UserID, oldClients)
	ws.clients.DeleteClients(newClient.UserID, oldClients)
	for _, c := range oldClients {
		err := c.KickOnlineMessage()
		if err != nil {
@ -345,13 +352,16 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien

func (ws *WsServer) unregisterClient(client *Client) {
	defer ws.clientPool.Put(client)
	isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr())
	isDeleteUser := ws.clients.DeleteClients(client.UserID, []*Client{client})
	if isDeleteUser {
		ws.onlineUserNum.Add(-1)
		prommetrics.OnlineUserGauge.Dec()
	}
	ws.onlineUserConnNum.Add(-1)
	ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
	client.subLock.Lock()
	clear(client.subUserIDs)
	client.subLock.Unlock()
	//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
	log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
		ws.onlineUserNum.Load(), "online user conn Num",
		ws.onlineUserConnNum.Load(),
@ -17,6 +17,7 @@ package msgtransfer
import (
	"context"
	"fmt"
	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	"github.com/openimsdk/tools/db/mongoutil"
@ -29,16 +30,12 @@ import (

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
	"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mw"
	"github.com/openimsdk/tools/system/program"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)
@ -82,12 +79,21 @@ func Start(ctx context.Context, index int, config *Config) error {
	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
	msgModel := redis.NewMsgCache(rdb)
	seqModel := redis.NewSeqCache(rdb)
	msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
	seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
	seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
	msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
	if err != nil {
		return err
	}
@ -130,14 +136,8 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
		netDone <- struct{}{}
		return
	}
	proreg := prometheus.NewRegistry()
	proreg.MustRegister(
		collectors.NewGoCollector(),
	)
	proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer", &config.Share)...)
	http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
	err = http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
	if err != nil && err != http.ErrServerClosed {

	if err := prommetrics.TransferInit(prometheusPort); err != nil && err != http.ErrServerClosed {
		netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
		netDone <- struct{}{}
	}

@ -28,6 +28,7 @@ import (
	"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
	"github.com/openimsdk/protocol/constant"
	pbchat "github.com/openimsdk/protocol/msg"
	"github.com/openimsdk/protocol/msggateway"
	pbpush "github.com/openimsdk/protocol/push"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/tools/discovery"
@ -45,6 +46,7 @@ type ConsumerHandler struct {
	pushConsumerGroup      *kafka.MConsumerGroup
	offlinePusher          offlinepush.OfflinePusher
	onlinePusher           OnlinePusher
	onlineCache            *rpccache.OnlineCache
	groupLocalCache        *rpccache.GroupLocalCache
	conversationLocalCache *rpccache.ConversationLocalCache
	msgRpcClient           rpcclient.MessageRpcClient
@ -63,16 +65,17 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
	if err != nil {
		return nil, err
	}
	userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
	consumerHandler.offlinePusher = offlinePusher
	consumerHandler.onlinePusher = NewOnlinePusher(client, config)
	consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
	consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, &config.LocalCacheConfig, rdb)
	consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
	consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
	consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient,
		&config.LocalCacheConfig, rdb)
	consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
	consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
	consumerHandler.config = config
	consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
	return &consumerHandler, nil
}

@ -125,12 +128,12 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
}

// Push2User handles two types of conversations: SingleChatType and NotificationChatType.
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error {
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
	log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
	if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
		return err
	}
	wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, userIDs)
	wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
	if err != nil {
		return err
	}
@ -179,6 +182,38 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
	return true
}

func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
	var (
		onlineUserIDs  []string
		offlineUserIDs []string
	)
	for _, userID := range pushToUserIDs {
		online, err := c.onlineCache.GetUserOnline(ctx, userID)
		if err != nil {
			return nil, err
		}
		if online {
			onlineUserIDs = append(onlineUserIDs, userID)
		} else {
			offlineUserIDs = append(offlineUserIDs, userID)
		}
	}
	var result []*msggateway.SingleMsgToUserResults
	if len(onlineUserIDs) > 0 {
		var err error
		result, err = c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
		if err != nil {
			return nil, err
		}
	}
	for _, userID := range offlineUserIDs {
		result = append(result, &msggateway.SingleMsgToUserResults{
			UserID: userID,
		})
	}
	return result, nil
}

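Note: GetConnsAndOnlinePush first partitions recipients through the online cache, so the gateway RPC is skipped entirely when nobody is online and offline users still get a placeholder result entry. The partition step in isolation (isOnline stands in for onlineCache.GetUserOnline):

package main

import "fmt"

func partition(userIDs []string, isOnline func(string) bool) (online, offline []string) {
	for _, id := range userIDs {
		if isOnline(id) {
			online = append(online, id)
		} else {
			offline = append(offline, id)
		}
	}
	return online, offline
}

func main() {
	on, off := partition([]string{"u1", "u2", "u3"}, func(id string) bool { return id == "u2" })
	fmt.Println(on, off) // [u2] [u1 u3]
}
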
func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
	log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
	var pushToUserIDs []string
@ -192,7 +227,7 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s
		return err
	}

	wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
	wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
	if err != nil {
		return err
	}

@ -184,13 +184,23 @@ func (c *conversationServer) GetAllConversations(ctx context.Context, req *pbcon
}

func (c *conversationServer) GetConversations(ctx context.Context, req *pbconversation.GetConversationsReq) (*pbconversation.GetConversationsResp, error) {
	conversations, err := c.conversationDatabase.FindConversations(ctx, req.OwnerUserID, req.ConversationIDs)
	conversations, err := c.getConversations(ctx, req.OwnerUserID, req.ConversationIDs)
	if err != nil {
		return nil, err
	}
	return &pbconversation.GetConversationsResp{
		Conversations: conversations,
	}, nil
}

func (c *conversationServer) getConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*pbconversation.Conversation, error) {
	conversations, err := c.conversationDatabase.FindConversations(ctx, ownerUserID, conversationIDs)
	if err != nil {
		return nil, err
	}
	resp := &pbconversation.GetConversationsResp{Conversations: []*pbconversation.Conversation{}}
	resp.Conversations = convert.ConversationsDB2Pb(conversations)
	return resp, nil
	return convert.ConversationsDB2Pb(conversations), nil
}

func (c *conversationServer) SetConversation(ctx context.Context, req *pbconversation.SetConversationReq) (*pbconversation.SetConversationResp, error) {
@ -581,3 +591,14 @@ func (c *conversationServer) UpdateConversation(ctx context.Context, req *pbconv
	}
	return &pbconversation.UpdateConversationResp{}, nil
}

func (c *conversationServer) GetOwnerConversation(ctx context.Context, req *pbconversation.GetOwnerConversationReq) (*pbconversation.GetOwnerConversationResp, error) {
	total, conversations, err := c.conversationDatabase.GetOwnerConversation(ctx, req.UserID, req.Pagination)
	if err != nil {
		return nil, err
	}
	return &pbconversation.GetOwnerConversationResp{
		Total:         total,
		Conversations: convert.ConversationsDB2Pb(conversations),
	}, nil
}

56
internal/rpc/conversation/sync.go
Normal file
@ -0,0 +1,56 @@
package conversation

import (
	"context"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/incrversion"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/open-im-server/v3/pkg/util/hashutil"
	"github.com/openimsdk/protocol/conversation"
)

func (c *conversationServer) GetFullOwnerConversationIDs(ctx context.Context, req *conversation.GetFullOwnerConversationIDsReq) (*conversation.GetFullOwnerConversationIDsResp, error) {
	vl, err := c.conversationDatabase.FindMaxConversationUserVersionCache(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	conversationIDs, err := c.conversationDatabase.GetConversationIDs(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	idHash := hashutil.IdHash(conversationIDs)
	if req.IdHash == idHash {
		conversationIDs = nil
	}
	return &conversation.GetFullOwnerConversationIDsResp{
		Version:         idHash,
		VersionID:       vl.ID.Hex(),
		Equal:           req.IdHash == idHash,
		ConversationIDs: conversationIDs,
	}, nil
}

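Note: GetFullOwnerConversationIDs ships the full ID list only when the client's hash of it disagrees, so an already-consistent client pays one hash comparison instead of the whole payload. A standalone model of the handshake; the md5/first-8-bytes hash below is an assumption standing in for hashutil.IdHash:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"encoding/json"
	"fmt"
)

func idHash(ids []string) uint64 {
	if len(ids) == 0 {
		return 0
	}
	data, _ := json.Marshal(ids)
	sum := md5.Sum(data)
	return binary.BigEndian.Uint64(sum[:]) // first 8 bytes of the digest
}

func main() {
	serverIDs := []string{"c1", "c2"}
	clientHash := idHash([]string{"c1", "c2"}) // hash the client reports
	h := idHash(serverIDs)
	payload := serverIDs
	if clientHash == h {
		payload = nil // client already consistent: no IDs shipped
	}
	fmt.Println(h == clientHash, payload) // true []
}
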
func (c *conversationServer) GetIncrementalConversation(ctx context.Context, req *conversation.GetIncrementalConversationReq) (*conversation.GetIncrementalConversationResp, error) {
	opt := incrversion.Option[*conversation.Conversation, conversation.GetIncrementalConversationResp]{
		Ctx:             ctx,
		VersionKey:      req.UserID,
		VersionID:       req.VersionID,
		VersionNumber:   req.Version,
		Version:         c.conversationDatabase.FindConversationUserVersion,
		CacheMaxVersion: c.conversationDatabase.FindMaxConversationUserVersionCache,
		Find: func(ctx context.Context, conversationIDs []string) ([]*conversation.Conversation, error) {
			return c.getConversations(ctx, req.UserID, conversationIDs)
		},
		ID: func(elem *conversation.Conversation) string { return elem.GroupID },
		Resp: func(version *model.VersionLog, delIDs []string, insertList, updateList []*conversation.Conversation, full bool) *conversation.GetIncrementalConversationResp {
			return &conversation.GetIncrementalConversationResp{
				VersionID: version.ID.Hex(),
				Version:   uint64(version.Version),
				Full:      full,
				Delete:    delIDs,
				Insert:    insertList,
				Update:    updateList,
			}
		},
	}
	return opt.Build()
}
@ -86,12 +86,21 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
		return err
	}
	msgModel := redis.NewMsgCache(rdb)
	seqModel := redis.NewSeqCache(rdb)
	conversationClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
	userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
	groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
	friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend)
	msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
	seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
	seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
	msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
	if err != nil {
		return err
	}

@ -19,13 +19,17 @@ import (
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"path"
	"strconv"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"go.mongodb.org/mongo-driver/mongo"

	"github.com/google/uuid"
	"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
	"github.com/openimsdk/protocol/sdkws"
	"github.com/openimsdk/protocol/third"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
@ -283,6 +287,52 @@ func (t *thirdServer) apiAddress(prefix, name string) string {
	return prefix + name
}

func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
	var conf config.Third
	expireTime := time.UnixMilli(req.ExpireTime)
	findPagination := &sdkws.RequestPagination{
		PageNumber: 1,
		ShowNumber: 1000,
	}
	for {
		total, models, err := t.s3dataBase.FindByExpires(ctx, expireTime, findPagination)
		if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
			return nil, errs.Wrap(err)
		}
		needDelObjectKeys := make([]string, 0)
		for _, model := range models {
			needDelObjectKeys = append(needDelObjectKeys, model.Key)
		}

		needDelObjectKeys = datautil.Distinct(needDelObjectKeys)
		for _, key := range needDelObjectKeys {
			count, err := t.s3dataBase.FindNotDelByS3(ctx, key, expireTime)
			if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
				return nil, errs.Wrap(err)
			}
			if int(count) < 1 && t.minio != nil {
				thumbnailKey, err := t.getMinioImageThumbnailKey(ctx, key)
				if err != nil {
					return nil, errs.Wrap(err)
				}
				t.s3dataBase.DeleteObject(ctx, thumbnailKey)
				t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, needDelObjectKeys...)
				t.s3dataBase.DeleteObject(ctx, key)
			}
		}
		for _, model := range models {
			err := t.s3dataBase.DeleteSpecifiedData(ctx, model.Engine, model.Name)
			if err != nil {
				return nil, errs.Wrap(err)
			}
		}
		if total < int64(findPagination.ShowNumber) {
			break
		}
	}
	return &third.DeleteOutdatedDataResp{}, nil
}

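Note: DeleteOutdatedData keeps re-reading the first page of up to 1000 expired rows, because each pass deletes what it just fetched; the loop ends on the first short page. The loop shape in isolation (fetch stands in for FindByExpires, and the page size is shrunk for the demo):

package main

import "fmt"

func drain(fetch func(page, size int) (int64, []string)) []string {
	var all []string
	const size = 2
	for {
		total, keys := fetch(1, size) // always page 1: the pass deletes what it read
		all = append(all, keys...)
		if total < int64(size) {
			break // short page: nothing expired is left
		}
	}
	return all
}

func main() {
	pages := [][]string{{"a", "b"}, {"c"}} // simulated shrinking result set
	i := 0
	got := drain(func(page, size int) (int64, []string) {
		p := pages[i]
		i++
		return int64(len(p)), p
	})
	fmt.Println(got) // [a b c]
}
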
type FormDataMate struct {
	Name string `json:"name"`
	Size int64  `json:"size"`

@ -31,6 +31,7 @@ import (
	"github.com/openimsdk/tools/discovery"
	"github.com/openimsdk/tools/s3"
	"github.com/openimsdk/tools/s3/cos"
	"github.com/openimsdk/tools/s3/kodo"
	"github.com/openimsdk/tools/s3/minio"
	"github.com/openimsdk/tools/s3/oss"
	"google.golang.org/grpc"
@ -42,7 +43,9 @@ type thirdServer struct {
	userRpcClient rpcclient.UserRpcClient
	defaultExpire time.Duration
	config        *Config
	minio         *minio.Minio
}

type Config struct {
	RpcConfig   config.Third
	RedisConfig config.Redis
@ -73,14 +76,20 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
	}
	// Select the oss method according to the profile policy
	enable := config.RpcConfig.Object.Enable
	var o s3.Interface
	var (
		o        s3.Interface
		minioCli *minio.Minio
	)
	switch enable {
	case "minio":
		o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
		minioCli, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
		o = minioCli
	case "cos":
		o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
	case "oss":
		o, err = oss.NewOSS(*config.RpcConfig.Object.Oss.Build())
	case "kodo":
		o, err = kodo.NewKodo(*config.RpcConfig.Object.Kodo.Build())
	default:
		err = fmt.Errorf("invalid object enable: %s", enable)
	}
@ -94,10 +103,15 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
		s3dataBase:    controller.NewS3Database(rdb, o, s3db),
		defaultExpire: time.Hour * 24 * 7,
		config:        config,
		minio:         minioCli,
	})
	return nil
}

func (t *thirdServer) getMinioImageThumbnailKey(ctx context.Context, name string) (string, error) {
	return t.minio.GetImageThumbnailKey(ctx, name)
}

func (t *thirdServer) FcmUpdateToken(ctx context.Context, req *third.FcmUpdateTokenReq) (resp *third.FcmUpdateTokenResp, err error) {
	err = t.thirdDatabase.FcmUpdateToken(ctx, req.Account, int(req.PlatformID), req.FcmToken, req.ExpireTime)
	if err != nil {

122
internal/rpc/user/online.go
Normal file
@ -0,0 +1,122 @@
package user

import (
	"context"
	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/sdkws"
	pbuser "github.com/openimsdk/protocol/user"
)

func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) {
	platformIDs, err := s.online.GetOnline(ctx, userID)
	if err != nil {
		return nil, err
	}
	status := pbuser.OnlineStatus{
		UserID:      userID,
		PlatformIDs: platformIDs,
	}
	if len(platformIDs) > 0 {
		status.Status = constant.Online
	} else {
		status.Status = constant.Offline
	}
	return &status, nil
}

func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) {
	res := make([]*pbuser.OnlineStatus, 0, len(userIDs))
	for _, userID := range userIDs {
		status, err := s.getUserOnlineStatus(ctx, userID)
		if err != nil {
			return nil, err
		}
		res = append(res, status)
	}
	return res, nil
}

// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) {
	if req.Genre == constant.SubscriberUser {
		err := s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
		var status []*pbuser.OnlineStatus
		status, err = s.getUsersOnlineStatus(ctx, req.UserIDs)
		if err != nil {
			return nil, err
		}
		return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
	} else if req.Genre == constant.Unsubscribe {
		err := s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
	}
	return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}

// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) {
	res, err := s.getUsersOnlineStatus(ctx, req.UserIDs)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetUserStatusResp{StatusList: res}, nil
}

// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) {
	var (
		online  []int32
		offline []int32
	)
	switch req.Status {
	case constant.Online:
		online = []int32{req.PlatformID}
	case constant.Offline:
		offline = []int32{req.PlatformID}
	}
	if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil {
		return nil, err
	}
	list, err := s.db.GetSubscribedList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	for _, userID := range list {
		tips := &sdkws.UserStatusChangeTips{
			FromUserID: req.UserID,
			ToUserID:   userID,
			Status:     req.Status,
			PlatformID: req.PlatformID,
		}
		s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
	}

	return &pbuser.SetUserStatusResp{}, nil
}

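Note: SetUserStatus translates a single platform's status change into the (online, offline) slices consumed by the cache; the Offline branch must fill the offline slice, not the online one. The split in isolation (the constant values below are illustrative; the real ones live in protocol/constant):

package main

import "fmt"

const (
	Online  int32 = 1 // illustrative values only
	Offline int32 = 2
)

func split(status, platformID int32) (online, offline []int32) {
	switch status {
	case Online:
		online = []int32{platformID}
	case Offline:
		offline = []int32{platformID}
	}
	return online, offline
}

func main() {
	on, off := split(Offline, 5)
	fmt.Println(on, off) // [] [5]
}
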
// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
	userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	onlineStatusList, err := s.getUsersOnlineStatus(ctx, userList)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}

func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
	for _, status := range req.Status {
		if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil {
			return nil, err
		}
	}
	return &pbuser.SetUserOnlineStatusResp{}, nil
}
@ -19,6 +19,7 @@ import (
	"errors"
	"github.com/openimsdk/open-im-server/v3/internal/rpc/friend"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@ -50,6 +51,7 @@ import (
)

type userServer struct {
	online                   cache.OnlineCache
	db                       controller.UserDatabase
	friendNotificationSender *friend.FriendNotificationSender
	userNotificationSender   *UserNotificationSender
@ -98,6 +100,7 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi
	msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
	localcache.InitLocalCache(&config.LocalCacheConfig)
	u := &userServer{
		online:          redis.NewUserOnline(rdb),
		db:              database,
		RegisterCenter:  client,
		friendRpcClient: &friendRpcClient,
@ -329,76 +332,6 @@ func (s *userServer) GetAllUserID(ctx context.Context, req *pbuser.GetAllUserIDR
	return &pbuser.GetAllUserIDResp{Total: int32(total), UserIDs: userIDs}, nil
}

// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (resp *pbuser.SubscribeOrCancelUsersStatusResp, err error) {
	if req.Genre == constant.SubscriberUser {
		err = s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
		var status []*pbuser.OnlineStatus
		status, err = s.db.GetUserStatus(ctx, req.UserIDs)
		if err != nil {
			return nil, err
		}
		return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
	} else if req.Genre == constant.Unsubscribe {
		err = s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
		if err != nil {
			return nil, err
		}
	}
	return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}

// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (resp *pbuser.GetUserStatusResp,
	err error) {
	onlineStatusList, err := s.db.GetUserStatus(ctx, req.UserIDs)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetUserStatusResp{StatusList: onlineStatusList}, nil
}

// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (resp *pbuser.SetUserStatusResp,
	err error) {
	err = s.db.SetUserStatus(ctx, req.UserID, req.Status, req.PlatformID)
	if err != nil {
		return nil, err
	}
	list, err := s.db.GetSubscribedList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	for _, userID := range list {
		tips := &sdkws.UserStatusChangeTips{
			FromUserID: req.UserID,
			ToUserID:   userID,
			Status:     req.Status,
			PlatformID: req.PlatformID,
		}
		s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
	}

	return &pbuser.SetUserStatusResp{}, nil
}

// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context,
	req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
	userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	onlineStatusList, err := s.db.GetUserStatus(ctx, userList)
	if err != nil {
		return nil, err
	}
	return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}

// ProcessUserCommandAdd user general function add.
func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) {
	err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID)

@ -20,6 +20,7 @@ import (
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
	"github.com/openimsdk/protocol/msg"
	"github.com/openimsdk/protocol/third"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/mw"
	"google.golang.org/grpc"
@ -39,7 +40,7 @@ type CronTaskConfig struct {
}

func Start(ctx context.Context, config *CronTaskConfig) error {
	log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime, "msgDestructTime", config.CronTask.RetainChatRecords)
	log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
	if config.CronTask.RetainChatRecords < 1 {
		return errs.New("msg destruct time must be greater than 1").Wrap()
	}
@ -66,10 +67,31 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
	}
	log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
}
	if _, err := crontab.AddFunc(config.CronTask.ChatRecordsClearTime, clearFunc); err != nil {
	if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearFunc); err != nil {
		return errs.Wrap(err)
	}
	log.ZInfo(ctx, "start cron task", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime)

	tConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third)
	if err != nil {
		return err
	}
	thirdClient := third.NewThirdClient(tConn)

	deleteFunc := func() {
		now := time.Now()
		deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
		ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
		log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
		if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
			log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now))
			return
		}
		log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
	}
	if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteFunc); err != nil {
		return errs.Wrap(err)
	}
	log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
	crontab.Start()
	<-ctx.Done()
	return nil

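Note: both jobs above are registered against the same cronExecuteTime spec. A minimal standalone equivalent, assuming robfig/cron/v3 (which matches the crontab.AddFunc/crontab.Start calls here); the spec value is illustrative:

package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	// "0 2 * * *": every day at 02:00, the same five-field format as cronExecuteTime.
	if _, err := c.AddFunc("0 2 * * *", func() { fmt.Println("clear chat records") }); err != nil {
		panic(err)
	}
	if _, err := c.AddFunc("0 2 * * *", func() { fmt.Println("delete outdated S3 objects") }); err != nil {
		panic(err)
	}
	c.Start()
	select {} // block forever, like <-ctx.Done() in the real task
}
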
@ -15,7 +15,7 @@
|
||||
package apistruct
|
||||
|
||||
import (
|
||||
sdkws "github.com/openimsdk/protocol/sdkws"
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
)
|
||||
|
||||
// SendMsg defines the structure for sending messages with various metadata.
|
||||
|
||||
@ -37,6 +37,7 @@ func NewMsgGatewayCmd() *MsgGatewayCmd {
|
||||
ret.configMap = map[string]any{
|
||||
OpenIMMsgGatewayCfgFileName: &msgGatewayConfig.MsgGateway,
|
||||
ShareFileName: &msgGatewayConfig.Share,
|
||||
RedisConfigFileName: &msgGatewayConfig.RedisConfig,
|
||||
WebhooksConfigFileName: &msgGatewayConfig.WebhooksConfig,
|
||||
DiscoveryConfigFilename: &msgGatewayConfig.Discovery,
|
||||
}
|
||||
|
||||
@ -21,6 +21,7 @@ import (
|
||||
"github.com/openimsdk/tools/s3/cos"
|
||||
"github.com/openimsdk/tools/s3/minio"
|
||||
"github.com/openimsdk/tools/s3/oss"
|
||||
"github.com/openimsdk/tools/s3/kodo"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@ -108,8 +109,9 @@ type API struct {
|
||||
}
|
||||
|
||||
type CronTask struct {
|
||||
ChatRecordsClearTime string `mapstructure:"chatRecordsClearTime"`
|
||||
RetainChatRecords int `mapstructure:"retainChatRecords"`
|
||||
CronExecuteTime string `mapstructure:"cronExecuteTime"`
|
||||
RetainChatRecords int `mapstructure:"retainChatRecords"`
|
||||
FileExpireTime int `mapstructure:"fileExpireTime"`
|
||||
}
|
||||
|
||||
type OfflinePushConfig struct {
|
||||
@ -281,15 +283,7 @@ type Third struct {
|
||||
Enable string `mapstructure:"enable"`
|
||||
Cos Cos `mapstructure:"cos"`
|
||||
Oss Oss `mapstructure:"oss"`
|
||||
Kodo struct {
|
||||
Endpoint string `mapstructure:"endpoint"`
|
||||
Bucket string `mapstructure:"bucket"`
|
||||
BucketURL string `mapstructure:"bucketURL"`
|
||||
AccessKeyID string `mapstructure:"accessKeyID"`
|
||||
AccessKeySecret string `mapstructure:"accessKeySecret"`
|
||||
SessionToken string `mapstructure:"sessionToken"`
|
||||
PublicRead bool `mapstructure:"publicRead"`
|
||||
} `mapstructure:"kodo"`
|
||||
Kodo Kodo `mapstructure:"kodo"`
|
||||
Aws struct {
|
||||
Endpoint string `mapstructure:"endpoint"`
|
||||
Region string `mapstructure:"region"`
|
||||
@ -317,6 +311,16 @@ type Oss struct {
|
||||
PublicRead bool `mapstructure:"publicRead"`
|
||||
}
|
||||
|
||||
type Kodo struct {
|
||||
Endpoint string `mapstructure:"endpoint"`
|
||||
Bucket string `mapstructure:"bucket"`
|
||||
BucketURL string `mapstructure:"bucketURL"`
|
||||
AccessKeyID string `mapstructure:"accessKeyID"`
|
||||
AccessKeySecret string `mapstructure:"accessKeySecret"`
|
||||
SessionToken string `mapstructure:"sessionToken"`
|
||||
PublicRead bool `mapstructure:"publicRead"`
|
||||
}
|
||||
|
||||
type User struct {
|
||||
RPC struct {
|
||||
RegisterIP string `mapstructure:"registerIP"`
|
||||
@ -528,6 +532,18 @@ func (o *Oss) Build() *oss.Config {
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Kodo) Build() *kodo.Config {
|
||||
return &kodo.Config{
|
||||
Endpoint: o.Endpoint,
|
||||
Bucket: o.Bucket,
|
||||
BucketURL: o.BucketURL,
|
||||
AccessKeyID: o.AccessKeyID,
|
||||
AccessKeySecret: o.AccessKeySecret,
|
||||
SessionToken: o.SessionToken,
|
||||
PublicRead: o.PublicRead,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *CacheConfig) Failed() time.Duration {
|
||||
return time.Second * time.Duration(l.FailedExpire)
|
||||
}
|
||||
|
||||
@ -14,430 +14,431 @@
|
||||
|
||||
package ginprometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
var defaultMetricPath = "/metrics"
|
||||
|
||||
// counter, counter_vec, gauge, gauge_vec,
|
||||
// histogram, histogram_vec, summary, summary_vec.
|
||||
var (
|
||||
reqCounter = &Metric{
|
||||
ID: "reqCnt",
|
||||
Name: "requests_total",
|
||||
Description: "How many HTTP requests processed, partitioned by status code and HTTP method.",
|
||||
Type: "counter_vec",
|
||||
Args: []string{"code", "method", "handler", "host", "url"}}
|
||||
|
||||
reqDuration = &Metric{
|
||||
ID: "reqDur",
|
||||
Name: "request_duration_seconds",
|
||||
Description: "The HTTP request latencies in seconds.",
|
||||
Type: "histogram_vec",
|
||||
Args: []string{"code", "method", "url"},
|
||||
}
|
||||
|
||||
resSize = &Metric{
|
||||
ID: "resSz",
|
||||
Name: "response_size_bytes",
|
||||
Description: "The HTTP response sizes in bytes.",
|
||||
Type: "summary"}
|
||||
|
||||
reqSize = &Metric{
|
||||
ID: "reqSz",
|
||||
Name: "request_size_bytes",
|
||||
Description: "The HTTP request sizes in bytes.",
|
||||
Type: "summary"}
|
||||
|
||||
standardMetrics = []*Metric{
|
||||
reqCounter,
|
||||
reqDuration,
|
||||
resSize,
|
||||
reqSize,
|
||||
}
|
||||
)
|
||||
|
||||
/*
|
||||
RequestCounterURLLabelMappingFn is a function which can be supplied to the middleware to control
|
||||
the cardinality of the request counter's "url" label, which might be required in some contexts.
|
||||
For instance, if for a "/customer/:name" route you don't want to generate a time series for every
|
||||
possible customer name, you could use this function:
|
||||
|
||||
func(c *gin.Context) string {
|
||||
url := c.Request.URL.Path
|
||||
for _, p := range c.Params {
|
||||
if p.Key == "name" {
|
||||
url = strings.Replace(url, p.Value, ":name", 1)
|
||||
break
|
||||
}
|
||||
}
|
||||
return url
|
||||
}
|
||||
|
||||
which would map "/customer/alice" and "/customer/bob" to their template "/customer/:name".
|
||||
*/
|
||||
type RequestCounterURLLabelMappingFn func(c *gin.Context) string
|
||||
|
||||
// Metric is a definition for the name, description, type, ID, and
|
||||
// prometheus.Collector type (i.e. CounterVec, Summary, etc) of each metric.
|
||||
type Metric struct {
|
||||
MetricCollector prometheus.Collector
|
||||
ID string
|
||||
Name string
|
||||
Description string
|
||||
Type string
|
||||
Args []string
|
||||
}
|
||||
|
||||
// Prometheus contains the metrics gathered by the instance and its path.
|
||||
type Prometheus struct {
|
||||
reqCnt *prometheus.CounterVec
|
||||
reqDur *prometheus.HistogramVec
|
||||
reqSz, resSz prometheus.Summary
|
||||
router *gin.Engine
|
||||
listenAddress string
|
||||
Ppg PrometheusPushGateway
|
||||
|
||||
MetricsList []*Metric
|
||||
MetricsPath string
|
||||
|
||||
ReqCntURLLabelMappingFn RequestCounterURLLabelMappingFn
|
||||
|
||||
// gin.Context string to use as a prometheus URL label
|
||||
URLLabelFromContext string
|
||||
}
|
||||
|
||||
// PrometheusPushGateway contains the configuration for pushing to a Prometheus pushgateway (optional).
|
||||
type PrometheusPushGateway struct {
|
||||
|
||||
// Push interval in seconds
|
||||
PushIntervalSeconds time.Duration
|
||||
|
||||
// Push Gateway URL in format http://domain:port
|
||||
// where JOBNAME can be any string of your choice
|
||||
PushGatewayURL string
|
||||
|
||||
// Local metrics URL where metrics are fetched from, this could be omitted in the future
|
||||
// if implemented using prometheus common/expfmt instead
|
||||
MetricsURL string
|
||||
|
||||
// pushgateway job name, defaults to "gin"
|
||||
Job string
|
||||
}
|
||||
|
||||
// NewPrometheus generates a new set of metrics with a certain subsystem name.
|
||||
func NewPrometheus(subsystem string, customMetricsList ...[]*Metric) *Prometheus {
|
||||
if subsystem == "" {
|
||||
subsystem = "app"
|
||||
}
|
||||
|
||||
var metricsList []*Metric
|
||||
|
||||
if len(customMetricsList) > 1 {
|
||||
panic("Too many args. NewPrometheus( string, <optional []*Metric> ).")
|
||||
} else if len(customMetricsList) == 1 {
|
||||
metricsList = customMetricsList[0]
|
||||
}
|
||||
metricsList = append(metricsList, standardMetrics...)
|
||||
|
||||
p := &Prometheus{
|
||||
MetricsList: metricsList,
|
||||
MetricsPath: defaultMetricPath,
|
||||
ReqCntURLLabelMappingFn: func(c *gin.Context) string {
|
||||
return c.FullPath() // e.g. /user/:id , /user/:id/info
|
||||
},
|
||||
}
|
||||
|
||||
p.registerMetrics(subsystem)
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// SetPushGateway sends metrics to a remote pushgateway exposed on pushGatewayURL
|
||||
// every pushIntervalSeconds. Metrics are fetched from metricsURL.
|
||||
func (p *Prometheus) SetPushGateway(pushGatewayURL, metricsURL string, pushIntervalSeconds time.Duration) {
|
||||
p.Ppg.PushGatewayURL = pushGatewayURL
|
||||
p.Ppg.MetricsURL = metricsURL
|
||||
p.Ppg.PushIntervalSeconds = pushIntervalSeconds
|
||||
p.startPushTicker()
|
||||
}
|
||||
|
||||
// SetPushGatewayJob job name, defaults to "gin".
|
||||
func (p *Prometheus) SetPushGatewayJob(j string) {
|
||||
p.Ppg.Job = j
|
||||
}
|
||||
|
||||
// SetListenAddress for exposing metrics on address. If not set, it will be exposed at the
|
||||
// same address of the gin engine that is being used.
|
||||
func (p *Prometheus) SetListenAddress(address string) {
|
||||
p.listenAddress = address
|
||||
if p.listenAddress != "" {
|
||||
p.router = gin.Default()
|
||||
}
|
||||
}
|
||||
|
||||
// SetListenAddressWithRouter for using a separate router to expose metrics. (this keeps things like GET /metrics out of
|
||||
// your content's access log).
|
||||
func (p *Prometheus) SetListenAddressWithRouter(listenAddress string, r *gin.Engine) {
|
||||
p.listenAddress = listenAddress
|
||||
if len(p.listenAddress) > 0 {
|
||||
p.router = r
|
||||
}
|
||||
}

// SetMetricsPath registers the metrics path on the engine (or on the dedicated metrics router).
func (p *Prometheus) SetMetricsPath(e *gin.Engine) error {
	if p.listenAddress != "" {
		p.router.GET(p.MetricsPath, prometheusHandler())
		return p.runServer()
	}
	e.GET(p.MetricsPath, prometheusHandler())
	return nil
}

// SetMetricsPathWithAuth registers the metrics path with basic authentication.
func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) error {
	if p.listenAddress != "" {
		p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
		return p.runServer()
	}
	e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
	return nil
}

func (p *Prometheus) runServer() error {
	return p.router.Run(p.listenAddress)
}

func (p *Prometheus) getMetrics() []byte {
	response, err := http.Get(p.Ppg.MetricsURL)
	if err != nil {
		return nil
	}

	defer response.Body.Close()

	body, _ := io.ReadAll(response.Body)
	return body
}

var hostname, _ = os.Hostname()

func (p *Prometheus) getPushGatewayURL() string {
	if p.Ppg.Job == "" {
		p.Ppg.Job = "gin"
	}
	return p.Ppg.PushGatewayURL + "/metrics/job/" + p.Ppg.Job + "/instance/" + hostname
}

func (p *Prometheus) sendMetricsToPushGateway(metrics []byte) {
	req, err := http.NewRequest("POST", p.getPushGatewayURL(), bytes.NewBuffer(metrics))
	if err != nil {
		return
	}

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		// resp is nil when Do returns an error, so return before touching it.
		fmt.Println("Error sending to push gateway:", err.Error())
		return
	}

	resp.Body.Close()
}

func (p *Prometheus) startPushTicker() {
	ticker := time.NewTicker(time.Second * p.Ppg.PushIntervalSeconds)
	go func() {
		for range ticker.C {
			p.sendMetricsToPushGateway(p.getMetrics())
		}
	}()
}

// NewMetric associates prometheus.Collector based on Metric.Type.
func NewMetric(m *Metric, subsystem string) prometheus.Collector {
	var metric prometheus.Collector
	switch m.Type {
	case "counter_vec":
		metric = prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
			m.Args,
		)
	case "counter":
		metric = prometheus.NewCounter(
			prometheus.CounterOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
		)
	case "gauge_vec":
		metric = prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
			m.Args,
		)
	case "gauge":
		metric = prometheus.NewGauge(
			prometheus.GaugeOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
		)
	case "histogram_vec":
		metric = prometheus.NewHistogramVec(
			prometheus.HistogramOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
			m.Args,
		)
	case "histogram":
		metric = prometheus.NewHistogram(
			prometheus.HistogramOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
		)
	case "summary_vec":
		metric = prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
			m.Args,
		)
	case "summary":
		metric = prometheus.NewSummary(
			prometheus.SummaryOpts{
				Subsystem: subsystem,
				Name:      m.Name,
				Help:      m.Description,
			},
		)
	}
	return metric
}

func (p *Prometheus) registerMetrics(subsystem string) {
	for _, metricDef := range p.MetricsList {
		metric := NewMetric(metricDef, subsystem)
		if err := prometheus.Register(metric); err != nil {
			fmt.Println("metric could not be registered in Prometheus, name:", metricDef.Name, "error:", err.Error())
		}

		switch metricDef {
		case reqCounter:
			p.reqCnt = metric.(*prometheus.CounterVec)
		case reqDuration:
			p.reqDur = metric.(*prometheus.HistogramVec)
		case resSize:
			p.resSz = metric.(prometheus.Summary)
		case reqSize:
			p.reqSz = metric.(prometheus.Summary)
		}
		metricDef.MetricCollector = metric
	}
}

// Use adds the middleware to a gin engine.
func (p *Prometheus) Use(e *gin.Engine) error {
	e.Use(p.HandlerFunc())
	return p.SetMetricsPath(e)
}

// UseWithAuth adds the middleware to a gin engine with BasicAuth.
func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) error {
	e.Use(p.HandlerFunc())
	return p.SetMetricsPathWithAuth(e, accounts)
}

// HandlerFunc defines handler function for middleware.
func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		if c.Request.URL.Path == p.MetricsPath {
			c.Next()
			return
		}

		start := time.Now()
		reqSz := computeApproximateRequestSize(c.Request)

		c.Next()

		status := strconv.Itoa(c.Writer.Status())
		elapsed := float64(time.Since(start)) / float64(time.Second)
		resSz := float64(c.Writer.Size())

		url := p.ReqCntURLLabelMappingFn(c)
		if len(p.URLLabelFromContext) > 0 {
			u, found := c.Get(p.URLLabelFromContext)
			if !found {
				u = "unknown"
			}
			url = u.(string)
		}
		p.reqDur.WithLabelValues(status, c.Request.Method, url).Observe(elapsed)
		p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
		p.reqSz.Observe(float64(reqSz))
		p.resSz.Observe(resSz)
	}
}

func prometheusHandler() gin.HandlerFunc {
	h := promhttp.Handler()
	return func(c *gin.Context) {
		h.ServeHTTP(c.Writer, c.Request)
	}
}

func computeApproximateRequestSize(r *http.Request) int {
	var s int
	if r.URL != nil {
		s = len(r.URL.Path)
	}

	s += len(r.Method)
	s += len(r.Proto)
	for name, values := range r.Header {
		s += len(name)
		for _, value := range values {
			s += len(value)
		}
	}
	s += len(r.Host)

	// r.FormData and r.MultipartForm are assumed to be included in r.URL.

	if r.ContentLength != -1 {
		s += int(r.ContentLength)
	}
	return s
}
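
For orientation, a minimal usage sketch of this middleware (not part of the commit; the subsystem name and port are illustrative):

	engine := gin.New()
	p := NewPrometheus("app") // "app" is the fallback subsystem; any label-safe name works
	if err := p.Use(engine); err != nil { // mounts the handler and GET /metrics
		panic(err)
	}
	_ = engine.Run(":8080") // GET /metrics now serves the Prometheus exposition format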
48
pkg/common/prommetrics/api.go
Normal file
48
pkg/common/prommetrics/api.go
Normal file
@ -0,0 +1,48 @@
package prommetrics

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	apiCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "api_count",
			Help: "Total number of API calls",
		},
		[]string{"path", "method", "code"},
	)
	httpCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_count",
			Help: "Total number of HTTP calls",
		},
		[]string{"path", "method", "status"},
	)
)

func ApiInit(prometheusPort int) error {
	apiRegistry := prometheus.NewRegistry()
	cs := append(
		baseCollector,
		apiCounter,
		httpCounter,
	)
	return Init(apiRegistry, prometheusPort, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
}

func APICall(path string, method string, apiCode int) {
	apiCounter.With(prometheus.Labels{"path": path, "method": method, "code": strconv.Itoa(apiCode)}).Inc()
}

func HttpCall(path string, method string, status int) {
	httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
}

//func ApiHandler() http.Handler {
//	return promhttp.InstrumentMetricHandler(
//		apiRegistry, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}),
//	)
//}
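
A sketch of how these counters might be fed from a gin handler chain (this middleware is hypothetical, not part of the file above):

	func apiResponseMiddleware(c *gin.Context) {
		c.Next()
		// record the templated route and final status once the handler has run
		prommetrics.APICall(c.FullPath(), c.Request.Method, c.Writer.Status())
	}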
@ -1,30 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prommetrics

import ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"

/*
labels := prometheus.Labels{"label_one": "any", "label_two": "value"}
ApiCustomCnt.MetricCollector.(*prometheus.CounterVec).With(labels).Inc().
*/
var (
	ApiCustomCnt = &ginprom.Metric{
		Name:        "custom_total",
		Description: "Custom counter events.",
		Type:        "counter_vec",
		Args:        []string{"label_one", "label_two"},
	}
)
@ -15,44 +15,24 @@
package prommetrics

import (
	gp "github.com/grpc-ecosystem/go-grpc-prometheus"
	config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
	"fmt"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"net/http"
)

func NewGrpcPromObj(cusMetrics []prometheus.Collector) (*prometheus.Registry, *gp.ServerMetrics, error) {
	reg := prometheus.NewRegistry()
	grpcMetrics := gp.NewServerMetrics()
	grpcMetrics.EnableHandlingTimeHistogram()
	cusMetrics = append(cusMetrics, grpcMetrics, collectors.NewGoCollector())
	reg.MustRegister(cusMetrics...)
	return reg, grpcMetrics, nil
}
const commonPath = "/metrics"

func GetGrpcCusMetrics(registerName string, share *config2.Share) []prometheus.Collector {
	switch registerName {
	case share.RpcRegisterName.MessageGateway:
		return []prometheus.Collector{OnlineUserGauge}
	case share.RpcRegisterName.Msg:
		return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
	case "Transfer":
		return []prometheus.Collector{MsgInsertRedisSuccessCounter, MsgInsertRedisFailedCounter, MsgInsertMongoSuccessCounter, MsgInsertMongoFailedCounter, SeqSetFailedCounter}
	case share.RpcRegisterName.Push:
		return []prometheus.Collector{MsgOfflinePushFailedCounter}
	case share.RpcRegisterName.Auth:
		return []prometheus.Collector{UserLoginCounter}
	default:
		return nil
var (
	baseCollector = []prometheus.Collector{
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
	}
	}
)

func GetGinCusMetrics(name string) []*ginprometheus.Metric {
	switch name {
	case "Api":
		return []*ginprometheus.Metric{ApiCustomCnt}
	default:
		return []*ginprometheus.Metric{ApiCustomCnt}
	}
func Init(registry *prometheus.Registry, prometheusPort int, path string, handler http.Handler, cs ...prometheus.Collector) error {
	registry.MustRegister(cs...)
	srv := http.NewServeMux()
	srv.Handle(path, handler)
	return http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), srv)
}
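
In rough terms, the new Init is a "register collectors, then serve one handler on one path and port" helper. A standalone sketch (the port is illustrative; callers in this commit run it from a goroutine since ListenAndServe blocks):

	reg := prometheus.NewRegistry()
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})
	// Blocks until the HTTP server stops or fails.
	if err := prommetrics.Init(reg, 20112, "/metrics", handler, collectors.NewGoCollector()); err != nil {
		log.Fatal(err)
	}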
@ -14,46 +14,39 @@

package prommetrics

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/assert"
)

func TestNewGrpcPromObj(t *testing.T) {
	// Create a custom metric to pass into the NewGrpcPromObj function.
	customMetric := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "test_metric",
		Help: "This is a test metric.",
	})
	cusMetrics := []prometheus.Collector{customMetric}

	// Call NewGrpcPromObj with the custom metrics.
	reg, grpcMetrics, err := NewGrpcPromObj(cusMetrics)

	// Assert no error was returned.
	assert.NoError(t, err)

	// Assert the registry was correctly initialized.
	assert.NotNil(t, reg)

	// Assert the grpcMetrics was correctly initialized.
	assert.NotNil(t, grpcMetrics)

	// Assert that the custom metric is registered.
	mfs, err := reg.Gather()
	assert.NoError(t, err)
	assert.NotEmpty(t, mfs) // Ensure some metrics are present.
	found := false
	for _, mf := range mfs {
		if *mf.Name == "test_metric" {
			found = true
			break
		}
	}
	assert.True(t, found, "Custom metric not found in registry")
}
//func TestNewGrpcPromObj(t *testing.T) {
//	// Create a custom metric to pass into the NewGrpcPromObj function.
//	customMetric := prometheus.NewCounter(prometheus.CounterOpts{
//		Name: "test_metric",
//		Help: "This is a test metric.",
//	})
//	cusMetrics := []prometheus.Collector{customMetric}
//
//	// Call NewGrpcPromObj with the custom metrics.
//	reg, grpcMetrics, err := NewGrpcPromObj(cusMetrics)
//
//	// Assert no error was returned.
//	assert.NoError(t, err)
//
//	// Assert the registry was correctly initialized.
//	assert.NotNil(t, reg)
//
//	// Assert the grpcMetrics was correctly initialized.
//	assert.NotNil(t, grpcMetrics)
//
//	// Assert that the custom metric is registered.
//	mfs, err := reg.Gather()
//	assert.NoError(t, err)
//	assert.NotEmpty(t, mfs) // Ensure some metrics are present.
//	found := false
//	for _, mf := range mfs {
//		if *mf.Name == "test_metric" {
//			found = true
//			break
//		}
//	}
//	assert.True(t, found, "Custom metric not found in registry")
//}

//func TestGetGrpcCusMetrics(t *testing.T) {
//	conf := config2.NewGlobalConfig()
58
pkg/common/prommetrics/rpc.go
Normal file
58
pkg/common/prommetrics/rpc.go
Normal file
@ -0,0 +1,58 @@
package prommetrics

import (
	"strconv"

	gp "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const rpcPath = commonPath

var (
	grpcMetrics *gp.ServerMetrics
	rpcCounter  = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "rpc_count",
			Help: "Total number of RPC calls",
		},
		[]string{"name", "path", "code"},
	)
)

func RpcInit(cs []prometheus.Collector, prometheusPort int) error {
	reg := prometheus.NewRegistry()
	cs = append(append(
		baseCollector,
		rpcCounter,
	), cs...)
	return Init(reg, prometheusPort, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
}

func RPCCall(name string, path string, code int) {
	rpcCounter.With(prometheus.Labels{"name": name, "path": path, "code": strconv.Itoa(code)}).Inc()
}

func GetGrpcServerMetrics() *gp.ServerMetrics {
	if grpcMetrics == nil {
		grpcMetrics = gp.NewServerMetrics()
		grpcMetrics.EnableHandlingTimeHistogram()
	}
	return grpcMetrics
}

func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Collector {
	switch registerName {
	case share.RpcRegisterName.MessageGateway:
		return []prometheus.Collector{OnlineUserGauge}
	case share.RpcRegisterName.Msg:
		return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
	case share.RpcRegisterName.Push:
		return []prometheus.Collector{MsgOfflinePushFailedCounter}
	case share.RpcRegisterName.Auth:
		return []prometheus.Collector{UserLoginCounter}
	default:
		return nil
	}
}
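
A sketch of recording one RPC outcome with the counter above (the service and method strings are illustrative, not taken from this commit):

	// Each call increments rpc_count{name, path, code}; 0 is treated as success below.
	prommetrics.RPCCall("msg", "/msg.msg/SendMsg", 0)
	prommetrics.RPCCall("msg", "/msg.msg/SendMsg", 13) // e.g. gRPC codes.Internal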
@ -16,6 +16,7 @@ package prommetrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
@ -40,3 +41,16 @@ var (
		Help: "The number of failed set seq",
	})
)

func TransferInit(prometheusPort int) error {
	reg := prometheus.NewRegistry()
	cs := append(
		baseCollector,
		MsgInsertRedisSuccessCounter,
		MsgInsertRedisFailedCounter,
		MsgInsertMongoSuccessCounter,
		MsgInsertMongoFailedCounter,
		SeqSetFailedCounter,
	)
	return Init(reg, prometheusPort, commonPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
}
@ -17,9 +17,9 @@ package startrpc
import (
	"context"
	"fmt"
	config2 "github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc/status"
	"net"
	"net/http"
	"os"
@ -29,7 +29,6 @@ import (
	"syscall"
	"time"

	grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
	"github.com/openimsdk/tools/discovery"
@ -38,14 +37,13 @@ import (
	"github.com/openimsdk/tools/mw"
	"github.com/openimsdk/tools/system/program"
	"github.com/openimsdk/tools/utils/network"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// Start rpc server.
func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusConfig *config2.Prometheus, listenIP,
	registerIP string, rpcPorts []int, index int, rpcRegisterName string, share *config2.Share, config T, rpcFn func(ctx context.Context,
func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusConfig *config.Prometheus, listenIP,
	registerIP string, rpcPorts []int, index int, rpcRegisterName string, share *config.Share, config T, rpcFn func(ctx context.Context,
	config T, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error, options ...grpc.ServerOption) error {

	rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
@ -77,13 +75,18 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
		return err
	}

	var reg *prometheus.Registry
	var metric *grpcprometheus.ServerMetrics
	//var reg *prometheus.Registry
	//var metric *grpcprometheus.ServerMetrics
	if prometheusConfig.Enable {
		cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
		reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics)
		options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
			grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
		//cusMetrics := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
		//reg, metric, _ = prommetrics.NewGrpcPromObj(cusMetrics)
		//options = append(options, mw.GrpcServer(), grpc.StreamInterceptor(metric.StreamServerInterceptor()),
		//	grpc.UnaryInterceptor(metric.UnaryServerInterceptor()))
		options = append(
			options, mw.GrpcServer(),
			prommetricsUnaryInterceptor(rpcRegisterName),
			prommetricsStreamInterceptor(rpcRegisterName),
		)
	} else {
		options = append(options, mw.GrpcServer())
	}
@ -122,13 +125,18 @@ func Start[T any](ctx context.Context, discovery *config2.Discovery, prometheusC
				netDone <- struct{}{}
				return
			}
			metric.InitializeMetrics(srv)
			// Create a HTTP server for prometheus.
			httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
			if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
				netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
			cs := prommetrics.GetGrpcCusMetrics(rpcRegisterName, share)
			if err := prommetrics.RpcInit(cs, prometheusPort); err != nil && err != http.ErrServerClosed {
				netErr = errs.WrapMsg(err, fmt.Sprintf("rpc %s prometheus start err: %d", rpcRegisterName, prometheusPort))
				netDone <- struct{}{}
			}
			//metric.InitializeMetrics(srv)
			// Create a HTTP server for prometheus.
			//httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
			//if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			//	netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
			//	netDone <- struct{}{}
			//}
		}()
	}

@ -175,3 +183,25 @@ func gracefulStopWithCtx(ctx context.Context, f func()) error {
		return nil
	}
}

func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
	getCode := func(err error) int {
		if err == nil {
			return 0
		}
		rpcErr, ok := err.(interface{ GRPCStatus() *status.Status })
		if !ok {
			return -1
		}
		return int(rpcErr.GRPCStatus().Code())
	}
	return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		resp, err := handler(ctx, req)
		prommetrics.RPCCall(rpcRegisterName, info.FullMethod, getCode(err))
		return resp, err
	})
}

func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption {
	return grpc.ChainStreamInterceptor()
}
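
For reference, how the interceptor's getCode classifies errors (a sketch; the sample error values are illustrative):

	getCode(nil)                                      // 0: success
	getCode(status.Error(codes.NotFound, "no user"))  // 5: the gRPC status code is recorded
	getCode(errors.New("plain error"))                // -1: not a status-carrying error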
@ -23,6 +23,7 @@ const (
	SuperGroupRecvMsgNotNotifyUserIDsKey     = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS:"
	SuperGroupRecvMsgNotNotifyUserIDsHashKey = "SUPER_GROUP_RECV_MSG_NOT_NOTIFY_USER_IDS_HASH:"
	ConversationNotReceiveMessageUserIDsKey  = "CONVERSATION_NOT_RECEIVE_MESSAGE_USER_IDS:"
	ConversationUserMaxKey                   = "CONVERSATION_USER_MAX:"
)

func GetConversationKey(ownerUserID, conversationID string) string {
@ -56,3 +57,7 @@ func GetConversationNotReceiveMessageUserIDsKey(conversationID string) string {
func GetUserConversationIDsHashKey(ownerUserID string) string {
	return ConversationIDsHashKey + ownerUserID
}

func GetConversationUserMaxVersionKey(userID string) string {
	return ConversationUserMaxKey + userID
}
13
pkg/common/storage/cache/cachekey/online.go
vendored
Normal file
13
pkg/common/storage/cache/cachekey/online.go
vendored
Normal file
@ -0,0 +1,13 @@
package cachekey

import "time"

const (
	OnlineKey     = "ONLINE:"
	OnlineChannel = "online_change"
	OnlineExpire  = time.Hour / 2
)

func GetOnlineKey(userID string) string {
	return OnlineKey + userID
}
44
pkg/common/storage/cache/cachekey/seq.go
vendored
44
pkg/common/storage/cache/cachekey/seq.go
vendored
@ -1,38 +1,30 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cachekey

const (
	maxSeq                 = "MAX_SEQ:"
	minSeq                 = "MIN_SEQ:"
	conversationUserMinSeq = "CON_USER_MIN_SEQ:"
	hasReadSeq             = "HAS_READ_SEQ:"
	MallocSeq        = "MALLOC_SEQ:"
	MallocMinSeqLock = "MALLOC_MIN_SEQ:"

	SeqUserMaxSeq  = "SEQ_USER_MAX:"
	SeqUserMinSeq  = "SEQ_USER_MIN:"
	SeqUserReadSeq = "SEQ_USER_READ:"
)

func GetMaxSeqKey(conversationID string) string {
	return maxSeq + conversationID
func GetMallocSeqKey(conversationID string) string {
	return MallocSeq + conversationID
}

func GetMinSeqKey(conversationID string) string {
	return minSeq + conversationID
func GetMallocMinSeqKey(conversationID string) string {
	return MallocMinSeqLock + conversationID
}

func GetHasReadSeqKey(conversationID string, userID string) string {
	return hasReadSeq + userID + ":" + conversationID
func GetSeqUserMaxSeqKey(conversationID string, userID string) string {
	return SeqUserMaxSeq + conversationID + ":" + userID
}

func GetConversationUserMinSeqKey(conversationID, userID string) string {
	return conversationUserMinSeq + conversationID + "u:" + userID
func GetSeqUserMinSeqKey(conversationID string, userID string) string {
	return SeqUserMinSeq + conversationID + ":" + userID
}

func GetSeqUserReadSeqKey(conversationID string, userID string) string {
	return SeqUserReadSeq + conversationID + ":" + userID
}
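
To make the key migration concrete, the strings the new helpers produce (IDs are illustrative); note the read-seq key now orders conversationID before userID, unlike the old HAS_READ_SEQ key:

	GetMallocSeqKey("conv123")              // "MALLOC_SEQ:conv123"
	GetSeqUserMaxSeqKey("conv123", "u456")  // "SEQ_USER_MAX:conv123:u456"
	GetSeqUserReadSeqKey("conv123", "u456") // "SEQ_USER_READ:conv123:u456"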
5
pkg/common/storage/cache/cachekey/user.go
vendored
5
pkg/common/storage/cache/cachekey/user.go
vendored
@ -17,7 +17,6 @@ package cachekey
const (
	UserInfoKey             = "USER_INFO:"
	UserGlobalRecvMsgOptKey = "USER_GLOBAL_RECV_MSG_OPT_KEY:"
	olineStatusKey          = "ONLINE_STATUS:"
)

func GetUserInfoKey(userID string) string {
@ -27,7 +26,3 @@ func GetUserInfoKey(userID string) string {
func GetUserGlobalRecvMsgOptKey(userID string) string {
	return UserGlobalRecvMsgOptKey + userID
}

func GetOnlineStatusKey(modKey string) string {
	return olineStatusKey + modKey
}
4
pkg/common/storage/cache/conversation.go
vendored
4
pkg/common/storage/cache/conversation.go
vendored
@ -54,4 +54,8 @@ type ConversationCache interface {
	GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
	DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache

	DelConversationVersionUserIDs(userIDs ...string) ConversationCache

	FindMaxConversationUserVersion(ctx context.Context, userID string) (*relationtb.VersionLog, error)
}
1
pkg/common/storage/cache/group.go
vendored
1
pkg/common/storage/cache/group.go
vendored
@ -36,7 +36,6 @@ type GroupCache interface {
	DelGroupMembersHash(groupID string) GroupCache

	GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error)
	GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (groupMemberIDs map[string][]string, err error)

	DelGroupMemberIDs(groupID string) GroupCache
8
pkg/common/storage/cache/online.go
vendored
Normal file
8
pkg/common/storage/cache/online.go
vendored
Normal file
@ -0,0 +1,8 @@
package cache

import "context"

type OnlineCache interface {
	GetOnline(ctx context.Context, userID string) ([]int32, error)
	SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
}
94
pkg/common/storage/cache/redis/batch.go
vendored
Normal file
94
pkg/common/storage/cache/redis/batch.go
vendored
Normal file
@ -0,0 +1,94 @@
package redis

import (
	"context"
	"encoding/json"
	"time"
	"unsafe"

	"github.com/dtm-labs/rockscache"
	"github.com/redis/go-redis/v9"
	"golang.org/x/sync/singleflight"
)

// getRocksCacheRedisClient extracts the underlying redis client from a
// rockscache.Client. It relies on mirroring the field layout of
// rockscache.Client, so it must be kept in sync with that struct.
func getRocksCacheRedisClient(cli *rockscache.Client) redis.UniversalClient {
	type Client struct {
		rdb redis.UniversalClient
		_   rockscache.Options
		_   singleflight.Group
	}
	return (*Client)(unsafe.Pointer(cli)).rdb
}

func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscache.Client, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
	if len(ids) == 0 {
		return nil, nil
	}
	findKeys := make([]string, 0, len(ids))
	keyId := make(map[string]K)
	for _, id := range ids {
		key := idKey(id)
		if _, ok := keyId[key]; ok {
			continue
		}
		keyId[key] = id
		findKeys = append(findKeys, key)
	}
	slotKeys, err := groupKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), findKeys)
	if err != nil {
		return nil, err
	}
	result := make([]*V, 0, len(findKeys))
	for _, keys := range slotKeys {
		indexCache, err := rcClient.FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
			queryIds := make([]K, 0, len(idx))
			idIndex := make(map[K]int)
			for _, index := range idx {
				id := keyId[keys[index]]
				idIndex[id] = index
				queryIds = append(queryIds, id)
			}
			values, err := fn(ctx, queryIds)
			if err != nil {
				return nil, err
			}
			if len(values) == 0 {
				return map[int]string{}, nil
			}
			cacheIndex := make(map[int]string)
			for _, value := range values {
				id := vId(value)
				index, ok := idIndex[id]
				if !ok {
					continue
				}
				bs, err := json.Marshal(value)
				if err != nil {
					return nil, err
				}
				cacheIndex[index] = string(bs)
			}
			return cacheIndex, nil
		})
		if err != nil {
			return nil, err
		}
		for index, data := range indexCache {
			if data == "" {
				continue
			}
			var value V
			if err := json.Unmarshal([]byte(data), &value); err != nil {
				return nil, err
			}
			if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
				cb.BatchCache(keyId[keys[index]])
			}
			result = append(result, &value)
		}
	}
	return result, nil
}

type BatchCacheCallback[K comparable] interface {
	BatchCache(id K)
}
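
A sketch of calling batchGetCache2 directly (User and userDB are hypothetical; the commit's real call sites appear in the conversation and group caches below):

	// Fetch users u1..u3, hitting redis first and falling back to one batched DB query.
	users, err := batchGetCache2(ctx, rcClient, time.Hour, []string{"u1", "u2", "u3"},
		func(id string) string { return "USER_INFO:" + id }, // cache key per id
		func(u *User) string { return u.ID },                // id extracted from a cached value
		func(ctx context.Context, ids []string) ([]*User, error) {
			return userDB.Find(ctx, ids) // single round trip for all cache misses
		})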
55
pkg/common/storage/cache/redis/batch_handler.go
vendored
55
pkg/common/storage/cache/redis/batch_handler.go
vendored
@ -23,7 +23,6 @@ import (
	"github.com/openimsdk/open-im-server/v3/pkg/localcache"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mw/specialerror"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/redis/go-redis/v9"
	"time"
@ -147,30 +146,30 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
	return t, nil
}

func batchGetCache[T any, K comparable](
	ctx context.Context,
	rcClient *rockscache.Client,
	expire time.Duration,
	keys []K,
	keyFn func(key K) string,
	fns func(ctx context.Context, key K) (T, error),
) ([]T, error) {
	if len(keys) == 0 {
		return nil, nil
	}
	res := make([]T, 0, len(keys))
	for _, key := range keys {
		val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
			return fns(ctx, key)
		})
		if err != nil {
			if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
				continue
			}
			return nil, errs.Wrap(err)
		}
		res = append(res, val)
	}

	return res, nil
}
//func batchGetCache[T any, K comparable](
//	ctx context.Context,
//	rcClient *rockscache.Client,
//	expire time.Duration,
//	keys []K,
//	keyFn func(key K) string,
//	fns func(ctx context.Context, key K) (T, error),
//) ([]T, error) {
//	if len(keys) == 0 {
//		return nil, nil
//	}
//	res := make([]T, 0, len(keys))
//	for _, key := range keys {
//		val, err := getCache(ctx, rcClient, keyFn(key), expire, func(ctx context.Context) (T, error) {
//			return fns(ctx, key)
//		})
//		if err != nil {
//			if errs.ErrRecordNotFound.Is(specialerror.ErrCode(errs.Unwrap(err))) {
//				continue
//			}
//			return nil, errs.Wrap(err)
//		}
//		res = append(res, val)
//	}
//
//	return res, nil
//}
55
pkg/common/storage/cache/redis/batch_test.go
vendored
Normal file
55
pkg/common/storage/cache/redis/batch_test.go
vendored
Normal file
@ -0,0 +1,55 @@
package redis

import (
	"context"
	"testing"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/openimsdk/tools/db/redisutil"
)

func TestName(t *testing.T) {
	//var rocks rockscache.Client
	//rdb := getRocksCacheRedisClient(&rocks)
	//t.Log(rdb == nil)

	ctx := context.Background()
	rdb, err := redisutil.NewRedisClient(ctx, (&config.Redis{
		Address:  []string{"172.16.8.48:16379"},
		Password: "openIM123",
		DB:       3,
	}).Build())
	if err != nil {
		panic(err)
	}
	mgocli, err := mongoutil.NewMongoDB(ctx, (&config.Mongo{
		Address:     []string{"172.16.8.48:37017"},
		Database:    "openim_v3",
		Username:    "openIM",
		Password:    "openIM123",
		MaxPoolSize: 100,
		MaxRetry:    1,
	}).Build())
	if err != nil {
		panic(err)
	}
	//userMgo, err := mgo.NewUserMongo(mgocli.GetDB())
	//if err != nil {
	//	panic(err)
	//}
	//rock := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
	mgoSeqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
	if err != nil {
		panic(err)
	}
	seqUser := NewSeqUserCacheRedis(rdb, mgoSeqUser)

	res, err := seqUser.GetReadSeqs(ctx, "2110910952", []string{"sg_2920732023", "sg_345762580"})
	if err != nil {
		panic(err)
	}

	t.Log(res)
}
27
pkg/common/storage/cache/redis/conversation.go
vendored
27
pkg/common/storage/cache/redis/conversation.go
vendored
@ -95,6 +95,10 @@ func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID strin
	return cachekey.GetUserConversationIDsHashKey(ownerUserID)
}

func (c *ConversationRedisCache) getConversationUserMaxVersionKey(ownerUserID string) string {
	return cachekey.GetConversationUserMaxVersionKey(ownerUserID)
}

func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
	return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
		return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
@ -160,10 +164,12 @@ func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversati
}

func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*model.Conversation, error) {
	return batchGetCache(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
	return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
		return c.getConversationKey(ownerUserID, conversationID)
	}, func(ctx context.Context, conversationID string) (*model.Conversation, error) {
		return c.conversationDB.Take(ctx, ownerUserID, conversationID)
	}, func(conversation *model.Conversation) string {
		return conversation.ConversationID
	}, func(ctx context.Context, conversationIDs []string) ([]*model.Conversation, error) {
		return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
	})
}

@ -233,6 +239,19 @@ func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(convers
	for _, conversationID := range conversationIDs {
		cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
	}

	return cache
}

func (c *ConversationRedisCache) DelConversationVersionUserIDs(userIDs ...string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	for _, userID := range userIDs {
		cache.AddKeys(c.getConversationUserMaxVersionKey(userID))
	}
	return cache
}

func (c *ConversationRedisCache) FindMaxConversationUserVersion(ctx context.Context, userID string) (*model.VersionLog, error) {
	return getCache(ctx, c.rcClient, c.getConversationUserMaxVersionKey(userID), c.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
		return c.conversationDB.FindConversationUserVersion(ctx, userID, 0, 0)
	})
}
25
pkg/common/storage/cache/redis/friend.go
vendored
25
pkg/common/storage/cache/redis/friend.go
vendored
@ -70,10 +70,6 @@ func (f *FriendCacheRedis) getFriendIDsKey(ownerUserID string) string {
	return cachekey.GetFriendIDsKey(ownerUserID)
}

//func (f *FriendCacheRedis) getFriendSyncSortUserIDsKey(ownerUserID string) string {
//	return cachekey.GetFriendSyncSortUserIDsKey(ownerUserID, f.syncCount)
//}

func (f *FriendCacheRedis) getFriendMaxVersionKey(ownerUserID string) string {
	return cachekey.GetFriendMaxVersionKey(ownerUserID)
}
@ -107,16 +103,6 @@ func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) cache.FriendCach
	return newFriendCache
}

//func (f *FriendCacheRedis) DelSortFriendUserIDs(ownerUserIDs ...string) cache.FriendCache {
//	newGroupCache := f.CloneFriendCache()
//	keys := make([]string, 0, len(ownerUserIDs))
//	for _, userID := range ownerUserIDs {
//		keys = append(keys, f.getFriendSyncSortUserIDsKey(userID))
//	}
//	newGroupCache.AddKeys(keys...)
//	return newGroupCache
//}

// GetTwoWayFriendIDs retrieves two-way friend IDs from the cache.
func (f *FriendCacheRedis) GetTwoWayFriendIDs(ctx context.Context, ownerUserID string) (twoWayFriendIDs []string, err error) {
	friendIDs, err := f.GetFriendIDs(ctx, ownerUserID)
@ -193,17 +179,6 @@ func (f *FriendCacheRedis) DelMaxFriendVersion(ownerUserIDs ...string) cache.Fri
	return newFriendCache
}

//func (f *FriendCacheRedis) FindSortFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error) {
//	userIDs, err := f.GetFriendIDs(ctx, ownerUserID)
//	if err != nil {
//		return nil, err
//	}
//	if len(userIDs) > f.syncCount {
//		userIDs = userIDs[:f.syncCount]
//	}
//	return userIDs, nil
//}

func (f *FriendCacheRedis) FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*model.VersionLog, error) {
	return getCache(ctx, f.rcClient, f.getFriendMaxVersionKey(ownerUserID), f.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
		return f.friendDB.FindIncrVersion(ctx, ownerUserID, 0, 0)
90
pkg/common/storage/cache/redis/group.go
vendored
90
pkg/common/storage/cache/redis/group.go
vendored
@@ -118,34 +118,12 @@ func (g *GroupCacheRedis) getJoinGroupMaxVersionKey(userID string) string {
	return cachekey.GetJoinGroupMaxVersionKey(userID)
}

func (g *GroupCacheRedis) GetGroupIndex(group *model.Group, keys []string) (int, error) {
	key := g.getGroupInfoKey(group.GroupID)
	for i, _key := range keys {
		if _key == key {
			return i, nil
		}
	}

	return 0, errIndex
}

func (g *GroupCacheRedis) GetGroupMemberIndex(groupMember *model.GroupMember, keys []string) (int, error) {
	key := g.getGroupMemberInfoKey(groupMember.GroupID, groupMember.UserID)
	for i, _key := range keys {
		if _key == key {
			return i, nil
		}
	}

	return 0, errIndex
func (g *GroupCacheRedis) getGroupID(group *model.Group) string {
	return group.GroupID
}

func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error) {
	return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
		return g.getGroupInfoKey(groupID)
	}, func(ctx context.Context, groupID string) (*model.Group, error) {
		return g.groupDB.Take(ctx, groupID)
	})
	return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, g.getGroupInfoKey, g.getGroupID, g.groupDB.Find)
}
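The hunk above replaces the per-ID batchGetCache (one Take call per cache miss) with batchGetCache2, which maps values back to IDs and fetches all misses with a single Find. A minimal, cache-agnostic sketch of that contract as inferred from these call sites — the plain-map cache, type names, and demo data are illustrative, not the repo's actual rockscache-backed generic helper:

package main

import (
	"context"
	"fmt"
)

type group struct{ ID, Name string }

// batchGetCache2Sketch mirrors the argument shape seen above:
// (ids, id->key, value->id, one batched fetch for all misses).
func batchGetCache2Sketch[V any](
	ctx context.Context,
	cached map[string]V, // stand-in for Redis, so the sketch runs anywhere
	ids []string,
	key func(id string) string,
	idOf func(v V) string,
	fetch func(ctx context.Context, ids []string) ([]V, error),
) ([]V, error) {
	res := make([]V, 0, len(ids))
	var miss []string
	for _, id := range ids {
		if v, ok := cached[key(id)]; ok {
			res = append(res, v)
			continue
		}
		miss = append(miss, id)
	}
	if len(miss) == 0 {
		return res, nil
	}
	vs, err := fetch(ctx, miss) // one batched DB round trip instead of N
	if err != nil {
		return nil, err
	}
	for _, v := range vs {
		cached[key(idOf(v))] = v // back-fill so later lookups hit the cache
		res = append(res, v)
	}
	return res, nil
}

func main() {
	cached := map[string]group{"group:g1": {ID: "g1", Name: "cached"}}
	fetch := func(ctx context.Context, ids []string) ([]group, error) {
		out := make([]group, 0, len(ids))
		for _, id := range ids {
			out = append(out, group{ID: id, Name: "from-db"})
		}
		return out, nil
	}
	got, _ := batchGetCache2Sketch(context.Background(), cached, []string{"g1", "g2"},
		func(id string) string { return "group:" + id },
		func(g group) string { return g.ID },
		fetch)
	fmt.Println(got) // [{g1 cached} {g2 from-db}]
}

The same swap recurs for group members, users, and read seqs below; it matters most when a miss set is large (for example, thousands of members of one group).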

func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error) {
@@ -233,19 +211,6 @@ func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string)
	})
}

func (g *GroupCacheRedis) GetGroupsMemberIDs(ctx context.Context, groupIDs []string) (map[string][]string, error) {
	m := make(map[string][]string)
	for _, groupID := range groupIDs {
		userIDs, err := g.GetGroupMemberIDs(ctx, groupID)
		if err != nil {
			return nil, err
		}
		m[groupID] = userIDs
	}

	return m, nil
}

func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) cache.GroupCache {
	cache := g.CloneGroupCache()
	cache.AddKeys(g.getGroupMemberIDsKey(groupID))
@@ -285,10 +250,12 @@ func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userI
}

func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
	return batchGetCache(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
	return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
		return g.getGroupMemberInfoKey(groupID, userID)
	}, func(ctx context.Context, userID string) (*model.GroupMember, error) {
		return g.groupMemberDB.Take(ctx, groupID, userID)
	}, func(member *model.GroupMember) string {
		return member.UserID
	}, func(ctx context.Context, userIDs []string) ([]*model.GroupMember, error) {
		return g.groupMemberDB.Find(ctx, groupID, userIDs)
	})
}

@@ -301,14 +268,6 @@ func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID st
	return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}

func (g *GroupCacheRedis) GetAllGroupMemberInfo(ctx context.Context, groupID string) ([]*model.GroupMember, error) {
	groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
	if err != nil {
		return nil, err
	}
	return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}

func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) cache.GroupCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
@@ -388,42 +347,23 @@ func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, grou
	return g.GetGroupMembersInfo(ctx, groupID, userIDs)
}

func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) (_ []*model.GroupMember, err error) {
func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error) {
	if len(groupIDs) == 0 {
		var err error
		groupIDs, err = g.GetJoinedGroupIDs(ctx, userID)
		if err != nil {
			return nil, err
		}
	}
	return batchGetCache(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
	return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
		return g.getGroupMemberInfoKey(groupID, userID)
	}, func(ctx context.Context, groupID string) (*model.GroupMember, error) {
		return g.groupMemberDB.Take(ctx, groupID, userID)
	}, func(member *model.GroupMember) string {
		return member.GroupID
	}, func(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
		return g.groupMemberDB.FindInGroup(ctx, userID, groupIDs)
	})
}

//func (g *GroupCacheRedis) FindSortGroupMemberUserIDs(ctx context.Context, groupID string) ([]string, error) {
//	userIDs, err := g.GetGroupMemberIDs(ctx, groupID)
//	if err != nil {
//		return nil, err
//	}
//	if len(userIDs) > g.syncCount {
//		userIDs = userIDs[:g.syncCount]
//	}
//	return userIDs, nil
//}
//
//func (g *GroupCacheRedis) FindSortJoinGroupIDs(ctx context.Context, userID string) ([]string, error) {
//	groupIDs, err := g.GetJoinedGroupIDs(ctx, userID)
//	if err != nil {
//		return nil, err
//	}
//	if len(groupIDs) > g.syncCount {
//		groupIDs = groupIDs[:g.syncCount]
//	}
//	return groupIDs, nil
//}

func (g *GroupCacheRedis) DelMaxGroupMemberVersion(groupIDs ...string) cache.GroupCache {
	keys := make([]string, 0, len(groupIDs))
	for _, groupID := range groupIDs {
1 pkg/common/storage/cache/redis/msg.go vendored
@@ -183,5 +183,4 @@ func (c *msgCache) GetMessagesBySeq(ctx context.Context, conversationID string,
		return nil, nil, err
	}
	return seqMsgs, failedSeqs, nil

}
89 pkg/common/storage/cache/redis/online.go vendored Normal file
@@ -0,0 +1,89 @@
package redis

import (
	"context"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
	"strconv"
	"time"
)

func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
	return &userOnline{
		rdb:         rdb,
		expire:      cachekey.OnlineExpire,
		channelName: cachekey.OnlineChannel,
	}
}

type userOnline struct {
	rdb         redis.UniversalClient
	expire      time.Duration
	channelName string
}

func (s *userOnline) getUserOnlineKey(userID string) string {
	return cachekey.GetOnlineKey(userID)
}

func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
	members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{
		Min: strconv.FormatInt(time.Now().Unix(), 10),
		Max: "+inf",
	}).Result()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	platformIDs := make([]int32, 0, len(members))
	for _, member := range members {
		val, err := strconv.Atoi(member)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		platformIDs = append(platformIDs, int32(val))
	}
	return platformIDs, nil
}

func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	script := `
	local key = KEYS[1]
	local score = ARGV[3]
	local num1 = redis.call("ZCARD", key)
	redis.call("ZREMRANGEBYSCORE", key, "-inf", ARGV[2])
	for i = 5, tonumber(ARGV[4])+4 do
		redis.call("ZREM", key, ARGV[i])
	end
	local num2 = redis.call("ZCARD", key)
	for i = 5+tonumber(ARGV[4]), #ARGV do
		redis.call("ZADD", key, score, ARGV[i])
	end
	redis.call("EXPIRE", key, ARGV[1])
	local num3 = redis.call("ZCARD", key)
	local change = (num1 ~= num2) or (num2 ~= num3)
	if change then
		local members = redis.call("ZRANGE", key, 0, -1)
		table.insert(members, KEYS[2])
		redis.call("PUBLISH", KEYS[3], table.concat(members, ":"))
		return 1
	else
		return 0
	end
`
	now := time.Now()
	argv := make([]any, 0, 2+len(online)+len(offline))
	argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline)))
	for _, platformID := range offline {
		argv = append(argv, platformID)
	}
	for _, platformID := range online {
		argv = append(argv, platformID)
	}
	keys := []string{s.getUserOnlineKey(userID), userID, s.channelName}
	if err := s.rdb.Eval(ctx, script, keys, argv).Err(); err != nil {
		return err
	}
	return nil
}
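The sorted set above scores each platform ID with its expiry timestamp, which is why GetOnline can select live platforms with a plain ZRANGEBYSCORE from now to +inf, and why the script compares ZCARD before and after each mutation to decide whether anything actually changed and a PUBLISH is warranted. The published payload is the remaining members plus the userID, joined with ":". A small subscriber-side decoder sketch — this helper does not exist in the repo; the payload format is read off the script above:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseOnlinePayload splits a message published by the Lua script above.
// "1:3:10001" means user 10001 is online on platforms 1 and 3; a bare
// "10001" means the user is now fully offline.
func parseOnlinePayload(payload string) (userID string, platformIDs []int32, err error) {
	parts := strings.Split(payload, ":")
	userID = parts[len(parts)-1] // the script appends the userID last
	for _, p := range parts[:len(parts)-1] {
		id, err := strconv.ParseInt(p, 10, 32)
		if err != nil {
			return "", nil, err
		}
		platformIDs = append(platformIDs, int32(id))
	}
	return userID, platformIDs, nil
}

func main() {
	u, ps, _ := parseOnlinePayload("1:3:10001")
	fmt.Println(u, ps) // 10001 [1 3]
}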
@@ -2,6 +2,7 @@ package redis

import (
	"context"
	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
@@ -109,7 +110,7 @@ func (rsm *RedisShardManager) ProcessKeysBySlot(
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
	slots := make(map[int64][]string)
	clusterClient, isCluster := redisClient.(*redis.ClusterClient)
	if isCluster {
	if isCluster && len(keys) > 1 {
		pipe := clusterClient.Pipeline()
		cmds := make([]*redis.IntCmd, len(keys))
		for i, key := range keys {
@@ -195,3 +196,16 @@ func ProcessKeysBySlot(
	}
	return nil
}

func DeleteCacheBySlot(ctx context.Context, rcClient *rockscache.Client, keys []string) error {
	switch len(keys) {
	case 0:
		return nil
	case 1:
		return rcClient.TagAsDeletedBatch2(ctx, keys)
	default:
		return ProcessKeysBySlot(ctx, getRocksCacheRedisClient(rcClient), keys, func(ctx context.Context, slot int64, keys []string) error {
			return rcClient.TagAsDeletedBatch2(ctx, keys)
		})
	}
}

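In cluster mode a single pipeline or multi-key command may only touch keys in one hash slot, so groupKeysBySlot asks the cluster for each key's slot first (the new len(keys) > 1 guard skips that round trip when there is nothing to group), and DeleteCacheBySlot then issues one TagAsDeletedBatch2 per slot. The grouping step in isolation, as a runnable sketch — the FNV hash below stands in for the CRC16 slot function Redis Cluster actually uses, purely so the example runs without a cluster:

package main

import (
	"fmt"
	"hash/fnv"
)

// groupBySlotSketch buckets keys by slot so each bucket can be handled with
// one pipeline/batch. The real code obtains slots from the cluster itself.
func groupBySlotSketch(keys []string, slotOf func(string) int64) map[int64][]string {
	slots := make(map[int64][]string)
	for _, key := range keys {
		s := slotOf(key)
		slots[s] = append(slots[s], key)
	}
	return slots
}

func main() {
	slotOf := func(key string) int64 {
		h := fnv.New32a() // NOT Redis's CRC16; illustrative stand-in only
		h.Write([]byte(key))
		return int64(h.Sum32() % 16384) // Redis Cluster has 16384 slots
	}
	for slot, keys := range groupBySlotSketch([]string{"a", "b", "c"}, slotOf) {
		fmt.Printf("slot %d -> %v (one batch per slot)\n", slot, keys)
	}
}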
200 pkg/common/storage/cache/redis/seq.go vendored
@@ -1,200 +0,0 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package redis

import (
	"context"
	"errors"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/utils/stringutil"
	"github.com/redis/go-redis/v9"
	"sync"
)

func NewSeqCache(rdb redis.UniversalClient) cache.SeqCache {
	return &seqCache{rdb: rdb}
}

type seqCache struct {
	rdb redis.UniversalClient
}

func (c *seqCache) getMaxSeqKey(conversationID string) string {
	return cachekey.GetMaxSeqKey(conversationID)
}

func (c *seqCache) getMinSeqKey(conversationID string) string {
	return cachekey.GetMinSeqKey(conversationID)
}

func (c *seqCache) getHasReadSeqKey(conversationID string, userID string) string {
	return cachekey.GetHasReadSeqKey(conversationID, userID)
}

func (c *seqCache) getConversationUserMinSeqKey(conversationID, userID string) string {
	return cachekey.GetConversationUserMinSeqKey(conversationID, userID)
}

func (c *seqCache) setSeq(ctx context.Context, conversationID string, seq int64, getkey func(conversationID string) string) error {
	return errs.Wrap(c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err())
}

func (c *seqCache) getSeq(ctx context.Context, conversationID string, getkey func(conversationID string) string) (int64, error) {
	val, err := c.rdb.Get(ctx, getkey(conversationID)).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
	return val, nil
}

func (c *seqCache) getSeqs(ctx context.Context, items []string, getkey func(s string) string) (m map[string]int64, err error) {
	m = make(map[string]int64, len(items))
	var (
		reverseMap = make(map[string]string, len(items))
		keys       = make([]string, len(items))
		lock       sync.Mutex
	)

	for i, v := range items {
		keys[i] = getkey(v)
		reverseMap[getkey(v)] = v
	}

	manager := NewRedisShardManager(c.rdb)
	if err = manager.ProcessKeysBySlot(ctx, keys, func(ctx context.Context, _ int64, keys []string) error {
		res, err := c.rdb.MGet(ctx, keys...).Result()
		if err != nil && !errors.Is(err, redis.Nil) {
			return errs.Wrap(err)
		}

		// len(res) <= len(items)
		for i := range res {
			strRes, ok := res[i].(string)
			if !ok {
				continue
			}
			val := stringutil.StringToInt64(strRes)
			if val != 0 {
				lock.Lock()
				m[reverseMap[keys[i]]] = val
				lock.Unlock()
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}

	return m, nil
}

func (c *seqCache) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
	return c.setSeq(ctx, conversationID, maxSeq, c.getMaxSeqKey)
}

func (c *seqCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (m map[string]int64, err error) {
	return c.getSeqs(ctx, conversationIDs, c.getMaxSeqKey)
}

func (c *seqCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return c.getSeq(ctx, conversationID, c.getMaxSeqKey)
}

func (c *seqCache) SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error {
	return c.setSeq(ctx, conversationID, minSeq, c.getMinSeqKey)
}

func (c *seqCache) setSeqs(ctx context.Context, seqs map[string]int64, getkey func(key string) string) error {
	for conversationID, seq := range seqs {
		if err := c.rdb.Set(ctx, getkey(conversationID), seq, 0).Err(); err != nil {
			return errs.Wrap(err)
		}
	}
	return nil
}

func (c *seqCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	return c.setSeqs(ctx, seqs, c.getMinSeqKey)
}

func (c *seqCache) GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	return c.getSeqs(ctx, conversationIDs, c.getMinSeqKey)
}

func (c *seqCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return c.getSeq(ctx, conversationID, c.getMinSeqKey)
}

func (c *seqCache) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	val, err := c.rdb.Get(ctx, c.getConversationUserMinSeqKey(conversationID, userID)).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
	return val, nil
}

func (c *seqCache) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (m map[string]int64, err error) {
	return c.getSeqs(ctx, userIDs, func(userID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
}

func (c *seqCache) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
	return errs.Wrap(c.rdb.Set(ctx, c.getConversationUserMinSeqKey(conversationID, userID), minSeq, 0).Err())
}

func (c *seqCache) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
	return c.setSeqs(ctx, seqs, func(userID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
}

func (c *seqCache) SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error) {
	return c.setSeqs(ctx, seqs, func(conversationID string) string {
		return c.getConversationUserMinSeqKey(conversationID, userID)
	})
}

func (c *seqCache) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
	return errs.Wrap(c.rdb.Set(ctx, c.getHasReadSeqKey(conversationID, userID), hasReadSeq, 0).Err())
}

func (c *seqCache) SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error {
	return c.setSeqs(ctx, hasReadSeqs, func(userID string) string {
		return c.getHasReadSeqKey(conversationID, userID)
	})
}

func (c *seqCache) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error {
	return c.setSeqs(ctx, hasReadSeqs, func(conversationID string) string {
		return c.getHasReadSeqKey(conversationID, userID)
	})
}

func (c *seqCache) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	return c.getSeqs(ctx, conversationIDs, func(conversationID string) string {
		return c.getHasReadSeqKey(conversationID, userID)
	})
}

func (c *seqCache) GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) {
	val, err := c.rdb.Get(ctx, c.getHasReadSeqKey(conversationID, userID)).Int64()
	if err != nil {
		return 0, err
	}
	return val, nil
}
333 pkg/common/storage/cache/redis/seq_conversation.go vendored Normal file
@@ -0,0 +1,333 @@
package redis

import (
	"context"
	"errors"
	"fmt"
	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
	"time"
)

func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
	return &seqConversationCacheRedis{
		rdb:              rdb,
		mgo:              mgo,
		lockTime:         time.Second * 3,
		dataTime:         time.Hour * 24 * 365,
		minSeqExpireTime: time.Hour,
		rocks:            rockscache.NewClient(rdb, *GetRocksCacheOptions()),
	}
}

type seqConversationCacheRedis struct {
	rdb              redis.UniversalClient
	mgo              database.SeqConversation
	rocks            *rockscache.Client
	lockTime         time.Duration
	dataTime         time.Duration
	minSeqExpireTime time.Duration
}

func (s *seqConversationCacheRedis) getMinSeqKey(conversationID string) string {
	return cachekey.GetMallocMinSeqKey(conversationID)
}

func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
	return s.SetMinSeqs(ctx, map[string]int64{conversationID: seq})
}

func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMinSeq(ctx, conversationID)
	})
}

func (s *seqConversationCacheRedis) getSingleMaxSeq(ctx context.Context, conversationID string) (map[string]int64, error) {
	seq, err := s.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return nil, err
	}
	return map[string]int64{conversationID: seq}, nil
}

func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
	result := make([]*redis.StringCmd, len(keys))
	pipe := s.rdb.Pipeline()
	for i, key := range keys {
		result[i] = pipe.HGet(ctx, key, "CURR")
	}
	if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
		return errs.Wrap(err)
	}
	var notFoundKey []string
	for i, r := range result {
		req, err := r.Int64()
		if err == nil {
			seqs[keyConversationID[keys[i]]] = req
		} else if errors.Is(err, redis.Nil) {
			notFoundKey = append(notFoundKey, keys[i])
		} else {
			return errs.Wrap(err)
		}
	}
	if len(notFoundKey) > 0 {
		conversationID := keyConversationID[notFoundKey[0]]
		seq, err := s.GetMaxSeq(ctx, conversationID)
		if err != nil {
			return err
		}
		seqs[conversationID] = seq
	}
	return nil
}

func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	switch len(conversationIDs) {
	case 0:
		return map[string]int64{}, nil
	case 1:
		return s.getSingleMaxSeq(ctx, conversationIDs[0])
	}
	keys := make([]string, 0, len(conversationIDs))
	keyConversationID := make(map[string]string, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		key := s.getSeqMallocKey(conversationID)
		if _, ok := keyConversationID[key]; ok {
			continue
		}
		keys = append(keys, key)
		keyConversationID[key] = conversationID
	}
	if len(keys) == 1 {
		return s.getSingleMaxSeq(ctx, conversationIDs[0])
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
	if err != nil {
		return nil, err
	}
	seqs := make(map[string]int64, len(conversationIDs))
	for _, keys := range slotKeys {
		if err := s.batchGetMaxSeq(ctx, keys, keyConversationID, seqs); err != nil {
			return nil, err
		}
	}
	return seqs, nil
}

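batchGetMaxSeq above reads the max seq as the CURR field of each conversation's malloc hash, one pipelined HGET per key, and falls back to GetMaxSeq (which primes the hash through malloc) for keys that do not exist yet. The pipelined read in isolation — the key names and localhost address are illustrative assumptions, not values from this repo:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/redis/go-redis/v9"
)

// readCurrBatch is a trimmed-down batchGetMaxSeq: one pipelined HGET per key
// reads the "CURR" field that the malloc script maintains.
func readCurrBatch(ctx context.Context, rdb redis.UniversalClient, keys []string) (map[string]int64, error) {
	cmds := make([]*redis.StringCmd, len(keys))
	pipe := rdb.Pipeline()
	for i, key := range keys {
		cmds[i] = pipe.HGet(ctx, key, "CURR")
	}
	// Exec reports redis.Nil when any HGet missed; misses are handled per command.
	if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
		return nil, err
	}
	out := make(map[string]int64, len(keys))
	for i, cmd := range cmds {
		if v, err := cmd.Int64(); err == nil {
			out[keys[i]] = v // keys with no hash yet are simply skipped here
		}
	}
	return out, nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local Redis
	seqs, err := readCurrBatch(context.Background(), rdb, []string{"malloc_seq:c1", "malloc_seq:c2"})
	fmt.Println(seqs, err)
}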
func (s *seqConversationCacheRedis) getSeqMallocKey(conversationID string) string {
	return cachekey.GetMallocSeqKey(conversationID)
}

func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) (int64, error) {
	if lastSeq < currSeq {
		return 0, errs.New("lastSeq must be greater than or equal to currSeq")
	}
	// 0: success
	// 1: success, but the key had expired and no one else had re-locked it
	// 2: already locked by someone else
	script := `
	local key = KEYS[1]
	local lockValue = ARGV[1]
	local dataSecond = ARGV[2]
	local curr_seq = tonumber(ARGV[3])
	local last_seq = tonumber(ARGV[4])
	if redis.call("EXISTS", key) == 0 then
		redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
		redis.call("EXPIRE", key, dataSecond)
		return 1
	end
	if redis.call("HGET", key, "LOCK") ~= lockValue then
		return 2
	end
	redis.call("HDEL", key, "LOCK")
	redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq)
	redis.call("EXPIRE", key, dataSecond)
	return 0
`
	result, err := s.rdb.Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
	return result, nil
}

// malloc: size == 0 reads the current seq; size > 0 allocates a block of seqs.
func (s *seqConversationCacheRedis) malloc(ctx context.Context, key string, size int64) ([]int64, error) {
	// 0: success
	// 1: key not found; the caller must fetch from the DB (lock acquired)
	// 2: already locked by another allocator
	// 3: allocation would exceed the cached max; caller must refill from the DB (lock acquired)
	script := `
	local key = KEYS[1]
	local size = tonumber(ARGV[1])
	local lockSecond = ARGV[2]
	local dataSecond = ARGV[3]
	local result = {}
	if redis.call("EXISTS", key) == 0 then
		local lockValue = math.random(0, 999999999)
		redis.call("HSET", key, "LOCK", lockValue)
		redis.call("EXPIRE", key, lockSecond)
		table.insert(result, 1)
		table.insert(result, lockValue)
		return result
	end
	if redis.call("HEXISTS", key, "LOCK") == 1 then
		table.insert(result, 2)
		return result
	end
	local curr_seq = tonumber(redis.call("HGET", key, "CURR"))
	local last_seq = tonumber(redis.call("HGET", key, "LAST"))
	if size == 0 then
		redis.call("EXPIRE", key, dataSecond)
		table.insert(result, 0)
		table.insert(result, curr_seq)
		table.insert(result, last_seq)
		return result
	end
	local max_seq = curr_seq + size
	if max_seq > last_seq then
		local lockValue = math.random(0, 999999999)
		redis.call("HSET", key, "LOCK", lockValue)
		redis.call("HSET", key, "CURR", last_seq)
		redis.call("EXPIRE", key, lockSecond)
		table.insert(result, 3)
		table.insert(result, curr_seq)
		table.insert(result, last_seq)
		table.insert(result, lockValue)
		return result
	end
	redis.call("HSET", key, "CURR", max_seq)
	redis.call("EXPIRE", key, dataSecond)
	table.insert(result, 0)
	table.insert(result, curr_seq)
	table.insert(result, last_seq)
	return result
`
	result, err := s.rdb.Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second)).Int64Slice()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	return result, nil
}

func (s *seqConversationCacheRedis) wait(ctx context.Context) error {
	timer := time.NewTimer(time.Second / 4)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64) {
	for i := 0; i < 10; i++ {
		state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq)
		if err != nil {
			log.ZError(ctx, "set seq cache failed", err, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq, "count", i+1)
			if err := s.wait(ctx); err != nil {
				return
			}
			continue
		}
		switch state {
		case 0: // ideal state
		case 1:
			log.ZWarn(ctx, "set seq cache lock not found", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		case 2:
			log.ZWarn(ctx, "set seq cache lock to be held by someone else", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		default:
			log.ZError(ctx, "set seq cache lock unknown state", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		}
		return
	}
	log.ZError(ctx, "set seq cache retrying still failed", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
}

func (s *seqConversationCacheRedis) getMallocSize(conversationID string, size int64) int64 {
	if size == 0 {
		return 0
	}
	var basicSize int64
	if msgprocessor.IsGroupConversationID(conversationID) {
		basicSize = 100
	} else {
		basicSize = 50
	}
	basicSize += size
	return basicSize
}

func (s *seqConversationCacheRedis) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	if size < 0 {
		return 0, errs.New("size must not be negative")
	}
	key := s.getSeqMallocKey(conversationID)
	for i := 0; i < 10; i++ {
		states, err := s.malloc(ctx, key, size)
		if err != nil {
			return 0, err
		}
		switch states[0] {
		case 0: // success
			return states[1], nil
		case 1: // not found
			mallocSize := s.getMallocSize(conversationID, size)
			seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
			if err != nil {
				return 0, err
			}
			s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize)
			return seq, nil
		case 2: // locked
			if err := s.wait(ctx); err != nil {
				return 0, err
			}
			continue
		case 3: // exceeded cache max value
			currSeq := states[1]
			lastSeq := states[2]
			mallocSize := s.getMallocSize(conversationID, size)
			seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
			if err != nil {
				return 0, err
			}
			if lastSeq == seq {
				s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize)
				return currSeq, nil
			} else {
				log.ZWarn(ctx, "malloc seq not equal cache last seq", nil, "conversationID", conversationID, "currSeq", currSeq, "lastSeq", lastSeq, "mallocSeq", seq)
				s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize)
				return seq, nil
			}
		default:
			log.ZError(ctx, "malloc seq unknown state", nil, "state", states[0], "conversationID", conversationID, "size", size)
			return 0, errs.New(fmt.Sprintf("unknown state: %d", states[0]))
		}
	}
	log.ZError(ctx, "malloc seq retrying still failed", nil, "conversationID", conversationID, "size", size)
	return 0, errs.New("malloc seq waiting for lock timeout", "conversationID", conversationID, "size", size)
}

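A worked example of the allocation flow (numbers illustrative): a group conversation asks for size=10, getMallocSize pads that to 110, s.mgo.Malloc reserves [seq, seq+110) in Mongo, setSeqRetry primes Redis with CURR=seq+10 and LAST=seq+110, and the caller receives seq; the next ten size-10 requests are then served purely from Redis by bumping CURR. Per TestDel below, the usable seqs after a call are (first, first+size]. A minimal consumer sketch against the Malloc method shown above — the interface, assignSeqs, and the fake allocator are illustrative, not repo code:

package main

import (
	"context"
	"fmt"
)

// SeqAllocator is the one method of the cache this sketch depends on;
// *seqConversationCacheRedis satisfies it.
type SeqAllocator interface {
	Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
}

// assignSeqs reserves n sequence numbers for a conversation. Malloc returns
// the value *before* the first allocated seq, so the usable range is
// (first, first+n].
func assignSeqs(ctx context.Context, alloc SeqAllocator, conversationID string, n int64) ([]int64, error) {
	first, err := alloc.Malloc(ctx, conversationID, n)
	if err != nil {
		return nil, err
	}
	seqs := make([]int64, 0, n)
	for i := int64(1); i <= n; i++ {
		seqs = append(seqs, first+i)
	}
	return seqs, nil
}

// fakeAlloc hands out seqs from a counter so the sketch runs without Redis.
type fakeAlloc struct{ next int64 }

func (f *fakeAlloc) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	first := f.next
	f.next += size
	return first, nil
}

func main() {
	seqs, _ := assignSeqs(context.Background(), &fakeAlloc{}, "g_1", 3)
	fmt.Println(seqs) // [1 2 3]
}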
func (s *seqConversationCacheRedis) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return s.Malloc(ctx, conversationID, 0)
}

func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		keys = append(keys, s.getMinSeqKey(conversationID))
		if err := s.mgo.SetMinSeq(ctx, conversationID, seq); err != nil {
			return err
		}
	}
	return DeleteCacheBySlot(ctx, s.rocks, keys)
}
109 pkg/common/storage/cache/redis/seq_conversation_test.go vendored Normal file
@@ -0,0 +1,109 @@
package redis

import (
	"context"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
	"github.com/redis/go-redis/v9"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func newTestSeq() *seqConversationCacheRedis {
	mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))
	if err != nil {
		panic(err)
	}
	model, err := mgo.NewSeqConversationMongo(mgocli.Database("openim_v3"))
	if err != nil {
		panic(err)
	}
	opt := &redis.Options{
		Addr:     "172.16.8.48:16379",
		Password: "openIM123",
		DB:       1,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return NewSeqConversationCacheRedis(rdb, model).(*seqConversationCacheRedis)
}

func TestSeq(t *testing.T) {
	ts := newTestSeq()
	var (
		wg    sync.WaitGroup
		speed atomic.Int64
	)

	const count = 128
	wg.Add(count)
	for i := 0; i < count; i++ {
		index := i + 1
		go func() {
			defer wg.Done()
			var size int64 = 10
			cID := strconv.Itoa(index * 1)
			for i := 1; ; i++ {
				//first, err := ts.mgo.Malloc(context.Background(), cID, size) // mongo
				first, err := ts.Malloc(context.Background(), cID, size) // redis
				if err != nil {
					t.Logf("[%d-%d] %s %s", index, i, cID, err)
					return
				}
				speed.Add(size)
				_ = first
				//t.Logf("[%d] %d -> %d", i, first+1, first+size)
			}
		}()
	}

	done := make(chan struct{})

	go func() {
		wg.Wait()
		close(done)
	}()

	ticker := time.NewTicker(time.Second)

	for {
		select {
		case <-done:
			ticker.Stop()
			return
		case <-ticker.C:
			value := speed.Swap(0)
			t.Logf("speed: %d/s", value)
		}
	}
}

func TestDel(t *testing.T) {
	ts := newTestSeq()
	for i := 1; i < 100; i++ {
		var size int64 = 100
		first, err := ts.Malloc(context.Background(), "100", size)
		if err != nil {
			t.Logf("[%d] %s", i, err)
			return
		}
		t.Logf("[%d] %d -> %d", i, first+1, first+size)
		time.Sleep(time.Second)
	}
}

func TestSeqMalloc(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeq(context.Background(), "100"))
}

func TestMinSeq(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMinSeq(context.Background(), "10000000"))
}
185 pkg/common/storage/cache/redis/seq_user.go vendored Normal file
@@ -0,0 +1,185 @@
package redis

import (
	"context"
	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
	"strconv"
	"time"
)

func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
	return &seqUserCacheRedis{
		rdb:               rdb,
		mgo:               mgo,
		readSeqWriteRatio: 100,
		expireTime:        time.Hour * 24 * 7,
		readExpireTime:    time.Hour * 24 * 30,
		rocks:             rockscache.NewClient(rdb, *GetRocksCacheOptions()),
	}
}

type seqUserCacheRedis struct {
	rdb               redis.UniversalClient
	mgo               database.SeqUser
	rocks             *rockscache.Client
	expireTime        time.Duration
	readExpireTime    time.Duration
	readSeqWriteRatio int64
}

func (s *seqUserCacheRedis) getSeqUserMaxSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMaxSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserMinSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMinSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserReadSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserReadSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMaxSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMaxSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	if err := s.mgo.SetMaxSeq(ctx, conversationID, userID, seq); err != nil {
		return err
	}
	return s.rocks.TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
}

func (s *seqUserCacheRedis) GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMinSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMaxSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.SetMinSeqs(ctx, userID, map[string]int64{conversationID: seq})
}

func (s *seqUserCacheRedis) GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserReadSeqKey(conversationID, userID), s.readExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMaxSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	if seq%s.readSeqWriteRatio == 0 {
		if err := s.mgo.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
			return err
		}
	}
	if err := s.rocks.RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
		return errs.Wrap(err)
	}
	return nil
}

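SetReadSeq above always refreshes Redis but persists to Mongo only when the seq crosses a multiple of readSeqWriteRatio (100), trading at most one ratio's worth of lost read-seq progress on cache loss for a 100x reduction in Mongo writes. The gate in isolation, as a runnable sketch:

package main

import "fmt"

// shouldPersist mirrors the condition in SetReadSeq: Redis on every update,
// Mongo only once per readSeqWriteRatio updates.
func shouldPersist(seq, ratio int64) bool {
	return seq%ratio == 0
}

func main() {
	const ratio = 100 // readSeqWriteRatio in the code above
	for _, seq := range []int64{99, 100, 101, 200} {
		fmt.Printf("seq=%d persistToMongo=%v\n", seq, shouldPersist(seq, ratio))
	}
	// Only 100 and 200 reach Mongo; 99 and 101 live in Redis alone.
}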
func (s *seqUserCacheRedis) SetMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		if err := s.mgo.SetMinSeq(ctx, conversationID, userID, seq); err != nil {
			return err
		}
		keys = append(keys, s.getSeqUserMinSeqKey(conversationID, userID))
	}
	return DeleteCacheBySlot(ctx, s.rocks, keys)
}

func (s *seqUserCacheRedis) setRedisReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	keySeq := make(map[string]int64)
	for conversationID, seq := range seqs {
		key := s.getSeqUserReadSeqKey(conversationID, userID)
		keys = append(keys, key)
		keySeq[key] = seq
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rdb, keys)
	if err != nil {
		return err
	}
	for _, keys := range slotKeys {
		pipe := s.rdb.Pipeline()
		for _, key := range keys {
			pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
			pipe.Expire(ctx, key, s.readExpireTime)
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return err
		}
	}
	return nil
}

func (s *seqUserCacheRedis) SetReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	if len(seqs) == 0 {
		return nil
	}
	if err := s.setRedisReadSeqs(ctx, userID, seqs); err != nil {
		return err
	}
	for conversationID, seq := range seqs {
		if seq%s.readSeqWriteRatio == 0 {
			if err := s.mgo.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
				return err
			}
		}
	}
	return nil
}

func (s *seqUserCacheRedis) GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	res, err := batchGetCache2(ctx, s.rocks, s.readExpireTime, conversationIDs, func(conversationID string) string {
		return s.getSeqUserReadSeqKey(conversationID, userID)
	}, func(v *readSeqModel) string {
		return v.ConversationID
	}, func(ctx context.Context, conversationIDs []string) ([]*readSeqModel, error) {
		seqs, err := s.mgo.GetReadSeqs(ctx, userID, conversationIDs)
		if err != nil {
			return nil, err
		}
		res := make([]*readSeqModel, 0, len(seqs))
		for conversationID, seq := range seqs {
			res = append(res, &readSeqModel{ConversationID: conversationID, Seq: seq})
		}
		return res, nil
	})
	if err != nil {
		return nil, err
	}
	data := make(map[string]int64)
	for _, v := range res {
		data[v.ConversationID] = v.Seq
	}
	return data, nil
}

var _ BatchCacheCallback[string] = (*readSeqModel)(nil)

type readSeqModel struct {
	ConversationID string
	Seq            int64
}

func (r *readSeqModel) BatchCache(conversationID string) {
	r.ConversationID = conversationID
}

func (r *readSeqModel) UnmarshalJSON(bytes []byte) (err error) {
	r.Seq, err = strconv.ParseInt(string(bytes), 10, 64)
	return
}

func (r *readSeqModel) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatInt(r.Seq, 10)), nil
}
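readSeqModel above marshals to the bare integer, so the value rockscache stores is just, say, 42 rather than a JSON object, and BatchCache restores the conversationID that the cache key already encodes. A standalone copy of that round trip — the type here only mimics readSeqModel for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// readSeq serializes to the bare integer, keeping the cached value a plain
// number instead of a {"ConversationID":...,"Seq":...} object.
type readSeq struct {
	ConversationID string // not serialized; recovered from the cache key
	Seq            int64
}

func (r readSeq) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatInt(r.Seq, 10)), nil
}

func (r *readSeq) UnmarshalJSON(b []byte) (err error) {
	r.Seq, err = strconv.ParseInt(string(b), 10, 64)
	return
}

func main() {
	b, _ := json.Marshal(readSeq{ConversationID: "c1", Seq: 42})
	fmt.Println(string(b)) // 42
	var r readSeq
	_ = json.Unmarshal([]byte("42"), &r)
	fmt.Println(r.Seq) // 42
}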
79 pkg/common/storage/cache/redis/seq_user_test.go vendored Normal file
@@ -0,0 +1,79 @@
package redis

import (
	"context"
	"fmt"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/redis/go-redis/v9"
	"log"
	"strconv"
	"sync/atomic"
	"testing"
	"time"
)

func newTestOnline() *userOnline {
	opt := &redis.Options{
		Addr:     "172.16.8.48:16379",
		Password: "openIM123",
		DB:       0,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return &userOnline{rdb: rdb, expire: time.Hour, channelName: "user_online"}
}

func TestOnline(t *testing.T) {
	ts := newTestOnline()
	var count atomic.Int64
	for i := 0; i < 64; i++ {
		go func(userID string) {
			var err error
			for i := 0; ; i++ {
				if i%2 == 0 {
					err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9})
				} else {
					err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6})
				}
				if err != nil {
					panic(err)
				}
				count.Add(1)
			}
		}(strconv.Itoa(10000 + i))
	}

	ticker := time.NewTicker(time.Second)
	for range ticker.C {
		t.Log(count.Swap(0))
	}
}

func TestGetOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pIDs, err := ts.GetOnline(ctx, "10000")
	if err != nil {
		panic(err)
	}
	t.Log(pIDs)
}

func TestRecvOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pubsub := ts.rdb.Subscribe(ctx, cachekey.OnlineChannel)

	_, err := pubsub.Receive(ctx)
	if err != nil {
		log.Fatalf("Could not subscribe: %v", err)
	}

	ch := pubsub.Channel()

	for msg := range ch {
		fmt.Printf("Received message from channel %s: %s\n", msg.Channel, msg.Payload)
	}
}
188 pkg/common/storage/cache/redis/user.go vendored
@@ -16,21 +16,14 @@ package redis

import (
	"context"
	"encoding/json"
	"errors"
	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/protocol/constant"
	"github.com/openimsdk/protocol/user"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
	"hash/crc32"
	"strconv"
	"time"
)

@@ -61,8 +54,8 @@ func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache,
	}
}

func (u *UserCacheRedis) getOnlineStatusKey(modKey string) string {
	return cachekey.GetOnlineStatusKey(modKey)
func (u *UserCacheRedis) getUserID(user *model.User) string {
	return user.UserID
}

func (u *UserCacheRedis) CloneUserCache() cache.UserCache {
@@ -90,11 +83,7 @@ func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userIn
}

func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*model.User, error) {
	return batchGetCache(ctx, u.rcClient, u.expireTime, userIDs, func(userID string) string {
		return u.getUserInfoKey(userID)
	}, func(ctx context.Context, userID string) (*model.User, error) {
		return u.userDB.Take(ctx, userID)
	})
	return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs, u.getUserInfoKey, u.getUserID, u.userDB.Find)
}

func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) cache.UserCache {
@@ -130,174 +119,3 @@ func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) cache.UserC

	return cache
}

// GetUserStatus get user status.
func (u *UserCacheRedis) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
	userStatus := make([]*user.OnlineStatus, 0, len(userIDs))
	for _, userID := range userIDs {
		UserIDNum := crc32.ChecksumIEEE([]byte(userID))
		modKey := strconv.Itoa(int(UserIDNum % statusMod))
		var onlineStatus user.OnlineStatus
		key := u.getOnlineStatusKey(modKey)
		result, err := u.rdb.HGet(ctx, key, userID).Result()
		if err != nil {
			if errors.Is(err, redis.Nil) {
				// key or field does not exist
				userStatus = append(userStatus, &user.OnlineStatus{
					UserID:      userID,
					Status:      constant.Offline,
					PlatformIDs: nil,
				})

				continue
			} else {
				return nil, errs.Wrap(err)
			}
		}
		err = json.Unmarshal([]byte(result), &onlineStatus)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		onlineStatus.UserID = userID
		onlineStatus.Status = constant.Online
		userStatus = append(userStatus, &onlineStatus)
	}

	return userStatus, nil
}

// SetUserStatus Set the user status and save it in redis.
func (u *UserCacheRedis) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
	UserIDNum := crc32.ChecksumIEEE([]byte(userID))
	modKey := strconv.Itoa(int(UserIDNum % statusMod))
	key := u.getOnlineStatusKey(modKey)
	log.ZDebug(ctx, "SetUserStatus args", "userID", userID, "status", status, "platformID", platformID, "modKey", modKey, "key", key)
	isNewKey, err := u.rdb.Exists(ctx, key).Result()
	if err != nil {
		return errs.Wrap(err)
	}
	if isNewKey == 0 {
		if status == constant.Online {
			onlineStatus := user.OnlineStatus{
				UserID:      userID,
				Status:      constant.Online,
				PlatformIDs: []int32{platformID},
			}
			jsonData, err := json.Marshal(&onlineStatus)
			if err != nil {
				return errs.Wrap(err)
			}
			_, err = u.rdb.HSet(ctx, key, userID, string(jsonData)).Result()
			if err != nil {
				return errs.Wrap(err)
			}
			u.rdb.Expire(ctx, key, userOlineStatusExpireTime)

			return nil
		}
	}

	isNil := false
	result, err := u.rdb.HGet(ctx, key, userID).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			isNil = true
		} else {
			return errs.Wrap(err)
		}
	}

	if status == constant.Offline {
		err = u.refreshStatusOffline(ctx, userID, status, platformID, isNil, err, result, key)
		if err != nil {
			return err
		}
	} else {
		err = u.refreshStatusOnline(ctx, userID, platformID, isNil, err, result, key)
		if err != nil {
			return errs.Wrap(err)
		}
	}

	return nil
}

func (u *UserCacheRedis) refreshStatusOffline(ctx context.Context, userID string, status, platformID int32, isNil bool, err error, result, key string) error {
	if isNil {
		log.ZWarn(ctx, "this user is not online, the trigger order may be wrong",
			err, "userStatus", status)

		return nil
	}
	var onlineStatus user.OnlineStatus
	err = json.Unmarshal([]byte(result), &onlineStatus)
	if err != nil {
		return errs.Wrap(err)
	}
	var newPlatformIDs []int32
	for _, val := range onlineStatus.PlatformIDs {
		if val != platformID {
			newPlatformIDs = append(newPlatformIDs, val)
		}
	}
	if newPlatformIDs == nil {
		_, err = u.rdb.HDel(ctx, key, userID).Result()
		if err != nil {
			return errs.Wrap(err)
		}
	} else {
		onlineStatus.PlatformIDs = newPlatformIDs
		newjsonData, err := json.Marshal(&onlineStatus)
		if err != nil {
			return errs.Wrap(err)
		}
		_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
		if err != nil {
			return errs.Wrap(err)
		}
	}

	return nil
}

func (u *UserCacheRedis) refreshStatusOnline(ctx context.Context, userID string, platformID int32, isNil bool, err error, result, key string) error {
	var onlineStatus user.OnlineStatus
	if !isNil {
		err := json.Unmarshal([]byte(result), &onlineStatus)
		if err != nil {
			return errs.Wrap(err)
		}
		onlineStatus.PlatformIDs = RemoveRepeatedElementsInList(append(onlineStatus.PlatformIDs, platformID))
	} else {
		onlineStatus.PlatformIDs = append(onlineStatus.PlatformIDs, platformID)
	}
	onlineStatus.Status = constant.Online
	onlineStatus.UserID = userID
	newjsonData, err := json.Marshal(&onlineStatus)
	if err != nil {
		return errs.WrapMsg(err, "json.Marshal failed")
	}
	_, err = u.rdb.HSet(ctx, key, userID, string(newjsonData)).Result()
	if err != nil {
		return errs.Wrap(err)
	}

	return nil
}

type Comparable interface {
	~int | ~string | ~float64 | ~int32
}

func RemoveRepeatedElementsInList[T Comparable](slc []T) []T {
	var result []T
	tempMap := map[T]struct{}{}
	for _, e := range slc {
		if _, found := tempMap[e]; !found {
			tempMap[e] = struct{}{}
			result = append(result, e)
		}
	}

	return result
}

30 pkg/common/storage/cache/seq.go vendored
@@ -1,30 +0,0 @@
package cache

import (
	"context"
)

type SeqCache interface {
	SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
	GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
	SetMinSeqs(ctx context.Context, seqs map[string]int64) error
	GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
	SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
	// seqs map: key userID, value minSeq
	SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
	// seqs map: key conversationID, value minSeq
	SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	// has read seq
	SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
	// k: user, v: seq
	SetHasReadSeqs(ctx context.Context, conversationID string, hasReadSeqs map[string]int64) error
	// k: conversation, v: seq
	UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error
	GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
	GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
}
12 pkg/common/storage/cache/seq_conversation.go vendored Normal file
@@ -0,0 +1,12 @@
package cache

import "context"

type SeqConversationCache interface {
	Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	SetMinSeqs(ctx context.Context, seqs map[string]int64) error
}
15 pkg/common/storage/cache/seq_user.go vendored Normal file
@@ -0,0 +1,15 @@
package cache

import "context"

type SeqUser interface {
	GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	SetMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	SetReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	GetReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
}
5 pkg/common/storage/cache/user.go vendored
@@ -17,7 +17,6 @@ package cache
import (
	"context"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/protocol/user"
)

type UserCache interface {
@@ -28,6 +27,6 @@ type UserCache interface {
	DelUsersInfo(userIDs ...string) UserCache
	GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)
	DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache
	GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
	SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
	//GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
	//SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
}

@@ -66,6 +66,9 @@ type ConversationDatabase interface {
	GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
	// GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
	// FindRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) ([]string, error)
	FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*relationtb.VersionLog, error)
	FindMaxConversationUserVersionCache(ctx context.Context, userID string) (*relationtb.VersionLog, error)
	GetOwnerConversation(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (int64, []*relationtb.Conversation, error)
}

func NewConversationDatabase(conversation database.Conversation, cache cache.ConversationCache, tx tx.Tx) ConversationDatabase {
@@ -106,6 +109,7 @@ func (c *conversationDatabase) SetUsersConversationFieldTx(ctx context.Context,
			if _, ok := fieldMap["recv_msg_opt"]; ok {
				cache = cache.DelConversationNotReceiveMessageUserIDs(conversation.ConversationID)
			}
			cache = cache.DelConversationVersionUserIDs(haveUserIDs...)
		}
		NotUserIDs := stringutil.DifferenceString(haveUserIDs, userIDs)
		log.ZDebug(ctx, "SetUsersConversationFieldTx", "NotUserIDs", NotUserIDs, "haveUserIDs", haveUserIDs, "userIDs", userIDs)
@@ -137,7 +141,7 @@ func (c *conversationDatabase) UpdateUsersConversationField(ctx context.Context,
		return err
	}
	cache := c.cache.CloneConversationCache()
	cache = cache.DelUsersConversation(conversationID, userIDs...)
	cache = cache.DelUsersConversation(conversationID, userIDs...).DelConversationVersionUserIDs(userIDs...)
	if _, ok := args["recv_msg_opt"]; ok {
		cache = cache.DelConversationNotReceiveMessageUserIDs(conversationID)
	}
@@ -155,13 +159,14 @@ func (c *conversationDatabase) CreateConversation(ctx context.Context, conversat
		cache = cache.DelConversationNotReceiveMessageUserIDs(conversation.ConversationID)
		userIDs = append(userIDs, conversation.OwnerUserID)
	}
	return cache.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).ChainExecDel(ctx)
	return cache.DelConversationIDs(userIDs...).DelUserConversationIDsHash(userIDs...).DelConversationVersionUserIDs(userIDs...).ChainExecDel(ctx)
}

func (c *conversationDatabase) SyncPeerUserPrivateConversationTx(ctx context.Context, conversations []*relationtb.Conversation) error {
|
||||
return c.tx.Transaction(ctx, func(ctx context.Context) error {
|
||||
cache := c.cache.CloneConversationCache()
|
||||
for _, conversation := range conversations {
|
||||
cache = cache.DelConversationVersionUserIDs(conversation.OwnerUserID)
|
||||
for _, v := range [][2]string{{conversation.OwnerUserID, conversation.UserID}, {conversation.UserID, conversation.OwnerUserID}} {
|
||||
ownerUserID := v[0]
|
||||
userID := v[1]
|
||||
@ -207,6 +212,7 @@ func (c *conversationDatabase) GetUserAllConversation(ctx context.Context, owner
|
||||
func (c *conversationDatabase) SetUserConversations(ctx context.Context, ownerUserID string, conversations []*relationtb.Conversation) error {
|
||||
return c.tx.Transaction(ctx, func(ctx context.Context) error {
|
||||
cache := c.cache.CloneConversationCache()
|
||||
cache = cache.DelConversationVersionUserIDs(ownerUserID)
|
||||
groupIDs := datautil.Distinct(datautil.Filter(conversations, func(e *relationtb.Conversation) (string, bool) {
|
||||
return e.GroupID, e.GroupID != ""
|
||||
}))
|
||||
@ -322,3 +328,28 @@ func (c *conversationDatabase) GetConversationIDsNeedDestruct(ctx context.Contex
|
||||
func (c *conversationDatabase) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
|
||||
return c.cache.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
|
||||
}
|
||||
|
||||
func (c *conversationDatabase) FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*relationtb.VersionLog, error) {
|
||||
return c.conversationDB.FindConversationUserVersion(ctx, userID, version, limit)
|
||||
}
|
||||
|
||||
func (c *conversationDatabase) FindMaxConversationUserVersionCache(ctx context.Context, userID string) (*relationtb.VersionLog, error) {
|
||||
return c.cache.FindMaxConversationUserVersion(ctx, userID)
|
||||
}
|
||||
|
||||
func (c *conversationDatabase) GetOwnerConversation(ctx context.Context, ownerUserID string, pagination pagination.Pagination) (int64, []*relationtb.Conversation, error) {
|
||||
conversationIDs, err := c.cache.GetUserConversationIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
findConversationIDs := datautil.Paginate(conversationIDs, int(pagination.GetPageNumber()), int(pagination.GetShowNumber()))
|
||||
conversations := make([]*relationtb.Conversation, 0, len(findConversationIDs))
|
||||
for _, conversationID := range findConversationIDs {
|
||||
conversation, err := c.cache.GetConversation(ctx, ownerUserID, conversationID)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
conversations = append(conversations, conversation)
|
||||
}
|
||||
return int64(len(conversationIDs)), conversations, nil
|
||||
}
|
||||
|
||||
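GetOwnerConversation above pages through the cached conversation-ID list in memory rather than issuing a paged Mongo query. A self-contained sketch of that slice-pagination step (my own generic helper, standing in for datautil.Paginate and its assumed 1-based page numbering):

```go
// paginate returns the page of items for a 1-based pageNumber and page size
// showNumber, clamped to the slice bounds.
func paginate[T any](items []T, pageNumber, showNumber int) []T {
	if pageNumber < 1 || showNumber < 1 {
		return nil
	}
	start := (pageNumber - 1) * showNumber
	if start >= len(items) {
		return nil
	}
	end := start + showNumber
	if end > len(items) {
		end = len(items)
	}
	return items[start:end]
}
```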
pkg/common/storage/controller/msg.go
@ -69,26 +69,19 @@ type CommonMsgDatabase interface {
	DeleteUserMsgsBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) error
	// DeleteMsgsPhysicalBySeqs physically deletes messages by emptying them based on sequence numbers.
	DeleteMsgsPhysicalBySeqs(ctx context.Context, conversationID string, seqs []int64) error
	SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
	//SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error
	GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error
	SetMinSeqs(ctx context.Context, seqs map[string]int64) error

	GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error)
	SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error
	SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error)
	SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) (err error)
	SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error
	GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
	GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error)
	UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error

	GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error)
	GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
	//GetMongoMaxAndMinSeq(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo int64, err error)
	//GetConversationMinMaxSeqInMongoAndCache(ctx context.Context, conversationID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
	SetSendMsgStatus(ctx context.Context, id string, status int32) error
	GetSendMsgStatus(ctx context.Context, id string) (int32, error)
	SearchMessage(ctx context.Context, req *pbmsg.SearchMessageReq) (total int32, msgData []*sdkws.MsgData, err error)
@ -108,7 +101,7 @@ type CommonMsgDatabase interface {
	DeleteDocMsgBefore(ctx context.Context, ts int64, doc *model.MsgDocModel) ([]int, error)
}

func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seq cache.SeqCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seqUser cache.SeqUser, seqConversation cache.SeqConversationCache, kafkaConf *config.Kafka) (CommonMsgDatabase, error) {
	conf, err := kafka.BuildProducerConfig(*kafkaConf.Build())
	if err != nil {
		return nil, err
@ -128,29 +121,20 @@ func NewCommonMsgDatabase(msgDocModel database.Msg, msg cache.MsgCache, seq cach
	return &commonMsgDatabase{
		msgDocDatabase:  msgDocModel,
		msg:             msg,
		seq:             seq,
		seqUser:         seqUser,
		seqConversation: seqConversation,
		producer:        producerToRedis,
		producerToMongo: producerToMongo,
		producerToPush:  producerToPush,
	}, nil
}

//func InitCommonMsgDatabase(rdb redis.UniversalClient, database *mongo.Database, config *tools.CronTaskConfig) (CommonMsgDatabase, error) {
//	msgDocModel, err := database.NewMsgMongo(database)
//	if err != nil {
//		return nil, err
//	}
//	//todo MsgCacheTimeout
//	msg := cache.NewMsgCache(rdb, 86400, config.RedisConfig.EnablePipeline)
//	seq := cache.NewSeqCache(rdb)
//	return NewCommonMsgDatabase(msgDocModel, msg, seq, &config.KafkaConfig)
//}

type commonMsgDatabase struct {
	msgDocDatabase  database.Msg
	msgTable        model.MsgDocModel
	msg             cache.MsgCache
	seq             cache.SeqCache
	seqConversation cache.SeqConversationCache
	seqUser         cache.SeqUser
	producer        *kafka.Producer
	producerToMongo *kafka.Producer
	producerToPush  *kafka.Producer
@ -348,12 +332,16 @@ func (db *commonMsgDatabase) DeleteMessagesFromCache(ctx context.Context, conver
	return db.msg.DeleteMessagesFromCache(ctx, conversationID, seqs)
}

func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
	currentMaxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
	if err != nil && errs.Unwrap(err) != redis.Nil {
		log.ZError(ctx, "storage.seq.GetMaxSeq", err)
		return 0, false, err
func (db *commonMsgDatabase) setHasReadSeqs(ctx context.Context, conversationID string, userSeqMap map[string]int64) error {
	for userID, seq := range userSeqMap {
		if err := db.seqUser.SetReadSeq(ctx, conversationID, userID, seq); err != nil {
			return err
		}
	}
	return nil
}

func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversationID string, msgs []*sdkws.MsgData) (seq int64, isNew bool, err error) {
	lenList := len(msgs)
	if int64(lenList) > db.msgTable.GetSingleGocMsgNum() {
		return 0, false, errs.New("message count exceeds limit", "limit", db.msgTable.GetSingleGocMsgNum()).Wrap()
@ -361,9 +349,12 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
	if lenList < 1 {
		return 0, false, errs.New("no messages to insert", "minCount", 1).Wrap()
	}
	if errs.Unwrap(err) == redis.Nil {
		isNew = true
	currentMaxSeq, err := db.seqConversation.Malloc(ctx, conversationID, int64(len(msgs)))
	if err != nil {
		log.ZError(ctx, "storage.seq.Malloc", err)
		return 0, false, err
	}
	isNew = currentMaxSeq == 0
	lastMaxSeq := currentMaxSeq
	userSeqMap := make(map[string]int64)
	for _, m := range msgs {
@ -379,14 +370,7 @@ func (db *commonMsgDatabase) BatchInsertChat2Cache(ctx context.Context, conversa
	} else {
		prommetrics.MsgInsertRedisSuccessCounter.Inc()
	}

	err = db.seq.SetMaxSeq(ctx, conversationID, currentMaxSeq)
	if err != nil {
		log.ZError(ctx, "storage.seq.SetMaxSeq error", err, "conversationID", conversationID)
		prommetrics.SeqSetFailedCounter.Inc()
	}

	err = db.seq.SetHasReadSeqs(ctx, conversationID, userSeqMap)
	err = db.setHasReadSeqs(ctx, conversationID, userSeqMap)
	if err != nil {
		log.ZError(ctx, "SetHasReadSeqs error", err, "userSeqMap", userSeqMap, "conversationID", conversationID)
		prommetrics.SeqSetFailedCounter.Inc()
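The rewritten BatchInsertChat2Cache reserves the whole block of sequence numbers in a single Malloc call instead of the old read-then-SetMaxSeq round trip, so concurrent writers can no longer race on the max seq. A toy sketch of the allocation arithmetic (an in-memory counter standing in for the Mongo-backed Malloc; the per-message assignment loop is my reading of the surrounding diff):

```go
package main

import "fmt"

// counter mimics the Malloc contract: atomically advance maxSeq by size and
// return the value *before* the increment, i.e. the base of the reserved block.
type counter struct{ maxSeq int64 }

func (c *counter) Malloc(size int64) int64 {
	base := c.maxSeq
	c.maxSeq += size
	return base
}

func main() {
	c := &counter{}
	base := c.Malloc(3) // reserves seqs 1..3, returns 0
	isNew := base == 0  // base 0 means the conversation had no messages yet
	for i := int64(1); i <= 3; i++ {
		fmt.Println("assigned seq:", base+i, "new conversation:", isNew)
	}
}
```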
@ -514,12 +498,12 @@ func (db *commonMsgDatabase) getMsgBySeqsRange(ctx context.Context, userID strin
// "userMinSeq" can be set as the same value as the conversation's "maxSeq" at the moment they join the group.
// This ensures that their message retrieval starts from the point they joined.
func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID string, conversationID string, begin, end, num, userMaxSeq int64) (int64, int64, []*sdkws.MsgData, error) {
	userMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
	userMinSeq, err := db.seqUser.GetMinSeq(ctx, conversationID, userID)
	if err != nil && errs.Unwrap(err) != redis.Nil {
		return 0, 0, nil, err
	}
	minSeq, err := db.seq.GetMinSeq(ctx, conversationID)
	if err != nil && errs.Unwrap(err) != redis.Nil {
	minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
	if err != nil {
		return 0, 0, nil, err
	}
	if userMinSeq > minSeq {
@ -530,8 +514,8 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
		log.ZWarn(ctx, "minSeq > end", errs.New("minSeq>end"), "minSeq", minSeq, "end", end)
		return 0, 0, nil, nil
	}
	maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
	if err != nil && errs.Unwrap(err) != redis.Nil {
	maxSeq, err := db.seqConversation.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return 0, 0, nil, err
	}
	log.ZDebug(ctx, "GetMsgBySeqsRange", "userMinSeq", userMinSeq, "conMinSeq", minSeq, "conMaxSeq", maxSeq, "userMaxSeq", userMaxSeq)
@ -571,11 +555,8 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
	var successMsgs []*sdkws.MsgData
	log.ZDebug(ctx, "GetMsgBySeqsRange", "first seqs", seqs, "newBegin", newBegin, "newEnd", newEnd)
	cachedMsgs, failedSeqs, err := db.msg.GetMessagesBySeq(ctx, conversationID, seqs)
	if err != nil {
		if err != redis.Nil {

			log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
		}
	if err != nil && !errors.Is(err, redis.Nil) {
		log.ZError(ctx, "get message from redis exception", err, "conversationID", conversationID, "seqs", seqs)
	}
	successMsgs = append(successMsgs, cachedMsgs...)
	log.ZDebug(ctx, "get msgs from cache", "cachedMsgs", cachedMsgs)
@ -595,16 +576,16 @@ func (db *commonMsgDatabase) GetMsgBySeqsRange(ctx context.Context, userID strin
}

func (db *commonMsgDatabase) GetMsgBySeqs(ctx context.Context, userID string, conversationID string, seqs []int64) (int64, int64, []*sdkws.MsgData, error) {
	userMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
	userMinSeq, err := db.seqUser.GetMinSeq(ctx, conversationID, userID)
	if err != nil && errs.Unwrap(err) != redis.Nil {
		return 0, 0, nil, err
	}
	minSeq, err := db.seq.GetMinSeq(ctx, conversationID)
	if err != nil && errs.Unwrap(err) != redis.Nil {
	minSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
	if err != nil {
		return 0, 0, nil, err
	}
	maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
	if err != nil && errs.Unwrap(err) != redis.Nil {
	maxSeq, err := db.seqConversation.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return 0, 0, nil, err
	}
	if userMinSeq < minSeq {
@ -648,7 +629,7 @@ func (db *commonMsgDatabase) DeleteConversationMsgsAndSetMinSeq(ctx context.Cont
	if minSeq == 0 {
		return nil
	}
	return db.seq.SetMinSeq(ctx, conversationID, minSeq)
	return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq)
}

func (db *commonMsgDatabase) UserMsgsDestruct(ctx context.Context, userID string, conversationID string, destructTime int64, lastMsgDestructTime time.Time) (seqs []int64, err error) {
@ -693,12 +674,12 @@ func (db *commonMsgDatabase) UserMsgsDestruct(ctx context.Context, userID string
	log.ZDebug(ctx, "UserMsgsDestruct", "conversationID", conversationID, "userID", userID, "seqs", seqs)
	if len(seqs) > 0 {
		userMinSeq := seqs[len(seqs)-1] + 1
		currentUserMinSeq, err := db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
		if err != nil && errs.Unwrap(err) != redis.Nil {
		currentUserMinSeq, err := db.seqUser.GetMinSeq(ctx, conversationID, userID)
		if err != nil {
			return nil, err
		}
		if currentUserMinSeq < userMinSeq {
			if err := db.seq.SetConversationUserMinSeq(ctx, conversationID, userID, userMinSeq); err != nil {
			if err := db.seqUser.SetMinSeq(ctx, conversationID, userID, userMinSeq); err != nil {
				return nil, err
			}
		}
@ -796,89 +777,40 @@ func (db *commonMsgDatabase) DeleteUserMsgsBySeqs(ctx context.Context, userID st
	return nil
}

func (db *commonMsgDatabase) DeleteMsgsBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
	return nil
}

func (db *commonMsgDatabase) CleanUpUserConversationsMsgs(ctx context.Context, user string, conversationIDs []string) {
	for _, conversationID := range conversationIDs {
		maxSeq, err := db.seq.GetMaxSeq(ctx, conversationID)
		if err != nil {
			if err == redis.Nil {
				log.ZDebug(ctx, "max seq is nil", "conversationID", conversationID)
			} else {
				log.ZError(ctx, "get max seq failed", err, "conversationID", conversationID)
			}
			continue
		}
		if err := db.seq.SetMinSeq(ctx, conversationID, maxSeq+1); err != nil {
			log.ZError(ctx, "set min seq failed", err, "conversationID", conversationID, "minSeq", maxSeq+1)
		}
	}
}

func (db *commonMsgDatabase) SetMaxSeq(ctx context.Context, conversationID string, maxSeq int64) error {
	return db.seq.SetMaxSeq(ctx, conversationID, maxSeq)
}

func (db *commonMsgDatabase) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	return db.seq.GetMaxSeqs(ctx, conversationIDs)
	return db.seqConversation.GetMaxSeqs(ctx, conversationIDs)
}

func (db *commonMsgDatabase) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return db.seq.GetMaxSeq(ctx, conversationID)
	return db.seqConversation.GetMaxSeq(ctx, conversationID)
}

func (db *commonMsgDatabase) SetMinSeq(ctx context.Context, conversationID string, minSeq int64) error {
	return db.seq.SetMinSeq(ctx, conversationID, minSeq)
	return db.seqConversation.SetMinSeq(ctx, conversationID, minSeq)
}

func (db *commonMsgDatabase) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	return db.seq.SetMinSeqs(ctx, seqs)
}

func (db *commonMsgDatabase) GetMinSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	return db.seq.GetMinSeqs(ctx, conversationIDs)
}

func (db *commonMsgDatabase) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return db.seq.GetMinSeq(ctx, conversationID)
}

func (db *commonMsgDatabase) GetConversationUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return db.seq.GetConversationUserMinSeq(ctx, conversationID, userID)
}

func (db *commonMsgDatabase) GetConversationUserMinSeqs(ctx context.Context, conversationID string, userIDs []string) (map[string]int64, error) {
	return db.seq.GetConversationUserMinSeqs(ctx, conversationID, userIDs)
}

func (db *commonMsgDatabase) SetConversationUserMinSeq(ctx context.Context, conversationID string, userID string, minSeq int64) error {
	return db.seq.SetConversationUserMinSeq(ctx, conversationID, userID, minSeq)
}

func (db *commonMsgDatabase) SetConversationUserMinSeqs(ctx context.Context, conversationID string, seqs map[string]int64) (err error) {
	return db.seq.SetConversationUserMinSeqs(ctx, conversationID, seqs)
	return db.seqConversation.SetMinSeqs(ctx, seqs)
}

func (db *commonMsgDatabase) SetUserConversationsMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	return db.seq.SetUserConversationsMinSeqs(ctx, userID, seqs)
	return db.seqUser.SetMinSeqs(ctx, userID, seqs)
}

func (db *commonMsgDatabase) UserSetHasReadSeqs(ctx context.Context, userID string, hasReadSeqs map[string]int64) error {
	return db.seq.UserSetHasReadSeqs(ctx, userID, hasReadSeqs)
	return db.seqUser.SetReadSeqs(ctx, userID, hasReadSeqs)
}

func (db *commonMsgDatabase) SetHasReadSeq(ctx context.Context, userID string, conversationID string, hasReadSeq int64) error {
	return db.seq.SetHasReadSeq(ctx, userID, conversationID, hasReadSeq)
	return db.seqUser.SetReadSeq(ctx, conversationID, userID, hasReadSeq)
}

func (db *commonMsgDatabase) GetHasReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	return db.seq.GetHasReadSeqs(ctx, userID, conversationIDs)
	return db.seqUser.GetReadSeqs(ctx, userID, conversationIDs)
}

func (db *commonMsgDatabase) GetHasReadSeq(ctx context.Context, userID string, conversationID string) (int64, error) {
	return db.seq.GetHasReadSeq(ctx, userID, conversationID)
	return db.seqUser.GetReadSeq(ctx, conversationID, userID)
}

func (db *commonMsgDatabase) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
@ -894,11 +826,11 @@ func (db *commonMsgDatabase) GetConversationMinMaxSeqInMongoAndCache(ctx context
	if err != nil {
		return
	}
	minSeqCache, err = db.seq.GetMinSeq(ctx, conversationID)
	minSeqCache, err = db.seqConversation.GetMinSeq(ctx, conversationID)
	if err != nil {
		return
	}
	maxSeqCache, err = db.seq.GetMaxSeq(ctx, conversationID)
	maxSeqCache, err = db.seqConversation.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return
	}
@ -1010,33 +942,8 @@ func (db *commonMsgDatabase) DeleteDocMsgBefore(ctx context.Context, ts int64, d
	}
}

//func (db *commonMsgDatabase) ClearMsg(ctx context.Context, ts int64) (err error) {
//	var (
//		docNum int
//		msgNum int
//		start  = time.Now()
//	)
//	for {
//		msgs, err := db.msgDocDatabase.GetBeforeMsg(ctx, ts, 100)
//		if err != nil {
//			return err
//		}
//		if len(msgs) == 0 {
//			return nil
//		}
//		for _, msg := range msgs {
//			num, err := db.deleteOneMsg(ctx, ts, msg)
//			if err != nil {
//				return err
//			}
//			docNum++
//			msgNum += num
//		}
//	}
//}

func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID string, seq int64) error {
	dbSeq, err := db.seq.GetMinSeq(ctx, conversationID)
	dbSeq, err := db.seqConversation.GetMinSeq(ctx, conversationID)
	if err != nil {
		if errors.Is(errs.Unwrap(err), redis.Nil) {
			return nil
@ -1046,5 +953,5 @@ func (db *commonMsgDatabase) setMinSeq(ctx context.Context, conversationID strin
	if dbSeq >= seq {
		return nil
	}
	return db.seq.SetMinSeq(ctx, conversationID, seq)
	return db.seqConversation.SetMinSeq(ctx, conversationID, seq)
}
pkg/common/storage/controller/s3.go
@ -16,13 +16,15 @@ package controller

import (
	"context"
	redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"path/filepath"
	"time"

	redisCache "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/tools/db/pagination"
	"github.com/openimsdk/tools/s3"
	"github.com/openimsdk/tools/s3/cont"
	"github.com/redis/go-redis/v9"
@ -38,20 +40,27 @@ type S3Database interface {
	SetObject(ctx context.Context, info *model.Object) error
	StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error)
	FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error)
	FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error)
	DeleteObject(ctx context.Context, name string) error
	DeleteSpecifiedData(ctx context.Context, engine string, name string) error
	FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error)
	DelS3Key(ctx context.Context, engine string, keys ...string) error
}

func NewS3Database(rdb redis.UniversalClient, s3 s3.Interface, obj database.ObjectInfo) S3Database {
	return &s3Database{
		s3:    cont.New(redis2.NewS3Cache(rdb, s3), s3),
		cache: redis2.NewObjectCacheRedis(rdb, obj),
		db:    obj,
		s3:      cont.New(redisCache.NewS3Cache(rdb, s3), s3),
		cache:   redisCache.NewObjectCacheRedis(rdb, obj),
		s3cache: redisCache.NewS3Cache(rdb, s3),
		db:      obj,
	}
}

type s3Database struct {
	s3    *cont.Controller
	cache cache.ObjectCache
	db    database.ObjectInfo
	s3      *cont.Controller
	cache   cache.ObjectCache
	s3cache cont.S3Cache
	db      database.ObjectInfo
}

func (s *s3Database) PartSize(ctx context.Context, size int64) (int64, error) {
@ -111,3 +120,22 @@ func (s *s3Database) StatObject(ctx context.Context, name string) (*s3.ObjectInf
func (s *s3Database) FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error) {
	return s.s3.FormData(ctx, name, size, contentType, duration)
}

func (s *s3Database) FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) {
	return s.db.FindByExpires(ctx, duration, pagination)
}

func (s *s3Database) DeleteObject(ctx context.Context, name string) error {
	return s.s3.DeleteObject(ctx, name)
}

func (s *s3Database) DeleteSpecifiedData(ctx context.Context, engine string, name string) error {
	return s.db.Delete(ctx, engine, name)
}

func (s *s3Database) FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error) {
	return s.db.FindNotDelByS3(ctx, key, duration)
}

func (s *s3Database) DelS3Key(ctx context.Context, engine string, keys ...string) error {
	return s.s3cache.DelS3Key(ctx, engine, keys...)
}
pkg/common/storage/controller/user.go
@ -16,9 +16,10 @@ package controller

import (
	"context"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
	"github.com/openimsdk/tools/db/pagination"

@ -70,10 +70,6 @@ type UserDatabase interface {
	GetAllSubscribeList(ctx context.Context, userID string) ([]string, error)
	// GetSubscribedList Get all subscribed lists
	GetSubscribedList(ctx context.Context, userID string) ([]string, error)
	// GetUserStatus Get the online status of the user
	GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
	// SetUserStatus Set the user status and store the user status in redis
	SetUserStatus(ctx context.Context, userID string, status, platformID int32) error

	// CRUD user command
	AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error
@ -199,7 +195,7 @@ func (u *userDatabase) GetAllUserID(ctx context.Context, pagination pagination.P
}

func (u *userDatabase) GetUserByID(ctx context.Context, userID string) (user *model.User, err error) {
	return u.userDB.Take(ctx, userID)
	return u.cache.GetUserInfo(ctx, userID)
}

// CountTotal Get the total number of users.
@ -246,17 +242,6 @@ func (u *userDatabase) GetSubscribedList(ctx context.Context, userID string) ([]
	return list, nil
}

// GetUserStatus get user status.
func (u *userDatabase) GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error) {
	onlineStatusList, err := u.cache.GetUserStatus(ctx, userIDs)
	return onlineStatusList, err
}

// SetUserStatus Set the user status and save it in redis.
func (u *userDatabase) SetUserStatus(ctx context.Context, userID string, status, platformID int32) error {
	return u.cache.SetUserStatus(ctx, userID, status, platformID)
}

func (u *userDatabase) AddUserCommand(ctx context.Context, userID string, Type int32, UUID string, value string, ex string) error {
	return u.userDB.AddUserCommand(ctx, userID, Type, UUID, value, ex)
}
pkg/common/storage/database/conversation.go
@ -22,7 +22,6 @@ import (

type Conversation interface {
	Create(ctx context.Context, conversations []*model.Conversation) (err error)
	Delete(ctx context.Context, groupIDs []string) (err error)
	UpdateByMap(ctx context.Context, userIDs []string, conversationID string, args map[string]any) (rows int64, err error)
	Update(ctx context.Context, conversation *model.Conversation) (err error)
	Find(ctx context.Context, ownerUserID string, conversationIDs []string) (conversations []*model.Conversation, err error)
@ -39,4 +38,5 @@ type Conversation interface {
	GetConversationsByConversationID(ctx context.Context, conversationIDs []string) ([]*model.Conversation, error)
	GetConversationIDsNeedDestruct(ctx context.Context) ([]*model.Conversation, error)
	GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
	FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error)
}
pkg/common/storage/database/group_member.go
@ -28,6 +28,8 @@ type GroupMember interface {
	UpdateUserRoleLevels(ctx context.Context, groupID string, firstUserID string, firstUserRoleLevel int32, secondUserID string, secondUserRoleLevel int32) error
	FindMemberUserID(ctx context.Context, groupID string) (userIDs []string, err error)
	Take(ctx context.Context, groupID string, userID string) (groupMember *model.GroupMember, err error)
	Find(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error)
	FindInGroup(ctx context.Context, userID string, groupIDs []string) ([]*model.GroupMember, error)
	TakeOwner(ctx context.Context, groupID string) (groupMember *model.GroupMember, err error)
	SearchMember(ctx context.Context, keyword string, groupID string, pagination pagination.Pagination) (total int64, groupList []*model.GroupMember, err error)
	FindRoleLevelUserIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)
pkg/common/storage/database/mgo/conversation.go
@ -41,40 +41,71 @@ func NewConversationMongo(db *mongo.Database) (*ConversationMgo, error) {
	if err != nil {
		return nil, errs.Wrap(err)
	}
	return &ConversationMgo{coll: coll}, nil
	version, err := NewVersionLog(db.Collection(database.ConversationVersionName))
	if err != nil {
		return nil, err
	}
	return &ConversationMgo{version: version, coll: coll}, nil
}

type ConversationMgo struct {
	coll *mongo.Collection
	version database.VersionLog
	coll    *mongo.Collection
}

func (c *ConversationMgo) Create(ctx context.Context, conversations []*model.Conversation) (err error) {
	return mongoutil.InsertMany(ctx, c.coll, conversations)
	return mongoutil.IncrVersion(func() error {
		return mongoutil.InsertMany(ctx, c.coll, conversations)
	}, func() error {
		userConversation := make(map[string][]string)
		for _, conversation := range conversations {
			userConversation[conversation.OwnerUserID] = append(userConversation[conversation.OwnerUserID], conversation.ConversationID)
		}
		for userID, conversationIDs := range userConversation {
			if err := c.version.IncrVersion(ctx, userID, conversationIDs, model.VersionStateInsert); err != nil {
				return err
			}
		}
		return nil
	})
}

func (c *ConversationMgo) Delete(ctx context.Context, groupIDs []string) (err error) {
	return mongoutil.DeleteMany(ctx, c.coll, bson.M{"group_id": bson.M{"$in": groupIDs}})
}

func (c *ConversationMgo) UpdateByMap(ctx context.Context, userIDs []string, conversationID string, args map[string]any) (rows int64, err error) {
	if len(args) == 0 {
func (c *ConversationMgo) UpdateByMap(ctx context.Context, userIDs []string, conversationID string, args map[string]any) (int64, error) {
	if len(args) == 0 || len(userIDs) == 0 {
		return 0, nil
	}
	filter := bson.M{
		"conversation_id": conversationID,
		"owner_user_id":   bson.M{"$in": userIDs},
	}
	if len(userIDs) > 0 {
		filter["owner_user_id"] = bson.M{"$in": userIDs}
	}
	res, err := mongoutil.UpdateMany(ctx, c.coll, filter, bson.M{"$set": args})
	var rows int64
	err := mongoutil.IncrVersion(func() error {
		res, err := mongoutil.UpdateMany(ctx, c.coll, filter, bson.M{"$set": args})
		if err != nil {
			return err
		}
		rows = res.ModifiedCount
		return nil
	}, func() error {
		for _, userID := range userIDs {
			if err := c.version.IncrVersion(ctx, userID, []string{conversationID}, model.VersionStateUpdate); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return 0, err
	}
	return res.ModifiedCount, nil
	return rows, nil
}

func (c *ConversationMgo) Update(ctx context.Context, conversation *model.Conversation) (err error) {
	return mongoutil.UpdateOne(ctx, c.coll, bson.M{"owner_user_id": conversation.OwnerUserID, "conversation_id": conversation.ConversationID}, bson.M{"$set": conversation}, true)
	return mongoutil.IncrVersion(func() error {
		return mongoutil.UpdateOne(ctx, c.coll, bson.M{"owner_user_id": conversation.OwnerUserID, "conversation_id": conversation.ConversationID}, bson.M{"$set": conversation}, true)
	}, func() error {
		return c.version.IncrVersion(ctx, conversation.OwnerUserID, []string{conversation.ConversationID}, model.VersionStateUpdate)
	})
}

func (c *ConversationMgo) Find(ctx context.Context, ownerUserID string, conversationIDs []string) (conversations []*model.Conversation, err error) {
@ -178,3 +209,7 @@ func (c *ConversationMgo) GetConversationNotReceiveMessageUserIDs(ctx context.Co
		options.Find().SetProjection(bson.M{"_id": 0, "owner_user_id": 1}),
	)
}

func (c *ConversationMgo) FindConversationUserVersion(ctx context.Context, userID string, version uint, limit int) (*model.VersionLog, error) {
	return c.version.FindChangeLog(ctx, userID, version, limit)
}
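mongoutil.IncrVersion, used throughout the ConversationMgo changes above, takes the primary write and the version-log bump as two ordered closures. A rough stand-in for the control flow it implies (assumption: it simply short-circuits on the first error rather than running both steps unconditionally):

```go
// incrVersion sketches the two-phase helper: run the main write first, then
// record the version change; abort and surface the error if either step fails.
func incrVersion(main func() error, version func() error) error {
	if err := main(); err != nil {
		return err
	}
	return version()
}
```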
pkg/common/storage/database/mgo/group_member.go
@ -153,6 +153,22 @@ func (g *GroupMemberMgo) FindMemberUserID(ctx context.Context, groupID string) (
	return mongoutil.Find[string](ctx, g.coll, bson.M{"group_id": groupID}, options.Find().SetProjection(bson.M{"_id": 0, "user_id": 1}).SetSort(g.memberSort()))
}

func (g *GroupMemberMgo) Find(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
	filter := bson.M{"group_id": groupID}
	if len(userIDs) > 0 {
		filter["user_id"] = bson.M{"$in": userIDs}
	}
	return mongoutil.Find[*model.GroupMember](ctx, g.coll, filter)
}

func (g *GroupMemberMgo) FindInGroup(ctx context.Context, userID string, groupIDs []string) ([]*model.GroupMember, error) {
	filter := bson.M{"user_id": userID}
	if len(groupIDs) > 0 {
		filter["group_id"] = bson.M{"$in": groupIDs}
	}
	return mongoutil.Find[*model.GroupMember](ctx, g.coll, filter)
}

func (g *GroupMemberMgo) Take(ctx context.Context, groupID string, userID string) (groupMember *model.GroupMember, err error) {
	return mongoutil.FindOne[*model.GroupMember](ctx, g.coll, bson.M{"group_id": groupID, "user_id": userID})
}
pkg/common/storage/database/mgo/msg.go
@ -205,7 +205,7 @@ func (m *MsgMgo) GetMsgDocModelByIndex(ctx context.Context, conversationID strin
	if sort != 1 && sort != -1 {
		return nil, errs.ErrArgs.WrapMsg("mongo sort must be 1 or -1")
	}
	opt := options.Find().SetLimit(1).SetSkip(index).SetSort(bson.M{"doc_id": sort}).SetLimit(1)
	opt := options.Find().SetSkip(index).SetSort(bson.M{"_id": sort}).SetLimit(1)
	filter := bson.M{"doc_id": primitive.Regex{Pattern: fmt.Sprintf("^%s:", conversationID)}}
	msgs, err := mongoutil.Find[*model.MsgDocModel](ctx, m.coll, filter, opt)
	if err != nil {
pkg/common/storage/database/mgo/s3.go
@ -16,10 +16,13 @@ package mgo

import (
	"context"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"

	"github.com/openimsdk/tools/db/mongoutil"
	"github.com/openimsdk/tools/db/pagination"
	"github.com/openimsdk/tools/errs"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
@ -68,3 +71,14 @@ func (o *S3Mongo) Take(ctx context.Context, engine string, name string) (*model.
func (o *S3Mongo) Delete(ctx context.Context, engine string, name string) error {
	return mongoutil.DeleteOne(ctx, o.coll, bson.M{"name": name, "engine": engine})
}

func (o *S3Mongo) FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error) {
	return mongoutil.FindPage[*model.Object](ctx, o.coll, bson.M{
		"create_time": bson.M{"$lt": duration},
	}, pagination)
}

func (o *S3Mongo) FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error) {
	return mongoutil.Count(ctx, o.coll, bson.M{
		"key":         key,
		"create_time": bson.M{"$gt": duration},
	})
}
103 pkg/common/storage/database/mgo/seq_conversation.go Normal file
@ -0,0 +1,103 @@
package mgo

import (
	"context"
	"errors"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/mongoutil"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func NewSeqConversationMongo(db *mongo.Database) (database.SeqConversation, error) {
	coll := db.Collection(database.SeqConversationName)
	_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
		Keys: bson.D{
			{Key: "conversation_id", Value: 1},
		},
	})
	if err != nil {
		return nil, err
	}
	return &seqConversationMongo{coll: coll}, nil
}

type seqConversationMongo struct {
	coll *mongo.Collection
}

func (s *seqConversationMongo) setSeq(ctx context.Context, conversationID string, seq int64, field string) error {
	filter := map[string]any{
		"conversation_id": conversationID,
	}
	insert := bson.M{
		"conversation_id": conversationID,
		"min_seq":         0,
		"max_seq":         0,
	}
	delete(insert, field)
	update := map[string]any{
		"$set": bson.M{
			field: seq,
		},
		"$setOnInsert": insert,
	}
	opt := options.Update().SetUpsert(true)
	return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
}

func (s *seqConversationMongo) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	if size < 0 {
		return 0, errors.New("size must not be negative")
	}
	if size == 0 {
		return s.GetMaxSeq(ctx, conversationID)
	}
	filter := map[string]any{"conversation_id": conversationID}
	update := map[string]any{
		"$inc": map[string]any{"max_seq": size},
		"$set": map[string]any{"min_seq": int64(0)},
	}
	opt := options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After).SetProjection(map[string]any{"_id": 0, "max_seq": 1})
	lastSeq, err := mongoutil.FindOneAndUpdate[int64](ctx, s.coll, filter, update, opt)
	if err != nil {
		return 0, err
	}
	return lastSeq - size, nil
}

func (s *seqConversationMongo) SetMaxSeq(ctx context.Context, conversationID string, seq int64) error {
	return s.setSeq(ctx, conversationID, seq, "max_seq")
}

func (s *seqConversationMongo) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "max_seq": 1}))
	if err == nil {
		return seq, nil
	} else if IsNotFound(err) {
		return 0, nil
	} else {
		return 0, err
	}
}

func (s *seqConversationMongo) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	seq, err := mongoutil.FindOne[int64](ctx, s.coll, bson.M{"conversation_id": conversationID}, options.FindOne().SetProjection(map[string]any{"_id": 0, "min_seq": 1}))
	if err == nil {
		return seq, nil
	} else if IsNotFound(err) {
		return 0, nil
	} else {
		return 0, err
	}
}

func (s *seqConversationMongo) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
	return s.setSeq(ctx, conversationID, seq, "min_seq")
}

func (s *seqConversationMongo) GetConversation(ctx context.Context, conversationID string) (*model.SeqConversation, error) {
	return mongoutil.FindOne[*model.SeqConversation](ctx, s.coll, bson.M{"conversation_id": conversationID})
}
37 pkg/common/storage/database/mgo/seq_conversation_test.go Normal file
@ -0,0 +1,37 @@
package mgo

import (
	"context"
	"testing"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func Result[V any](val V, err error) V {
	if err != nil {
		panic(err)
	}
	return val
}

func Mongodb() *mongo.Database {
	return Result(
		mongo.Connect(context.Background(),
			options.Client().
				ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").
				SetConnectTimeout(5*time.Second)),
	).Database("openim_v3")
}

func TestUserSeq(t *testing.T) {
	uSeq := Result(NewSeqUserMongo(Mongodb())).(*seqUserMongo)
	t.Log(uSeq.SetMinSeq(context.Background(), "1000", "2000", 4))
}

func TestConversationSeq(t *testing.T) {
	cSeq := Result(NewSeqConversationMongo(Mongodb())).(*seqConversationMongo)
	t.Log(cSeq.SetMaxSeq(context.Background(), "2000", 10))
	t.Log(cSeq.Malloc(context.Background(), "2000", 10))
	t.Log(cSeq.GetMaxSeq(context.Background(), "2000"))
}
110 pkg/common/storage/database/mgo/seq_user.go Normal file
@ -0,0 +1,110 @@
package mgo

import (
	"context"
	"errors"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/mongoutil"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func NewSeqUserMongo(db *mongo.Database) (database.SeqUser, error) {
	coll := db.Collection(database.SeqUserName)
	_, err := coll.Indexes().CreateOne(context.Background(), mongo.IndexModel{
		Keys: bson.D{
			{Key: "user_id", Value: 1},
			{Key: "conversation_id", Value: 1},
		},
	})
	if err != nil {
		return nil, err
	}
	return &seqUserMongo{coll: coll}, nil
}

type seqUserMongo struct {
	coll *mongo.Collection
}

func (s *seqUserMongo) setSeq(ctx context.Context, conversationID string, userID string, seq int64, field string) error {
	filter := map[string]any{
		"user_id":         userID,
		"conversation_id": conversationID,
	}
	insert := bson.M{
		"user_id":         userID,
		"conversation_id": conversationID,
		"min_seq":         0,
		"max_seq":         0,
		"read_seq":        0,
	}
	delete(insert, field)
	update := map[string]any{
		"$set": bson.M{
			field: seq,
		},
		"$setOnInsert": insert,
	}
	opt := options.Update().SetUpsert(true)
	return mongoutil.UpdateOne(ctx, s.coll, filter, update, false, opt)
}

func (s *seqUserMongo) getSeq(ctx context.Context, conversationID string, userID string, field string) (int64, error) {
	filter := map[string]any{
		"user_id":         userID,
		"conversation_id": conversationID,
	}
	opt := options.FindOne().SetProjection(bson.M{"_id": 0, field: 1})
	seq, err := mongoutil.FindOne[int64](ctx, s.coll, filter, opt)
	if err == nil {
		return seq, nil
	} else if errors.Is(err, mongo.ErrNoDocuments) {
		return 0, nil
	} else {
		return 0, err
	}
}

func (s *seqUserMongo) GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return s.getSeq(ctx, conversationID, userID, "max_seq")
}

func (s *seqUserMongo) SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.setSeq(ctx, conversationID, userID, seq, "max_seq")
}

func (s *seqUserMongo) GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return s.getSeq(ctx, conversationID, userID, "min_seq")
}

func (s *seqUserMongo) SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.setSeq(ctx, conversationID, userID, seq, "min_seq")
}

func (s *seqUserMongo) GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return s.getSeq(ctx, conversationID, userID, "read_seq")
}

func (s *seqUserMongo) GetReadSeqs(ctx context.Context, userID string, conversationID []string) (map[string]int64, error) {
	if len(conversationID) == 0 {
		return map[string]int64{}, nil
	}
	filter := bson.M{"user_id": userID, "conversation_id": bson.M{"$in": conversationID}}
	opt := options.Find().SetProjection(bson.M{"_id": 0, "conversation_id": 1, "read_seq": 1})
	seqs, err := mongoutil.Find[*model.SeqUser](ctx, s.coll, filter, opt)
	if err != nil {
		return nil, err
	}
	res := make(map[string]int64)
	for _, seq := range seqs {
		res[seq.ConversationID] = seq.ReadSeq
	}
	return res, nil
}

func (s *seqUserMongo) SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.setSeq(ctx, conversationID, userID, seq, "read_seq")
}
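One detail in the setSeq upsert used by both seq stores is worth calling out: the field being $set is deleted from the $setOnInsert document first, because MongoDB rejects an update that writes the same path in both operators. A standalone sketch of the update document built above:

```go
package main

import "fmt"

// buildSetSeqUpdate mirrors the update document built by setSeq: $set the
// target field, $setOnInsert defaults for the rest, and drop the target
// field from the insert map to avoid a conflicting-path error on upsert.
func buildSetSeqUpdate(field string, seq int64) map[string]any {
	insert := map[string]any{"min_seq": int64(0), "max_seq": int64(0), "read_seq": int64(0)}
	delete(insert, field) // the $set below already covers this path
	return map[string]any{
		"$set":         map[string]any{field: seq},
		"$setOnInsert": insert,
	}
}

func main() {
	fmt.Println(buildSetSeqUpdate("read_seq", 42))
	// map[$set:map[read_seq:42] $setOnInsert:map[max_seq:0 min_seq:0]]
}
```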
@ -1,17 +1,20 @@
package database

const (
	BlackName              = "black"
	ConversationName       = "conversation"
	FriendName             = "friend"
	FriendVersionName      = "friend_version"
	FriendRequestName      = "friend_request"
	GroupName              = "group"
	GroupMemberName        = "group_member"
	GroupMemberVersionName = "group_member_version"
	GroupJoinVersionName   = "group_join_version"
	GroupRequestName       = "group_request"
	LogName                = "log"
	ObjectName             = "s3"
	UserName               = "user"
	BlackName               = "black"
	ConversationName        = "conversation"
	FriendName              = "friend"
	FriendVersionName       = "friend_version"
	FriendRequestName       = "friend_request"
	GroupName               = "group"
	GroupMemberName         = "group_member"
	GroupMemberVersionName  = "group_member_version"
	GroupJoinVersionName    = "group_join_version"
	ConversationVersionName = "conversation_version"
	GroupRequestName        = "group_request"
	LogName                 = "log"
	ObjectName              = "s3"
	UserName                = "user"
	SeqConversationName     = "seq"
	SeqUserName             = "seq_user"
)
@ -16,11 +16,16 @@ package database

import (
	"context"
	"time"

	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
	"github.com/openimsdk/tools/db/pagination"
)

type ObjectInfo interface {
	SetObject(ctx context.Context, obj *model.Object) error
	Take(ctx context.Context, engine string, name string) (*model.Object, error)
	Delete(ctx context.Context, engine string, name string) error
	FindByExpires(ctx context.Context, duration time.Time, pagination pagination.Pagination) (total int64, objects []*model.Object, err error)
	FindNotDelByS3(ctx context.Context, key string, duration time.Time) (int64, error)
}
11 pkg/common/storage/database/seq.go Normal file
@ -0,0 +1,11 @@
package database

import "context"

type SeqConversation interface {
	Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMaxSeq(ctx context.Context, conversationID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, seq int64) error
}
13 pkg/common/storage/database/seq_user.go Normal file
@ -0,0 +1,13 @@
package database

import "context"

type SeqUser interface {
	GetMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetReadSeqs(ctx context.Context, userID string, conversationID []string) (map[string]int64, error)
}
7 pkg/common/storage/model/seq.go Normal file
@ -0,0 +1,7 @@
package model

type SeqConversation struct {
	ConversationID string `bson:"conversation_id"`
	MaxSeq         int64  `bson:"max_seq"`
	MinSeq         int64  `bson:"min_seq"`
}
9 pkg/common/storage/model/seq_user.go Normal file
@ -0,0 +1,9 @@
package model

type SeqUser struct {
	UserID         string `bson:"user_id"`
	ConversationID string `bson:"conversation_id"`
	MinSeq         int64  `bson:"min_seq"`
	MaxSeq         int64  `bson:"max_seq"`
	ReadSeq        int64  `bson:"read_seq"`
}
@ -31,6 +31,12 @@ type Cache[V any] interface {
	Stop()
}

func LRUStringHash(key string) uint64 {
	h := fnv.New64a()
	h.Write(*(*[]byte)(unsafe.Pointer(&key)))
	return h.Sum64()
}

func New[V any](opts ...Option) Cache[V] {
	opt := defaultOption()
	for _, o := range opts {
@ -49,11 +55,7 @@ func New[V any](opts ...Option) Cache[V] {
	if opt.localSlotNum == 1 {
		c.local = createSimpleLRU()
	} else {
		c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, func(key string) uint64 {
			h := fnv.New64a()
			h.Write(*(*[]byte)(unsafe.Pointer(&key)))
			return h.Sum64()
		}, createSimpleLRU)
		c.local = lru.NewSlotLRU[string, V](opt.localSlotNum, LRUStringHash, createSimpleLRU)
	}
	if opt.linkSlotNum > 0 {
		c.link = link.New(opt.linkSlotNum)
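LRUStringHash views the key's bytes without copying (the unsafe cast) and runs FNV-1a over them; NewSlotLRU then reduces the hash modulo the slot count to pick a shard. A copy-based sketch of the same pipeline (plain []byte conversion in place of the unsafe pointer trick):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// hashKey is the copy-based equivalent of LRUStringHash: FNV-1a over the
// key's bytes. The production version avoids the []byte(key) allocation
// with an unsafe pointer cast.
func hashKey(key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64()
}

func main() {
	const slots = 8
	key := "conversation:12345"
	fmt.Println("slot:", hashKey(key)%slots) // which per-slot LRU owns this key
}
```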
@ -20,6 +20,7 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]

type LRU[K comparable, V any] interface {
	Get(key K, fetch func() (V, error)) (V, error)
	SetHas(key K, value V) bool
	Del(key K) bool
	Stop()
}

@ -89,5 +89,15 @@ func (x *ExpirationLRU[K, V]) Del(key K) bool {
	return ok
}

func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.core.Contains(key) {
		x.core.Add(key, &expirationLruItem[V]{value: value})
		return true
	}
	return false
}

func (x *ExpirationLRU[K, V]) Stop() {
}
@ -88,6 +88,28 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
	return v.value, v.err
}

//func (x *LayLRU[K, V]) Set(key K, value V) {
//	x.lock.Lock()
//	x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
//	x.lock.Unlock()
//}
//
//func (x *LayLRU[K, V]) Has(key K) bool {
//	x.lock.Lock()
//	defer x.lock.Unlock()
//	return x.core.Contains(key)
//}

func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
	x.lock.Lock()
	defer x.lock.Unlock()
	if x.core.Contains(key) {
		x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
		return true
	}
	return false
}

func (x *LayLRU[K, V]) Del(key K) bool {
	x.lock.Lock()
	ok := x.core.Remove(key)
@ -40,6 +40,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
	return x.slots[x.getIndex(key)].Get(key, fetch)
}

func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
	return x.slots[x.getIndex(key)].SetHas(key, value)
}

func (x *slotLRU[K, V]) Del(key K) bool {
	return x.slots[x.getIndex(key)].Del(key)
}
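SetHas, now part of the LRU interface and implemented by both LRU variants and the slot wrapper, updates a key only if it is already resident and reports whether it did. A sketch of the calling pattern this enables (the refresh-without-insert motivation is my reading, not stated in the commit):

```go
// updateIfCached refreshes an entry in place but never inserts a new key,
// so event-driven writes cannot grow the cache with keys nobody has read.
func updateIfCached[K comparable, V any](c interface{ SetHas(K, V) bool }, key K, val V) bool {
	return c.SetHas(key, val) // false: key not resident; the next Get will fetch it
}
```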
@ -30,7 +30,7 @@ func defaultOption() *option {
		localSuccessTTL: time.Minute,
		localFailedTTL:  time.Second * 5,
		delFn:           make([]func(ctx context.Context, key ...string), 0, 2),
		target:          emptyTarget{},
		target:          EmptyTarget{},
	}
}

@ -123,14 +123,14 @@ func WithDeleteKeyBefore(fn func(ctx context.Context, key ...string)) Option {
	}
}

type emptyTarget struct{}
type EmptyTarget struct{}

func (e emptyTarget) IncrGetHit() {}
func (e EmptyTarget) IncrGetHit() {}

func (e emptyTarget) IncrGetSuccess() {}
func (e EmptyTarget) IncrGetSuccess() {}

func (e emptyTarget) IncrGetFailed() {}
func (e EmptyTarget) IncrGetFailed() {}

func (e emptyTarget) IncrDelHit() {}
func (e EmptyTarget) IncrDelHit() {}

func (e emptyTarget) IncrDelNotFound() {}
func (e EmptyTarget) IncrDelNotFound() {}
@ -24,6 +24,10 @@ import (
	"google.golang.org/protobuf/proto"
)

func IsGroupConversationID(conversationID string) bool {
	return strings.HasPrefix(conversationID, "g_") || strings.HasPrefix(conversationID, "sg_")
}

func GetNotificationConversationIDByMsg(msg *sdkws.MsgData) string {
	switch msg.SessionType {
	case constant.SingleChatType:
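IsGroupConversationID is a plain prefix check; a quick self-contained illustration (the "si_" single-chat ID form is my assumption from OpenIM's conversation-ID scheme, not shown in this diff):

```go
package main

import (
	"fmt"
	"strings"
)

// isGroupConversationID is a local copy of the new helper for demonstration.
func isGroupConversationID(conversationID string) bool {
	return strings.HasPrefix(conversationID, "g_") || strings.HasPrefix(conversationID, "sg_")
}

func main() {
	fmt.Println(isGroupConversationID("g_team"))  // true: normal group
	fmt.Println(isGroupConversationID("sg_team")) // true: super group
	fmt.Println(isGroupConversationID("si_1_2"))  // false: single chat (assumed form)
}
```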
Some files were not shown because too many files have changed in this diff.