check login

wangchuxiao 2022-09-16 18:10:27 +08:00
commit 4c0b96d81a
68 changed files with 1224 additions and 311 deletions

View File

@ -46,11 +46,16 @@ func main() {
f, _ := os.Create("../logs/api.log")
gin.DefaultWriter = io.MultiWriter(f)
// gin.SetMode(gin.DebugMode)
r := gin.Default()
r := gin.New()
r.Use(gin.Recovery())
r.Use(utils.CorsHandler())
log.Info("load config: ", config.Config)
log.Info("load config: ", config.Config)
r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
if config.Config.Prometheus.Enable {
promePkg.NewApiRequestCounter()
promePkg.NewApiRequestFailedCounter()
promePkg.NewApiRequestSuccessCounter()
r.Use(promePkg.PromeTheusMiddleware)
r.GET("/metrics", promePkg.PrometheusHandler())
}
// user routing group, which handles user registration and login services
@ -226,10 +231,10 @@ func main() {
if config.Config.Api.ListenIP != "" {
address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort)
}
address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort)
fmt.Println("start api server, address: ", address)
fmt.Println("start api server, address: ", address, "OpenIM version: ", constant.CurrentVersion, "\n")
err := r.Run(address)
if err != nil {
log.Error("", "api run failed ", *ginPort, err.Error())
log.Error("", "api run failed ", address, err.Error())
panic("api start failed " + err.Error())
}
}
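
The api/main.go hunk above swaps gin.Default() for gin.New() with an explicit Recovery middleware and, when config.Config.Prometheus.Enable is set, registers request counters plus a /metrics endpoint. A minimal sketch of that wiring, assuming gin and the stock promhttp handler rather than OpenIM's promePkg:

```go
package api

import (
	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func newRouter(prometheusEnable bool) *gin.Engine {
	r := gin.New()        // unlike gin.Default(), no logger middleware
	r.Use(gin.Recovery()) // keep panic recovery explicitly
	if prometheusEnable {
		// promhttp.Handler serves the default Prometheus registry;
		// the commit uses promePkg.PrometheusHandler() instead.
		r.GET("/metrics", gin.WrapH(promhttp.Handler()))
	}
	return r
}
```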

View File

@ -2,6 +2,7 @@ package main
import (
"Open_IM/internal/cms_api"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/utils"
"flag"
"fmt"
@ -24,6 +25,6 @@ func main() {
address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort)
}
address = config.Config.CmsApi.ListenIP + ":" + strconv.Itoa(*ginPort)
fmt.Println("start cms api server, address: ", address)
fmt.Println("start cms api server, address: ", address, "OpenIM version: ", constant.CurrentVersion, "\n")
router.Run(address)
}

View File

@ -65,13 +65,12 @@ func main() {
defaultPorts := config.Config.Demo.Port
ginPort := flag.Int("port", defaultPorts[0], "get ginServerPort from cmd,default 10004 as port")
flag.Parse()
fmt.Println("start demo api server, port: ", *ginPort)
address := "0.0.0.0:" + strconv.Itoa(*ginPort)
if config.Config.Api.ListenIP != "" {
address = config.Config.Api.ListenIP + ":" + strconv.Itoa(*ginPort)
}
address = config.Config.CmsApi.ListenIP + ":" + strconv.Itoa(*ginPort)
fmt.Println("start demo api server address: ", address)
fmt.Println("start demo api server address: ", address, "OpenIM version: ", constant.CurrentVersion, "\n")
go register.OnboardingProcessRoutine()
go register.ImportFriendRoutine()
err := r.Run(address)

View File

@ -21,7 +21,7 @@ func main() {
flag.Parse()
var wg sync.WaitGroup
wg.Add(1)
fmt.Println("start rpc/msg_gateway server, port: ", *rpcPort, *wsPort, *prometheusPort)
fmt.Println("start rpc/msg_gateway server, port: ", *rpcPort, *wsPort, *prometheusPort, "OpenIM version: ", constant.CurrentVersion, "\n")
gate.Init(*rpcPort, *wsPort)
gate.Run(*prometheusPort)
wg.Wait()

View File

@ -17,7 +17,7 @@ func main() {
flag.Parse()
log.NewPrivateLog(constant.LogFileName)
logic.Init()
fmt.Println("start msg_transfer server")
fmt.Println("start msg_transfer server ", "OpenIM version: ", constant.CurrentVersion, "\n")
logic.Run(*prometheusPort)
wg.Wait()
}

View File

@ -18,7 +18,7 @@ func main() {
var wg sync.WaitGroup
wg.Add(1)
log.NewPrivateLog(constant.LogFileName)
fmt.Println("start push rpc server, port: ", *rpcPort)
fmt.Println("start push rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
logic.Init(*rpcPort)
logic.Run(*prometheusPort)
wg.Wait()

View File

@ -3,6 +3,7 @@ package main
import (
rpcMessageCMS "Open_IM/internal/rpc/admin_cms"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.AdminCmsPrometheusPort[0], "adminCMSPrometheusPort default listen port")
flag.Parse()
fmt.Println("start cms rpc server, port: ", *rpcPort)
fmt.Println("start cms rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcMessageCMS.NewAdminCMSServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)
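
Each rpc main now spawns a goroutine calling promePkg.StartPromeSrv(*prometheusPort). A plausible shape for that helper, assuming it simply exposes the default registry over HTTP (the real implementation lives in Open_IM/pkg/common/prometheus):

```go
package prometheus

import (
	"net/http"
	"strconv"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// StartPromeSrv blocks serving /metrics, so callers run it in a goroutine.
func StartPromeSrv(port int) error {
	http.Handle("/metrics", promhttp.Handler())
	return http.ListenAndServe(":"+strconv.Itoa(port), nil)
}
```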

View File

@ -3,6 +3,7 @@ package main
import (
rpcAuth "Open_IM/internal/rpc/auth"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "RpcToken default listen port 10800")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.AuthPrometheusPort[0], "authPrometheusPort default listen port")
flag.Parse()
fmt.Println("start auth rpc server, port: ", *rpcPort)
fmt.Println("start auth rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcAuth.NewRpcAuthServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
rpcCache "Open_IM/internal/rpc/cache"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
@ -14,7 +15,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "RpcToken default listen port 10800")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.CachePrometheusPort[0], "cachePrometheusPort default listen port")
flag.Parse()
fmt.Println("start cache rpc server, port: ", *rpcPort)
fmt.Println("start cache rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcCache.NewCacheServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
rpcConversation "Open_IM/internal/rpc/conversation"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "RpcConversation default listen port 11300")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.ConversationPrometheusPort[0], "conversationPrometheusPort default listen port")
flag.Parse()
fmt.Println("start conversation rpc server, port: ", *rpcPort)
fmt.Println("start conversation rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpcConversation.NewRpcConversationServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
"Open_IM/internal/rpc/friend"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "get RpcFriendPort from cmd,default 12000 as port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.FriendPrometheusPort[0], "friendPrometheusPort default listen port")
flag.Parse()
fmt.Println("start friend rpc server, port: ", *rpcPort)
fmt.Println("start friend rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := friend.NewFriendServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
"Open_IM/internal/rpc/group"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "get RpcGroupPort from cmd,default 16000 as port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.GroupPrometheusPort[0], "groupPrometheusPort default listen port")
flag.Parse()
fmt.Println("start group rpc server, port: ", *rpcPort)
fmt.Println("start group rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := group.NewGroupServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
"Open_IM/internal/rpc/msg"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.MessagePrometheusPort[0], "msgPrometheusPort default listen port")
flag.Parse()
fmt.Println("start msg rpc server, port: ", *rpcPort)
fmt.Println("start msg rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := msg.NewRpcChatServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
rpc "Open_IM/internal/rpc/office"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.OfficePrometheusPort[0], "officePrometheusPort default listen port")
flag.Parse()
fmt.Println("start office rpc server, port: ", *rpcPort)
fmt.Println("start office rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := rpc.NewOfficeServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
"Open_IM/internal/rpc/organization"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "get RpcOrganizationPort from cmd,default 11200 as port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.OrganizationPrometheusPort[0], "organizationPrometheusPort default listen port")
flag.Parse()
fmt.Println("start organization rpc server, port: ", *rpcPort)
fmt.Println("start organization rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := organization.NewServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -3,6 +3,7 @@ package main
import (
"Open_IM/internal/rpc/user"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
promePkg "Open_IM/pkg/common/prometheus"
"flag"
"fmt"
@ -13,7 +14,7 @@ func main() {
rpcPort := flag.Int("port", defaultPorts[0], "rpc listening port")
prometheusPort := flag.Int("prometheus_port", config.Config.Prometheus.UserPrometheusPort[0], "userPrometheusPort default listen port")
flag.Parse()
fmt.Println("start user rpc server, port: ", *rpcPort)
fmt.Println("start user rpc server, port: ", *rpcPort, "OpenIM version: ", constant.CurrentVersion, "\n")
rpcServer := user.NewUserServer(*rpcPort)
go func() {
err := promePkg.StartPromeSrv(*prometheusPort)

View File

@ -7,6 +7,8 @@ serverversion: 2.3.1
etcd:
etcdSchema: openim #the default is fine
etcdAddr: [ 127.0.0.1:2379 ] #for single-machine deployment the default is fine
userName:
password:
k8sMod: false #enable k8s mode: request services via pod environment variables instead of etcd
@ -20,10 +22,13 @@ mysql:
dbMaxOpenConns: 100
dbMaxIdleConns: 10
dbMaxLifeTime: 5
logLevel: 1 #1=silent 2=error 3=warn 4=info
slowThreshold: 500
mongo:
dbUri: ""#when dbUri is not empty it is used directly
dbAddress: 127.0.0.1:37017 #mongo address when standalone, mongos address when sharded; the default is fine
#dbAddress: 127.0.0.1:37017 #mongo address when standalone, mongos address when sharded; the default is fine
dbAddress: [ 127.0.0.1:37017 ] #mongo address when standalone, mongos address when sharded; the default is fine
dbDirect: false
dbTimeout: 60
dbDatabase: openIM #mongo db, the default is fine
@ -44,6 +49,8 @@ redis:
enableCluster: false #turn this on if the external redis runs in cluster mode
kafka:
SASLUserName:
SASLPassword:
ws2mschat:
addr: [ 127.0.0.1:9092 ] #the default kafka config is fine
topic: "ws2ms_chat" #used to persist messages to mongo and mysql
@ -116,7 +123,7 @@ credential: #Tencent COS; needed when sending images, videos, and files, apply for your own
appBucket: app # bucket that stores the app
location: us-east-1
endpoint: http://127.0.0.1:10005 #minio public IP; this IP is for client access
endpointInner: http://127.0.0.1:10005 #minio internal address; fill in if the IM server can reach minio over the internal network
endpointInner: http://127.0.0.1:10005 #minio internal address; if the IM server can reach minio over the internal network
endpointInnerEnable: true #whether to use the minio internal address; when enabled, bucket initialization and IM server connections to minio go over the internal network
@ -227,6 +234,8 @@ push:
appKey: ""
intent: ""
enable: false
channelID: ""
channelName: ""
fcm: #firebase cloud messaging push
serviceAccount: "openim-5c6c0-firebase-adminsdk-ppwol-8765884a78.json" #account file; change this config and place the file under the config directory
enable: false
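
The new config keys (etcd credentials, gorm log level, mongo address list, kafka SASL, Getui notification channel) all need matching struct fields on the Go side. A hypothetical mapping with assumed field names, mirroring how OpenIM's config package binds this YAML:

```go
package config

// Field names here are assumed for illustration; the real definitions
// live in Open_IM/pkg/common/config.
type Etcd struct {
	EtcdSchema string   `yaml:"etcdSchema"`
	EtcdAddr   []string `yaml:"etcdAddr"`
	UserName   string   `yaml:"userName"` // new: etcd auth user
	Password   string   `yaml:"password"` // new: etcd auth password
}

type Kafka struct {
	SASLUserName string `yaml:"SASLUserName"` // new: kafka SASL user
	SASLPassword string `yaml:"SASLPassword"` // new: kafka SASL password
}

type Getui struct {
	Enable      bool   `yaml:"enable"`
	ChannelID   string `yaml:"channelID"`   // new: Android notification channel
	ChannelName string `yaml:"channelName"` // new: Android notification channel
}
```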

View File

@ -137,38 +137,41 @@ services:
max-size: "1g"
max-file: "2"
# prometheus:
# image: prom/prometheus
# volumes:
# - ./docker-compose_cfg/prometheus-compose.yml:/etc/prometheus/prometheus.yml
# # - ./components/prometheus_data:/prometheus
# container_name: prometheus
# ports:
# - 9091:9091
# depends_on:
# - open_im_server
# command: --web.listen-address=:9091 --config.file="/etc/prometheus/prometheus.yml"
# network_mode: "host"
#
# grafana:
# image: grafana/grafana
# volumes:
# # - ./grafana/dashboards/dashboard.json:/var/lib/grafana/dashboards/dashboard.json
# # - ./grafana/provisioning/dashboard.yaml:/etc/grafana/provisioning/dashboards/dashboard.yaml
# - ./docker-compose_cfg/datasource-compose.yaml:/etc/grafana/provisioning/datasources/datasource.yaml
# - ./docker-compose_cfg/grafana.ini:/etc/grafana/grafana.ini
# - ./docker-compose_cfg/node-exporter-full_rev1.json:/var/lib/grafana/dashboards/node-exporter-full_rev1.json
# container_name: grafana
# ports:
# - 10007:10007
# depends_on:
# - prometheus
# network_mode: "host"
#
# node-exporter:
# image: quay.io/prometheus/node-exporter
# container_name: node-exporter
# restart: always
# ports:
# - "9100:9100"
# # command: --collector.ENTER-THE-NAME-OF-COLLECTOR
prometheus:
image: prom/prometheus
volumes:
- ./docker-compose_cfg/prometheus-compose.yml:/etc/prometheus/prometheus.yml
# - ./components/prometheus_data:/prometheus
container_name: prometheus
ports:
- 9091:9091
depends_on:
- open_im_server
command: --web.listen-address=:9091 --config.file="/etc/prometheus/prometheus.yml"
network_mode: "host"
grafana:
image: grafana/grafana
volumes:
- ./docker-compose_cfg/datasource-compose.yaml:/etc/grafana/provisioning/datasources/datasource.yaml
- ./docker-compose_cfg/grafana.ini:/etc/grafana/grafana.ini
# - ./docker-compose_cfg/node-exporter-full_rev1.json:/var/lib/grafana/dashboards/node-exporter-full_rev1.json
# - ./components/grafana:/var/lib/grafana
- ./docker-compose_cfg/grafana.db:/var/lib/grafana/grafana.db
container_name: grafana
ports:
- 10007:10007
depends_on:
- prometheus
network_mode: "host"
privileged: true
user: root
# -rw-r-----
node-exporter:
image: quay.io/prometheus/node-exporter
container_name: node-exporter
restart: always
ports:
- "9100:9100"
# command: --collector.ENTER-THE-NAME-OF-COLLECTOR

docker-compose_cfg/grafana.db (binary, executable file)

Binary file not shown.

View File

@ -1105,7 +1105,7 @@ disable_sanitize_html = false
enable_alpha = false
app_tls_skip_verify_insecure = false
# Enter a comma-separated list of plugin identifiers to identify plugins to load even if they are unsigned. Plugins with modified signatures are never loaded.
allow_loading_unsigned_plugins =
allow_loading_unsigned_plugins = grafana-simple-json-backend-datasource
# Enable or disable installing / uninstalling / updating plugins directly from within Grafana.
plugin_admin_enabled = true
plugin_admin_external_manage_enabled = false

go.mod (4 changes)
View File

@ -27,6 +27,7 @@ require (
github.com/golang-jwt/jwt/v4 v4.1.0
github.com/golang/protobuf v1.5.2
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/jinzhu/copier v0.3.4
github.com/jinzhu/gorm v1.9.16
github.com/jonboulle/clockwork v0.2.2 // indirect
@ -38,7 +39,7 @@ require (
github.com/olivere/elastic/v7 v7.0.23
github.com/pelletier/go-toml/v2 v2.0.2 // indirect
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_golang v1.13.0
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5
github.com/robfig/cron/v3 v3.0.1
github.com/sirupsen/logrus v1.8.1
@ -49,7 +50,6 @@ require (
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.428
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/sms v1.0.428
github.com/tencentyun/qcloud-cos-sts-sdk v0.0.0-20210325043845-84a0811633ca
github.com/xuri/excelize/v2 v2.6.0
go.etcd.io/etcd/api/v3 v3.5.4
go.etcd.io/etcd/client/v3 v3.5.4
go.mongodb.org/mongo-driver v1.8.3

go.sum (24 changes)
View File

@ -156,6 +156,7 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
@ -178,6 +179,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@ -249,6 +251,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0=
github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -341,6 +344,7 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
@ -419,6 +423,7 @@ github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible h1:Y6sqxHMyB1D2YSzWkL
github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible/go.mod h1:ZQnN8lSECaebrkQytbHj4xNgtg8CR7RYXnPok8e0EHA=
github.com/lestrrat-go/strftime v1.0.4 h1:T1Rb9EPkAhgxKqbcMIPguPq8glqXTA1koF8n9BHElA8=
github.com/lestrrat-go/strftime v1.0.4/go.mod h1:E1nN3pCbtMSu1yjSVeyuRFVm/U0xoR76fd03sz+Qz4g=
github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lithammer/shortuuid v3.0.0+incompatible h1:NcD0xWW/MZYXEHa6ITy6kaXN5nwm/V115vj2YXfhS0w=
github.com/lithammer/shortuuid v3.0.0+incompatible/go.mod h1:FR74pbAuElzOUuenUHTK2Tciko1/vKuIKS9dSkDrA4w=
@ -430,6 +435,7 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA=
github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -450,8 +456,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@ -521,10 +525,6 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/richardlehane/mscfb v1.0.4 h1:WULscsljNPConisD5hR0+OyZjwK46Pfyr6mPu5ZawpM=
github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk=
github.com/richardlehane/msoleps v1.0.1 h1:RfrALnSNXzmXLbGct/P2b4xkFz4e8Gmj/0Vj9M9xC1o=
github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5 h1:mZHayPoR0lNmnHyvtYjDeq0zlVHn9K/ZXoy17ylucdo=
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5/go.mod h1:GEXHk5HgEKCvEIIrSpFI3ozzG5xOKA2DVlEX/gGnewM=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
@ -594,12 +594,6 @@ github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyh
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xuri/efp v0.0.0-20220407160117-ad0f7a785be8 h1:3X7aE0iLKJ5j+tz58BpvIZkXNV7Yq4jC93Z/rbN2Fxk=
github.com/xuri/efp v0.0.0-20220407160117-ad0f7a785be8/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI=
github.com/xuri/excelize/v2 v2.6.0 h1:m/aXAzSAqxgt74Nfd+sNzpzVKhTGl7+S9nbG4A57mF4=
github.com/xuri/excelize/v2 v2.6.0/go.mod h1:Q1YetlHesXEKwGFfeJn7PfEZz2IvHb6wdOeYjBxVcVs=
github.com/xuri/nfp v0.0.0-20220409054826-5e722a1d9e22 h1:OAmKAfT06//esDdpi/DZ8Qsdt4+M5+ltca05dA5bG2M=
github.com/xuri/nfp v0.0.0-20220409054826-5e722a1d9e22/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -654,7 +648,6 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220408190544-5352b0902921/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -747,7 +740,6 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220407224826-aac1ed45d8e3/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220622184535-263ec571b305 h1:dAgbJ2SP4jD6XYfMNLVj0BF21jo2PjChrtGaAvF5M3I=
golang.org/x/net v0.0.0-20220622184535-263ec571b305/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@ -766,8 +758,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -779,7 +771,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1076,7 +1067,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=

View File

@ -42,10 +42,14 @@ func KickGroupMember(c *gin.Context) {
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
if len(params.KickedUserIDList) > constant.MaxNotificationNum {
errMsg := params.OperationID + " too many members " + utils.Int32ToString(int32(len(params.KickedUserIDList)))
log.Error(params.OperationID, errMsg)
c.JSON(http.StatusOK, gin.H{"errCode": 400, "errMsg": errMsg})
return
}
req := &rpc.KickGroupMemberReq{}
utils.CopyStructFields(req, &params)
var ok bool
var errInfo string
ok, req.OpUserID, errInfo = token_verify.GetUserIDFromToken(c.Request.Header.Get("token"), req.OperationID)
@ -318,6 +322,12 @@ func InviteUserToGroup(c *gin.Context) {
c.JSON(http.StatusBadRequest, gin.H{"errCode": 400, "errMsg": err.Error()})
return
}
if len(params.InvitedUserIDList) > constant.MaxNotificationNum {
errMsg := params.OperationID + " too many members " + utils.Int32ToString(int32(len(params.InvitedUserIDList)))
log.Error(params.OperationID, errMsg)
c.JSON(http.StatusOK, gin.H{"errCode": 400, "errMsg": errMsg})
return
}
req := &rpc.InviteUserToGroupReq{}
utils.CopyStructFields(req, &params)
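
Both KickGroupMember and InviteUserToGroup gain the same pre-RPC guard: reject oversized user-ID lists before copying the request into the rpc struct. Factored out, the check looks roughly like this (constant, log, utils, http, and gin are the packages this file already imports):

```go
// rejectIfTooMany writes an error response and returns false when the
// list exceeds constant.MaxNotificationNum, matching the hunks above.
func rejectIfTooMany(c *gin.Context, operationID string, userIDList []string) bool {
	if len(userIDList) > constant.MaxNotificationNum {
		errMsg := operationID + " too many members " + utils.Int32ToString(int32(len(userIDList)))
		log.Error(operationID, errMsg)
		c.JSON(http.StatusOK, gin.H{"errCode": 400, "errMsg": errMsg})
		return false
	}
	return true
}
```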

View File

@ -3,6 +3,7 @@ package admin
import (
apiStruct "Open_IM/pkg/cms_api_struct"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbAdmin "Open_IM/pkg/proto/admin_cms"
@ -25,6 +26,7 @@ var (
)
func init() {
log.NewPrivateLog(constant.LogFileName)
operationID := utils.OperationIDGenerator()
log.NewInfo(operationID, utils.GetSelfFuncName(), "minio config: ", config.Config.Credential.Minio)
var initUrl string

View File

@ -18,7 +18,8 @@ import (
func NewGinRouter() *gin.Engine {
gin.SetMode(gin.ReleaseMode)
baseRouter := gin.Default()
baseRouter := gin.New()
baseRouter.Use()
if config.Config.Prometheus.Enable {
baseRouter.GET("/metrics", promePkg.PrometheusHandler())
}

View File

@ -1,7 +1,6 @@
package main
import "Open_IM/pkg/common/db"
func main() {
db.DB.BatchInsertChat()
}
//
//func main() {
// db.DB.BatchInsertChat()
//}

View File

@ -27,14 +27,13 @@ var (
)
func Init(rpcPort, wsPort int) {
//log initialization
rwLock = new(sync.RWMutex)
validate = validator.New()
statistics.NewStatistics(&sendMsgAllCount, config.Config.ModuleName.LongConnSvrName, fmt.Sprintf("%d second recv to msg_gateway sendMsgCount", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
statistics.NewStatistics(&userCount, config.Config.ModuleName.LongConnSvrName, fmt.Sprintf("%d second add user conn", constant.StatisticsTimeInterval), constant.StatisticsTimeInterval)
ws.onInit(wsPort)
rpcSvr.onInit(rpcPort)
initPrometheus()
}
func Run(promethuesPort int) {

View File

@ -5,6 +5,7 @@ import (
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbChat "Open_IM/pkg/proto/msg"
pbRtc "Open_IM/pkg/proto/rtc"
@ -43,15 +44,18 @@ func (ws *WServer) msgParse(conn *UserConn, binaryMsg []byte) {
case constant.WSGetNewestSeq:
log.NewInfo(m.OperationID, "getSeqReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.getSeqReq(conn, &m)
promePkg.PromeInc(promePkg.GetNewestSeqTotalCounter)
case constant.WSSendMsg:
log.NewInfo(m.OperationID, "sendMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.sendMsgReq(conn, &m)
promePkg.PromeInc(promePkg.MsgRecvTotalCounter)
case constant.WSSendSignalMsg:
log.NewInfo(m.OperationID, "sendSignalMsgReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.sendSignalMsgReq(conn, &m)
case constant.WSPullMsgBySeqList:
log.NewInfo(m.OperationID, "pullMsgBySeqListReq ", m.SendID, m.MsgIncr, m.ReqIdentifier)
ws.pullMsgBySeqListReq(conn, &m)
promePkg.PromeInc(promePkg.PullMsgBySeqListTotalCounter)
case constant.WsLogoutMsg:
log.NewInfo(m.OperationID, "conn.Close()", m.SendID, m.MsgIncr, m.ReqIdentifier)
// conn.Close()

View File

@ -4,6 +4,7 @@ import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbRelay "Open_IM/pkg/proto/relay"
@ -12,11 +13,13 @@ import (
"bytes"
"context"
"encoding/gob"
"github.com/golang/protobuf/proto"
"net"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/gorilla/websocket"
"google.golang.org/grpc"
)
@ -31,6 +34,17 @@ type RPCServer struct {
target string
}
func initPrometheus() {
promePkg.NewMsgRecvTotalCounter()
promePkg.NewGetNewestSeqTotalCounter()
promePkg.NewPullMsgBySeqListTotalCounter()
promePkg.NewMsgOnlinePushSuccessCounter()
promePkg.NewOnlineUserGauges()
//promePkg.NewSingleChatMsgRecvSuccessCounter()
//promePkg.NewGroupChatMsgRecvSuccessCounter()
//promePkg.NewWorkSuperGroupChatMsgRecvSuccessCounter()
}
func (r *RPCServer) onInit(rpcPort int) {
r.rpcPort = rpcPort
r.rpcRegisterName = config.Config.RpcRegisterName.OpenImRelayName
@ -52,7 +66,18 @@ func (r *RPCServer) run() {
panic("listening err:" + err.Error() + r.rpcRegisterName)
}
defer listener.Close()
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
pbRelay.RegisterRelayServer(srv, r)
@ -174,6 +199,7 @@ func (r *RPCServer) SuperGroupOnlineBatchPushOneMsg(_ context.Context, req *pbRe
resultCode := sendMsgBatchToUser(userConn, replyBytes.Bytes(), req, platform, v)
if resultCode == 0 && utils.IsContainInt(platform, r.pushTerminal) {
tempT.OnlinePush = true
promePkg.PromeInc(promePkg.MsgOnlinePushSuccessCounter)
log.Info(req.OperationID, "PushSuperMsgToUser is success By Ws", "args", req.String(), "recvPlatForm", constant.PlatformIDToName(platform), "recvID", v)
temp := &pbRelay.SingleMsgToUserPlatform{
ResultCode: resultCode,
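
The grpcOpts block in this file is the commit's recurring server-side pattern, repeated in nearly every rpc service below (push, admin_cms, auth, cache, conversation, friend, group, msg): when Prometheus is enabled, register the request counters and attach the go-grpc-prometheus interceptors before building the server. Reduced to a standalone sketch:

```go
package gateway

import (
	grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func newGRPCServer(prometheusEnable bool) *grpc.Server {
	var grpcOpts []grpc.ServerOption
	if prometheusEnable {
		grpcOpts = append(grpcOpts,
			grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
			grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
		)
	}
	// With the interceptors attached, grpcPrometheus.Register(srv) would
	// also export per-RPC metrics; the commit counts via promePkg instead.
	return grpc.NewServer(grpcOpts...)
}
```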

View File

@ -5,6 +5,7 @@ import (
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbRelay "Open_IM/pkg/proto/relay"
@ -90,7 +91,7 @@ func (ws *WServer) readMsg(conn *UserConn) {
log.NewInfo("", "this is a pingMessage")
}
if err != nil {
log.Error("", "WS ReadMsg error ", " userIP", conn.RemoteAddr().String(), "userUid", "platform", "error", err.Error())
log.NewWarn("", "WS ReadMsg error ", " userIP", conn.RemoteAddr().String(), "userUid", "platform", "error", err.Error())
userCount--
ws.delUserConn(conn)
return
@ -313,6 +314,7 @@ func (ws *WServer) addUserConn(uid string, platformID int, conn *UserConn, token
for _, v := range ws.wsUserToConn {
count = count + len(v)
}
promePkg.PromeGaugeInc(promePkg.OnlineUserGauge)
log.Debug(operationID, "WS Add operation", "", "wsUser added", ws.wsUserToConn, "connection_uid", uid, "connection_platform", constant.PlatformIDToName(platformID), "online_user_num", len(ws.wsUserToConn), "online_conn_num", count)
}
@ -352,6 +354,7 @@ func (ws *WServer) delUserConn(conn *UserConn) {
if callbackResp.ErrCode != 0 {
log.NewError(operationID, utils.GetSelfFuncName(), "callbackUserOffline failed", callbackResp)
}
promePkg.PromeGaugeDec(promePkg.OnlineUserGauge)
}
func (ws *WServer) getUserConn(uid string, platform int) *UserConn {

View File

@ -37,6 +37,9 @@ var (
func Init() {
cmdCh = make(chan Cmd2Value, 10000)
w = new(sync.Mutex)
if config.Config.Prometheus.Enable {
initPrometheus()
}
persistentCH.Init() // ws2mschat save mysql
historyCH.Init(cmdCh) //
historyMongoCH.Init()

View File

@ -18,13 +18,7 @@ import (
"github.com/Shopify/sarama"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
msgInsertMysqlCounter prometheus.Counter
msgInsertFailedMysqlCounter prometheus.Counter
promePkg "Open_IM/pkg/common/prometheus"
)
type PersistentConsumerHandler struct {
@ -38,20 +32,18 @@ func (pc *PersistentConsumerHandler) Init() {
pc.persistentConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic},
config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMySql)
if config.Config.Prometheus.Enable {
pc.initPrometheus()
}
}
func (pc *PersistentConsumerHandler) initPrometheus() {
msgInsertMysqlCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "insert_mysql_msg_total",
Help: "The total number of msg insert mysql events",
})
msgInsertFailedMysqlCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "insert_mysql_failed_msg_total",
Help: "The total number of msg insert mysql events",
})
func initPrometheus() {
promePkg.NewSeqGetSuccessCounter()
promePkg.NewSeqGetFailedCounter()
promePkg.NewSeqSetSuccessCounter()
promePkg.NewSeqSetFailedCounter()
promePkg.NewMsgInsertRedisSuccessCounter()
promePkg.NewMsgInsertRedisFailedCounter()
promePkg.NewMsgInsertMongoSuccessCounter()
promePkg.NewMsgInsertMongoFailedCounter()
}
func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) {
@ -85,12 +77,8 @@ func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMes
log.NewInfo(msgFromMQ.OperationID, "msg_transfer msg persisting", string(msg))
if err = im_mysql_msg_model.InsertMessageToChatLog(msgFromMQ); err != nil {
log.NewError(msgFromMQ.OperationID, "Message insert failed", "err", err.Error(), "msg", msgFromMQ.String())
// msgInsertFailedMysqlCounter.Inc()
return
}
if config.Config.Prometheus.Enable {
// msgInsertMysqlCounter.Inc()
}
}
}
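
The local promauto counters are replaced by shared ones from promePkg, created via New*Counter and bumped through PromeInc. A guess at those helpers, assuming promauto-backed package-level counters with a nil-safe increment (the real code is in Open_IM/pkg/common/prometheus):

```go
package prometheus

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var MsgInsertMongoSuccessCounter prometheus.Counter

// NewMsgInsertMongoSuccessCounter registers the counter once; the metric
// name and help text are assumed, not taken from the repo.
func NewMsgInsertMongoSuccessCounter() {
	if MsgInsertMongoSuccessCounter != nil {
		return
	}
	MsgInsertMongoSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "msg_insert_mongo_success_total",
		Help: "The total number of msgs successfully inserted into mongo",
	})
}

// PromeInc increments a counter, tolerating ones never registered
// (e.g. when Prometheus is disabled in config).
func PromeInc(counter prometheus.Counter) {
	if counter != nil {
		counter.Inc()
	}
}
```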

View File

@ -88,9 +88,11 @@ type Android struct {
}
type Notification struct {
Title string `json:"title"`
Body string `json:"body"`
ClickType string `json:"click_type"`
Title string `json:"title"`
Body string `json:"body"`
ChannelID string `json:"channelID"`
ChannelName string `json:"ChannelName"`
ClickType string `json:"click_type"`
}
type Options struct {
@ -135,9 +137,11 @@ func (g *Getui) Push(userIDList []string, title, detailContent, operationID stri
}{Alias: []string{userIDList[0]}},
}
pushReq.PushMessage.Notification = Notification{
Title: title,
Body: detailContent,
ClickType: "startapp",
Title: title,
Body: detailContent,
ClickType: "startapp",
ChannelID: config.Config.Push.Getui.ChannelID,
ChannelName: config.Config.Push.Getui.ChannelName,
}
pushReq.PushChannel.Ios.Aps.Sound = "default"
pushReq.PushChannel.Ios.Aps.Alert = Alert{
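
The Getui pusher now threads the Android notification channel through the payload. Note the asymmetric struct tags above: ChannelID marshals as channelID but ChannelName as ChannelName. A quick sketch of the resulting JSON, assuming exactly those tags:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Notification struct {
	Title       string `json:"title"`
	Body        string `json:"body"`
	ChannelID   string `json:"channelID"`
	ChannelName string `json:"ChannelName"` // capitalized key, as in the diff
	ClickType   string `json:"click_type"`
}

func main() {
	b, _ := json.Marshal(Notification{
		Title: "t", Body: "b", ChannelID: "c1", ChannelName: "General", ClickType: "startapp",
	})
	fmt.Println(string(b))
	// {"title":"t","body":"b","channelID":"c1","ChannelName":"General","click_type":"startapp"}
}
```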

View File

@ -47,6 +47,11 @@ func init() {
}
}
func initPrometheus() {
promePkg.NewMsgOfflinePushSuccessCounter()
promePkg.NewMsgOfflinePushFailedCounter()
}
func Run(promethuesPort int) {
go rpcServer.run()
go pushCh.pushConsumerGroup.RegisterHandleAndConsumer(&pushCh)

View File

@ -4,14 +4,17 @@ import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbPush "Open_IM/pkg/proto/push"
"Open_IM/pkg/utils"
"context"
"net"
"strconv"
"strings"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
@ -42,7 +45,18 @@ func (r *RPCServer) run() {
panic("listening err:" + err.Error() + r.rpcRegisterName)
}
defer listener.Close()
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
pbPush.RegisterPushMsgServiceServer(srv, r)
rpcRegisterIP := config.Config.RpcRegisterIP

View File

@ -21,6 +21,7 @@ import (
"context"
"strings"
promePkg "Open_IM/pkg/common/prometheus"
"github.com/golang/protobuf/proto"
)
@ -140,8 +141,10 @@ func MsgToUser(pushMsg *pbPush.PushMsgReq) {
}
pushResult, err := offlinePusher.Push(UIDList, title, detailContent, pushMsg.OperationID, opts)
if err != nil {
promePkg.PromeInc(promePkg.MsgOfflinePushFailedCounter)
log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error())
} else {
promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter)
log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData)
}
}
@ -261,12 +264,13 @@ func MsgToSuperGroupUser(pushMsg *pbPush.PushMsgReq) {
}
pushResult, err := offlinePusher.Push(needOfflinePushUserIDList, title, detailContent, pushMsg.OperationID, opts)
if err != nil {
promePkg.PromeInc(promePkg.MsgOfflinePushFailedCounter)
log.NewError(pushMsg.OperationID, "offline push error", pushMsg.String(), err.Error())
} else {
promePkg.PromeInc(promePkg.MsgOfflinePushSuccessCounter)
log.NewDebug(pushMsg.OperationID, "offline push return result is ", pushResult, pushMsg.MsgData)
}
}
}
}

View File

@ -6,10 +6,14 @@ import (
"Open_IM/pkg/common/db"
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbAdminCMS "Open_IM/pkg/proto/admin_cms"
server_api_params "Open_IM/pkg/proto/sdk_ws"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"Open_IM/pkg/utils"
"context"
"errors"
@ -57,8 +61,18 @@ func (s *adminCMSServer) Run() {
}
log.NewInfo("0", "listen network success, ", address, listener)
defer listener.Close()
//grpc server
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//Service registers with etcd
pbAdminCMS.RegisterAdminCMSServer(srv, s)

View File

@ -5,6 +5,7 @@ import (
"Open_IM/pkg/common/db"
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbAuth "Open_IM/pkg/proto/auth"
@ -16,6 +17,8 @@ import (
"strconv"
"strings"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"Open_IM/pkg/common/config"
"google.golang.org/grpc"
@ -35,6 +38,7 @@ func (rpc *rpcAuth) UserRegister(_ context.Context, req *pbAuth.UserRegisterReq)
log.NewError(req.OperationID, errMsg, user)
return &pbAuth.UserRegisterResp{CommonResp: &pbAuth.CommonResp{ErrCode: constant.ErrDB.ErrCode, ErrMsg: errMsg}}, nil
}
promePkg.PromeInc(promePkg.UserRegisterCounter)
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), " rpc return ", pbAuth.UserRegisterResp{CommonResp: &pbAuth.CommonResp{}})
return &pbAuth.UserRegisterResp{CommonResp: &pbAuth.CommonResp{}}, nil
}
@ -47,6 +51,7 @@ func (rpc *rpcAuth) UserToken(_ context.Context, req *pbAuth.UserTokenReq) (*pbA
log.NewError(req.OperationID, errMsg)
return &pbAuth.UserTokenResp{CommonResp: &pbAuth.CommonResp{ErrCode: constant.ErrDB.ErrCode, ErrMsg: errMsg}}, nil
}
promePkg.PromeInc(promePkg.UserLoginCounter)
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), " rpc return ", pbAuth.UserTokenResp{CommonResp: &pbAuth.CommonResp{}, Token: tokens, ExpiredTime: expTime})
return &pbAuth.UserTokenResp{CommonResp: &pbAuth.CommonResp{}, Token: tokens, ExpiredTime: expTime}, nil
}
@ -118,8 +123,20 @@ func (rpc *rpcAuth) Run() {
panic("listening err:" + err.Error() + rpc.rpcRegisterName)
}
log.NewInfo(operationID, "listen network success, ", address, listener)
//grpc server
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
promePkg.NewUserRegisterCounter()
promePkg.NewUserLoginCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//service registers with etcd

View File

@ -5,6 +5,7 @@ import (
"Open_IM/pkg/common/constant"
rocksCache "Open_IM/pkg/common/db/rocks_cache"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbCache "Open_IM/pkg/proto/cache"
"Open_IM/pkg/utils"
@ -13,6 +14,7 @@ import (
"strconv"
"strings"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
@ -50,8 +52,18 @@ func (s *cacheServer) Run() {
log.NewInfo("0", "listen network success, ", address, listener)
defer listener.Close()
//grpc server
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
pbCache.RegisterCacheServer(srv, s)

View File

@ -7,6 +7,7 @@ import (
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
rocksCache "Open_IM/pkg/common/db/rocks_cache"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbConversation "Open_IM/pkg/proto/conversation"
"Open_IM/pkg/utils"
@ -15,6 +16,8 @@ import (
"strconv"
"strings"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"Open_IM/pkg/common/config"
"google.golang.org/grpc"
@ -184,7 +187,18 @@ func (rpc *rpcConversation) Run() {
}
log.NewInfo("0", "listen network success, ", address, listener)
//grpc server
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//service registers with etcd

View File

@ -6,8 +6,9 @@ import (
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
"Open_IM/pkg/common/db/rocks_cache"
rocksCache "Open_IM/pkg/common/db/rocks_cache"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
cp "Open_IM/pkg/common/utils"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
@ -21,6 +22,8 @@ import (
"strings"
"time"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
@ -60,7 +63,18 @@ func (s *friendServer) Run() {
log.NewInfo("0", "listen ok ", address)
defer listener.Close()
//grpc server
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//User friend related services register to etcd
pbFriend.RegisterFriendServer(srv, s)

View File

@ -8,6 +8,7 @@ import (
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
rocksCache "Open_IM/pkg/common/db/rocks_cache"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
cp "Open_IM/pkg/common/utils"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
@ -25,6 +26,8 @@ import (
"strings"
"time"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
"gorm.io/gorm"
)
@ -65,11 +68,21 @@ func (s *groupServer) Run() {
//grpc server
recvSize := 1024 * 1024 * constant.GroupRPCRecvSize
sendSize := 1024 * 1024 * constant.GroupRPCSendSize
var options = []grpc.ServerOption{
var grpcOpts = []grpc.ServerOption{
grpc.MaxRecvMsgSize(recvSize),
grpc.MaxSendMsgSize(sendSize),
}
srv := grpc.NewServer(options...)
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//Service registers with etcd
pbGroup.RegisterGroupServer(srv, s)
@ -1552,6 +1565,9 @@ func (s *groupServer) DismissGroup(ctx context.Context, req *pbGroup.DismissGrou
if err := rocksCache.DelGroupInfoFromCache(req.GroupID); err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), err.Error(), req.GroupID)
}
if err := rocksCache.DelGroupMemberListHashFromCache(req.GroupID); err != nil {
log.NewError(req.OperationID, utils.GetSelfFuncName(), err.Error(), req.GroupID)
}
log.NewInfo(req.OperationID, utils.GetSelfFuncName(), "rpc return ", pbGroup.CommonResp{ErrCode: 0, ErrMsg: ""})
return &pbGroup.DismissGroupResp{CommonResp: &pbGroup.CommonResp{ErrCode: 0, ErrMsg: ""}}, nil
}

View File

@ -8,6 +8,8 @@ import (
commonDB "Open_IM/pkg/common/db"
"Open_IM/pkg/common/log"
open_im_sdk "Open_IM/pkg/proto/sdk_ws"
promePkg "Open_IM/pkg/common/prometheus"
)
func (rpc *rpcChat) GetMaxAndMinSeq(_ context.Context, in *open_im_sdk.GetMaxAndMinSeqReq) (*open_im_sdk.GetMaxAndMinSeqResp, error) {
@ -48,57 +50,62 @@ func (rpc *rpcChat) PullMessageBySeqList(_ context.Context, in *open_im_sdk.Pull
log.NewInfo(in.OperationID, "rpc PullMessageBySeqList is arriving", in.String())
resp := new(open_im_sdk.PullMessageBySeqListResp)
m := make(map[string]*open_im_sdk.MsgDataList)
//msgList, err := commonDB.DB.GetMsgBySeqList(in.UserID, in.SeqList, in.OperationID)
redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(in.UserID, in.SeqList, in.OperationID)
if err != nil {
if err != go_redis.Nil {
promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList)
} else {
log.Debug(in.OperationID, "get message from redis is nil", failedSeqList)
}
msgList, err1 := commonDB.DB.GetMsgBySeqListMongo2(in.UserID, failedSeqList, in.OperationID)
if err1 != nil {
promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err.Error())
resp.ErrCode = 201
resp.ErrMsg = err.Error()
return resp, nil
} else {
promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList))
redisMsgList = append(redisMsgList, msgList...)
resp.List = redisMsgList
}
} else {
promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList))
resp.List = redisMsgList
}
for k, v := range in.GroupSeqList {
x := new(open_im_sdk.MsgDataList)
redisMsgList, failedSeqList, err := commonDB.DB.GetMessageListBySeq(k, v.SeqList, in.OperationID)
if err != nil {
if err != go_redis.Nil {
promePkg.PromeAdd(promePkg.MsgPullFromRedisFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "get message from redis exception", err.Error(), failedSeqList)
} else {
log.Debug(in.OperationID, "get message from redis is nil", failedSeqList)
}
msgList, err1 := commonDB.DB.GetSuperGroupMsgBySeqListMongo(k, failedSeqList, in.OperationID)
if err1 != nil {
promePkg.PromeAdd(promePkg.MsgPullFromMongoFailedCounter, len(failedSeqList))
log.Error(in.OperationID, "PullMessageBySeqList data error", in.String(), err.Error())
resp.ErrCode = 201
resp.ErrMsg = err.Error()
return resp, nil
} else {
promePkg.PromeAdd(promePkg.MsgPullFromMongoSuccessCounter, len(msgList))
redisMsgList = append(redisMsgList, msgList...)
x.MsgDataList = redisMsgList
m[k] = x
}
} else {
promePkg.PromeAdd(promePkg.MsgPullFromRedisSuccessCounter, len(redisMsgList))
x.MsgDataList = redisMsgList
m[k] = x
}
}
resp.GroupMsgDataList = m
//respSingleMsgFormat = singleMsgHandleByUser(SingleMsgFormat, in.UserID)
//respGroupMsgFormat = groupMsgHandleByUser(GroupMsgFormat)
return resp, nil
}
type MsgFormats []*open_im_sdk.MsgData
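
The pull path above follows one pattern for both the single-chat and group loops: serve seqs from Redis, fall back to Mongo for whatever Redis missed, and count every outcome via PromeAdd. Its control flow, reduced to a sketch with stand-in types and fetchers (the repo's are commonDB.DB.GetMessageListBySeq and GetMsgBySeqListMongo2):

```go
package msg

import "github.com/go-redis/redis/v8"

type Msg struct{ Seq uint32 }

// Stand-in fetchers: the Redis one reports which seqs it could not serve.
func getFromRedis(userID string, seqs []uint32) ([]*Msg, []uint32, error) {
	return nil, seqs, redis.Nil // pretend every seq missed the cache
}
func getFromMongo(userID string, seqs []uint32) ([]*Msg, error) {
	return make([]*Msg, len(seqs)), nil
}

func pullBySeq(userID string, seqList []uint32) ([]*Msg, error) {
	fromRedis, failedSeqs, err := getFromRedis(userID, seqList)
	if err == nil {
		// full cache hit; the commit bumps MsgPullFromRedisSuccessCounter
		return fromRedis, nil
	}
	if err != redis.Nil {
		// real Redis failure; MsgPullFromRedisFailedCounter in the commit
	}
	fromMongo, err := getFromMongo(userID, failedSeqs)
	if err != nil {
		return nil, err // MsgPullFromMongoFailedCounter in the commit
	}
	// MsgPullFromMongoSuccessCounter, then merge both sources
	return append(fromRedis, fromMongo...), nil
}
```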

View File

@ -6,15 +6,24 @@ import (
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/kafka"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
"Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils"
"google.golang.org/grpc"
"net"
"strconv"
"strings"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
//var (
// sendMsgSuccessCounter prometheus.Counter
// sendMsgFailedCounter prometheus.Counter
//)
type rpcChat struct {
rpcPort int
rpcRegisterName string
@ -46,6 +55,32 @@ func NewRpcChatServer(port int) *rpcChat {
return &rc
}
func (rpc *rpcChat) initPrometheus() {
//sendMsgSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
// Name: "send_msg_success",
// Help: "The number of send msg success",
//})
//sendMsgFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
// Name: "send_msg_failed",
// Help: "The number of send msg failed",
//})
promePkg.NewMsgPullFromRedisSuccessCounter()
promePkg.NewMsgPullFromRedisFailedCounter()
promePkg.NewMsgPullFromMongoSuccessCounter()
promePkg.NewMsgPullFromMongoFailedCounter()
promePkg.NewSingleChatMsgRecvSuccessCounter()
promePkg.NewGroupChatMsgRecvSuccessCounter()
promePkg.NewWorkSuperGroupChatMsgRecvSuccessCounter()
promePkg.NewSingleChatMsgProcessSuccessCounter()
promePkg.NewSingleChatMsgProcessFailedCounter()
promePkg.NewGroupChatMsgProcessSuccessCounter()
promePkg.NewGroupChatMsgProcessFailedCounter()
promePkg.NewWorkSuperGroupChatMsgProcessSuccessCounter()
promePkg.NewWorkSuperGroupChatMsgProcessFailedCounter()
}
func (rpc *rpcChat) Run() {
log.Info("", "rpcChat init...")
listenIP := ""
@ -61,7 +96,18 @@ func (rpc *rpcChat) Run() {
}
log.Info("", "listen network success, address ", address)
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
rpcRegisterIP := config.Config.RpcRegisterIP
@ -78,6 +124,7 @@ func (rpc *rpcChat) Run() {
panic(utils.Wrap(err, "register chat module rpc to etcd err"))
}
go rpc.runCh()
rpc.initPrometheus()
err = srv.Serve(listener)
if err != nil {
log.Error("", "rpc rpcChat failed ", err.Error())

View File

@ -23,6 +23,7 @@ import (
"sync"
"time"
promePkg "Open_IM/pkg/common/prometheus"
go_redis "github.com/go-redis/redis/v8"
"github.com/golang/protobuf/proto"
)
@ -30,7 +31,9 @@ import (
//When the number of group members is greater than this value, online users are served first to guarantee availability
const GroupMemberNum = 500
var ExcludeContentType = []int{constant.HasReadReceipt, constant.GroupHasReadReceipt}
var (
ExcludeContentType = []int{constant.HasReadReceipt, constant.GroupHasReadReceipt}
)
type MsgCallBackReq struct {
SendID string `json:"sendID"`
@ -242,11 +245,11 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
if !flag {
return returnMsg(&replay, pb, errCode, errMsg, "", 0)
}
flag, errCode, errMsg, _ = messageVerification(pb)
log.Info(pb.OperationID, "messageVerification ", flag, " cost time: ", time.Since(t1))
if !flag {
return returnMsg(&replay, pb, errCode, errMsg, "", 0)
}
//flag, errCode, errMsg, _ = messageVerification(pb)
//log.Info(pb.OperationID, "messageVerification ", flag, " cost time: ", time.Since(t1))
//if !flag {
// return returnMsg(&replay, pb, errCode, errMsg, "", 0)
//}
t1 = time.Now()
rpc.encapsulateMsgData(pb.MsgData)
log.Info(pb.OperationID, "encapsulateMsgData ", " cost time: ", time.Since(t1))
@ -268,6 +271,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
}
switch pb.MsgData.SessionType {
case constant.SingleChatType:
promePkg.PromeInc(promePkg.SingleChatMsgRecvSuccessCounter)
// callback
t1 = time.Now()
callbackResp := callbackBeforeSendSingleMsg(pb)
@ -280,8 +284,14 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
callbackResp.ErrCode = 201
}
log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSingleMsg result", "end rpc and return", callbackResp)
promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0)
}
flag, errCode, errMsg, _ = messageVerification(pb)
log.Info(pb.OperationID, "messageVerification ", flag, " cost time: ", time.Since(t1))
if !flag {
return returnMsg(&replay, pb, errCode, errMsg, "", 0)
}
t1 = time.Now()
isSend := modifyMessageByUserMessageReceiveOpt(pb.MsgData.RecvID, pb.MsgData.SendID, constant.SingleChatType, pb)
log.Info(pb.OperationID, "modifyMessageByUserMessageReceiveOpt ", " cost time: ", time.Since(t1))
@ -293,6 +303,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
log.Info(pb.OperationID, "sendMsgToKafka ", " cost time: ", time.Since(t1))
if err1 != nil {
log.NewError(msgToMQSingle.OperationID, "kafka send msg err :RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String(), err1.Error())
promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
}
}
@ -302,6 +313,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
log.Info(pb.OperationID, "sendMsgToKafka ", " cost time: ", time.Since(t1))
if err2 != nil {
log.NewError(msgToMQSingle.OperationID, "kafka send msg err:SendID", msgToMQSingle.MsgData.SendID, msgToMQSingle.String())
promePkg.PromeInc(promePkg.SingleChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
}
}
@ -313,9 +325,11 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSingleMsg resp: ", callbackResp)
}
log.Debug(pb.OperationID, "send msg cost time all: ", db.GetCurrentTimestampByMill()-newTime, pb.MsgData.ClientMsgID)
promePkg.PromeInc(promePkg.SingleChatMsgProcessSuccessCounter)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
case constant.GroupChatType:
// callback
promePkg.PromeInc(promePkg.GroupChatMsgRecvSuccessCounter)
callbackResp := callbackBeforeSendGroupMsg(pb)
if callbackResp.ErrCode != 0 {
log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendGroupMsg resp:", callbackResp)
@ -325,10 +339,12 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
callbackResp.ErrCode = 201
}
log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendGroupMsg result", "end rpc and return", callbackResp)
promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0)
}
var memberUserIDList []string
if flag, errCode, errMsg, memberUserIDList = messageVerification(pb); !flag {
promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, errCode, errMsg, "", 0)
}
log.Debug(pb.OperationID, "GetGroupAllMember userID list", memberUserIDList, "len: ", len(memberUserIDList))
@ -393,6 +409,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
}
if !sendTag {
log.NewWarn(pb.OperationID, "send tag is ", sendTag)
promePkg.PromeInc(promePkg.GroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
} else {
if pb.MsgData.ContentType == constant.AtText {
@ -457,6 +474,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
}()
}
log.Debug(pb.OperationID, "send msg cost time3 ", db.GetCurrentTimestampByMill()-newTime, pb.MsgData.ClientMsgID)
promePkg.PromeInc(promePkg.GroupChatMsgProcessSuccessCounter)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
}
case constant.NotificationChatType:
@ -479,6 +497,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
log.Debug(pb.OperationID, "send msg cost time ", db.GetCurrentTimestampByMill()-newTime, pb.MsgData.ClientMsgID)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
case constant.SuperGroupChatType:
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgRecvSuccessCounter)
// callback
callbackResp := callbackBeforeSendGroupMsg(pb)
if callbackResp.ErrCode != 0 {
@ -488,10 +507,12 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
if callbackResp.ErrCode == 0 {
callbackResp.ErrCode = 201
}
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter)
log.NewDebug(pb.OperationID, utils.GetSelfFuncName(), "callbackBeforeSendSuperGroupMsg result", "end rpc and return", callbackResp)
return returnMsg(&replay, pb, int32(callbackResp.ErrCode), callbackResp.ErrMsg, "", 0)
}
if flag, errCode, errMsg, _ = messageVerification(pb); !flag {
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, errCode, errMsg, "", 0)
}
msgToMQSingle.MsgData = pb.MsgData
@ -499,6 +520,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
err1 := rpc.sendMsgToKafka(&msgToMQSingle, msgToMQSingle.MsgData.GroupID, constant.OnlineStatus)
if err1 != nil {
log.NewError(msgToMQSingle.OperationID, "kafka send msg err:RecvID", msgToMQSingle.MsgData.RecvID, msgToMQSingle.String())
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessFailedCounter)
return returnMsg(&replay, pb, 201, "kafka send msg err", "", 0)
}
// callback
@ -506,6 +528,7 @@ func (rpc *rpcChat) SendMsg(_ context.Context, pb *pbChat.SendMsgReq) (*pbChat.S
if callbackResp.ErrCode != 0 {
log.NewError(pb.OperationID, utils.GetSelfFuncName(), "callbackAfterSendSuperGroupMsg resp: ", callbackResp)
}
promePkg.PromeInc(promePkg.WorkSuperGroupChatMsgProcessSuccessCounter)
return returnMsg(&replay, pb, 0, "", msgToMQSingle.MsgData.ServerMsgID, msgToMQSingle.MsgData.SendTime)
default:

View File

@ -8,6 +8,7 @@ import (
"Open_IM/pkg/common/db/mysql_model/im_mysql_model"
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbCache "Open_IM/pkg/proto/cache"
pbOffice "Open_IM/pkg/proto/office"
@ -21,6 +22,8 @@ import (
"time"
"unsafe"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
@ -63,11 +66,21 @@ func (s *officeServer) Run() {
//grpc server
recvSize := 1024 * 1024 * 30
sendSize := 1024 * 1024 * 30
var options = []grpc.ServerOption{
var grpcOpts = []grpc.ServerOption{
grpc.MaxRecvMsgSize(recvSize),
grpc.MaxSendMsgSize(sendSize),
}
srv := grpc.NewServer(options...)
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//Service registers with etcd
pbOffice.RegisterOfficeServiceServer(srv, s)

View File

@ -8,6 +8,7 @@ import (
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
rocksCache "Open_IM/pkg/common/db/rocks_cache"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbAuth "Open_IM/pkg/proto/auth"
@ -21,6 +22,8 @@ import (
"strings"
"time"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
@ -58,7 +61,18 @@ func (s *organizationServer) Run() {
log.NewInfo("", "listen network success, ", address, listener)
defer listener.Close()
//grpc server
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//Service registers with etcd
rpc.RegisterOrganizationServer(srv, s)

View File

@ -8,6 +8,7 @@ import (
imdb "Open_IM/pkg/common/db/mysql_model/im_mysql_model"
rocksCache "Open_IM/pkg/common/db/rocks_cache"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
"Open_IM/pkg/common/token_verify"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbConversation "Open_IM/pkg/proto/conversation"
@ -21,6 +22,8 @@ import (
"strconv"
"strings"
grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
"gorm.io/gorm"
)
@ -61,7 +64,18 @@ func (s *userServer) Run() {
log.NewInfo("0", "listen network success, address ", address, listener)
defer listener.Close()
//grpc server
srv := grpc.NewServer()
var grpcOpts []grpc.ServerOption
if config.Config.Prometheus.Enable {
promePkg.NewGrpcRequestCounter()
promePkg.NewGrpcRequestFailedCounter()
promePkg.NewGrpcRequestSuccessCounter()
grpcOpts = append(grpcOpts, []grpc.ServerOption{
// grpc.UnaryInterceptor(promePkg.UnaryServerInterceptorProme),
grpc.StreamInterceptor(grpcPrometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpcPrometheus.UnaryServerInterceptor),
}...)
}
srv := grpc.NewServer(grpcOpts...)
defer srv.GracefulStop()
//Service registers with etcd
pbUser.RegisterUserServer(srv, s)

View File

@ -33,6 +33,13 @@ func GetGroupMemberUserIDList(groupID string, operationID string) ([]string, err
CacheGroupMtx.Lock()
defer CacheGroupMtx.Unlock()
if groupHashRemote == 0 {
log.Info(operationID, "groupHashRemote == 0 ", groupID)
delete(CacheGroupMemberUserIDList, groupID)
return []string{}, nil
}
groupInLocalCache, ok := CacheGroupMemberUserIDList[groupID]
if ok && groupInLocalCache.MemberListHash == groupHashRemote {
log.Debug(operationID, "in local cache ", groupID)
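The remote hash works as a version stamp for the local cache: the member list is refetched only when the hash changes, and a zero hash (dismissed group) evicts the entry. A condensed sketch of that guard, with illustrative names:
type cachedMembers struct {
	MemberListHash uint64
	UserIDList     []string
}
var localCache = map[string]cachedMembers{}
func membersWithHashGuard(groupID string, remoteHash uint64, fetch func(string) ([]string, error)) ([]string, error) {
	if remoteHash == 0 { // dismissed group: drop any stale entry
		delete(localCache, groupID)
		return []string{}, nil
	}
	if c, ok := localCache[groupID]; ok && c.MemberListHash == remoteHash {
		return c.UserIDList, nil // still fresh, no remote fetch
	}
	list, err := fetch(groupID)
	if err != nil {
		return nil, err
	}
	localCache[groupID] = cachedMembers{MemberListHash: remoteHash, UserIDList: list}
	return list, nil
}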

View File

@ -1,7 +1,6 @@
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -100,19 +99,21 @@ type config struct {
DBMaxOpenConns int `yaml:"dbMaxOpenConns"`
DBMaxIdleConns int `yaml:"dbMaxIdleConns"`
DBMaxLifeTime int `yaml:"dbMaxLifeTime"`
LogLevel int `yaml:"logLevel"`
SlowThreshold int `yaml:"slowThreshold"`
}
Mongo struct {
DBUri string `yaml:"dbUri"`
DBAddress string `yaml:"dbAddress"`
DBDirect bool `yaml:"dbDirect"`
DBTimeout int `yaml:"dbTimeout"`
DBDatabase string `yaml:"dbDatabase"`
DBSource string `yaml:"dbSource"`
DBUserName string `yaml:"dbUserName"`
DBPassword string `yaml:"dbPassword"`
DBMaxPoolSize int `yaml:"dbMaxPoolSize"`
DBRetainChatRecords int `yaml:"dbRetainChatRecords"`
ChatRecordsClearTime string `yaml:"chatRecordsClearTime"`
DBUri string `yaml:"dbUri"`
DBAddress []string `yaml:"dbAddress"`
DBDirect bool `yaml:"dbDirect"`
DBTimeout int `yaml:"dbTimeout"`
DBDatabase string `yaml:"dbDatabase"`
DBSource string `yaml:"dbSource"`
DBUserName string `yaml:"dbUserName"`
DBPassword string `yaml:"dbPassword"`
DBMaxPoolSize int `yaml:"dbMaxPoolSize"`
DBRetainChatRecords int `yaml:"dbRetainChatRecords"`
ChatRecordsClearTime string `yaml:"chatRecordsClearTime"`
}
Redis struct {
DBAddress []string `yaml:"dbAddress"`
@ -157,6 +158,8 @@ type config struct {
Etcd struct {
EtcdSchema string `yaml:"etcdSchema"`
EtcdAddr []string `yaml:"etcdAddr"`
UserName string `yaml:"userName"`
Password string `yaml:"password"`
}
Log struct {
StorageLocation string `yaml:"storageLocation"`
@ -205,6 +208,8 @@ type config struct {
Enable bool `yaml:"enable"`
Intent string `yaml:"intent"`
MasterSecret string `yaml:"masterSecret"`
ChannelID string `yaml:"channelID"`
ChannelName string `yaml:"channelName"`
}
Fcm struct {
ServiceAccount string `yaml:"serviceAccount"`
@ -218,7 +223,9 @@ type config struct {
}
Kafka struct {
Ws2mschat struct {
SASLUserName string `yaml:"SASLUserName"`
SASLPassword string `yaml:"SASLPassword"`
Ws2mschat struct {
Addr []string `yaml:"addr"`
Topic string `yaml:"topic"`
}
@ -545,7 +552,6 @@ type PDefaultTips struct {
func init() {
cfgName := os.Getenv("CONFIG_NAME")
fmt.Println("GET IM DEFAULT CONFIG PATH :", Root, "ENV PATH:", cfgName)
if len(cfgName) != 0 {
bytes, err := ioutil.ReadFile(filepath.Join(cfgName, "config", "config.yaml"))
if err != nil {

View File

@ -336,3 +336,5 @@ const LogFileName = "OpenIM.log"
const StatisticsTimeInterval = 60
const MaxNotificationNum = 100
const CurrentVersion = "v2.3.3-rc0"

View File

@ -182,7 +182,6 @@ func (d *DataBases) GetMessageListBySeq(userID string, seqList []uint32, operati
for _, v := range seqList {
//MESSAGE_CACHE:169.254.225.224_reliability1653387820_0_1
key := messageCache + userID + "_" + strconv.Itoa(int(v))
result, err := d.RDB.Get(context.Background(), key).Result()
if err != nil {
errResult = err
@ -205,7 +204,7 @@ func (d *DataBases) GetMessageListBySeq(userID string, seqList []uint32, operati
return seqMsg, failedSeqList, errResult
}
func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) error {
func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) (error, int) {
ctx := context.Background()
pipe := d.RDB.Pipeline()
var failedList []pbChat.MsgDataToMQ
@ -225,10 +224,10 @@ func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string,
}
}
if len(failedList) != 0 {
return errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList, operationID))
return errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList, operationID)), len(failedList)
}
_, err := pipe.Exec(ctx)
return err
return err, 0
}
func (d *DataBases) DeleteMessageFromCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) error {
ctx := context.Background()

View File

@ -4,6 +4,7 @@ import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/log"
promePkg "Open_IM/pkg/common/prometheus"
pbMsg "Open_IM/pkg/proto/msg"
"Open_IM/pkg/utils"
"context"
@ -14,10 +15,6 @@ import (
"go.mongodb.org/mongo-driver/mongo"
)
func (d *DataBases) BatchDeleteChat2DB(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) {
}
func (d *DataBases) BatchInsertChat2DB(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string, currentMaxSeq uint64) error {
newTime := getCurrentTimestampByMill()
if len(msgList) > GetSingleGocMsgNum() {
@ -85,10 +82,13 @@ func (d *DataBases) BatchInsertChat2DB(userID string, msgList []*pbMsg.MsgDataTo
sChat.Msg = msgListToMongo
log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo)
if _, err = c.InsertOne(ctx, &sChat); err != nil {
promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter)
log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
return utils.Wrap(err, "")
}
promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter)
} else {
promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter)
log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
return utils.Wrap(err, "")
}
@ -101,9 +101,11 @@ func (d *DataBases) BatchInsertChat2DB(userID string, msgList []*pbMsg.MsgDataTo
sChat.Msg = msgListToMongoNext
log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext, "userID: ", userID)
if _, err = c.InsertOne(ctx, &sChat); err != nil {
promePkg.PromeInc(promePkg.MsgInsertMongoFailedCounter)
log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
return utils.Wrap(err, "")
}
promePkg.PromeInc(promePkg.MsgInsertMongoSuccessCounter)
}
log.Debug(operationID, "batch mgo cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
return nil
@ -129,8 +131,10 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
log.Debug(operationID, "constant.SingleChatType lastMaxSeq before add ", currentMaxSeq, "userID ", insertID, err)
}
if err != nil && err != go_redis.Nil {
promePkg.PromeInc(promePkg.SeqGetFailedCounter)
return utils.Wrap(err, ""), 0
}
promePkg.PromeInc(promePkg.SeqGetSuccessCounter)
lastMaxSeq := currentMaxSeq
for _, m := range msgList {
@ -142,9 +146,12 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
log.Debug(operationID, "cache msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", insertID, "seq: ", currentMaxSeq)
}
log.Debug(operationID, "SetMessageToCache ", insertID, len(msgList))
err = d.SetMessageToCache(msgList, insertID, operationID)
err, failedNum := d.SetMessageToCache(msgList, insertID, operationID)
if err != nil {
promePkg.PromeAdd(promePkg.MsgInsertRedisFailedCounter, failedNum)
log.Error(operationID, "setMessageToCache failed, continue ", err.Error(), len(msgList), insertID)
} else {
promePkg.PromeInc(promePkg.MsgInsertRedisSuccessCounter)
}
log.Debug(operationID, "batch to redis cost time ", getCurrentTimestampByMill()-newTime, insertID, len(msgList))
if msgList[0].MsgData.SessionType == constant.SuperGroupChatType {
@ -152,6 +159,11 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
} else {
err = d.SetUserMaxSeq(insertID, currentMaxSeq)
}
if err != nil {
promePkg.PromeInc(promePkg.SeqSetFailedCounter)
} else {
promePkg.PromeInc(promePkg.SeqSetSuccessCounter)
}
return utils.Wrap(err, ""), lastMaxSeq
}
@ -171,106 +183,100 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
// }
// return nil, lastMaxSeq
//}
func (d *DataBases) BatchInsertChat(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) error {
newTime := getCurrentTimestampByMill()
if len(msgList) > GetSingleGocMsgNum() {
return errors.New("too large")
}
isInit := false
currentMaxSeq, err := d.GetUserMaxSeq(userID)
if err == nil {
} else if err == go_redis.Nil {
isInit = true
currentMaxSeq = 0
} else {
return utils.Wrap(err, "")
}
var remain uint64
//if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
// remain = uint64(GetSingleGocMsgNum()-1) - (currentMaxSeq % uint64(GetSingleGocMsgNum()))
//} else {
// remain = uint64(GetSingleGocMsgNum()) - ((currentMaxSeq - (uint64(GetSingleGocMsgNum()) - 1)) % uint64(GetSingleGocMsgNum()))
//}
blk0 := uint64(GetSingleGocMsgNum() - 1)
if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
remain = blk0 - currentMaxSeq
} else {
excludeBlk0 := currentMaxSeq - blk0
remain = (uint64(GetSingleGocMsgNum()) - (excludeBlk0 % uint64(GetSingleGocMsgNum()))) % uint64(GetSingleGocMsgNum())
}
insertCounter := uint64(0)
msgListToMongo := make([]MsgInfo, 0)
msgListToMongoNext := make([]MsgInfo, 0)
seqUid := ""
seqUidNext := ""
log.Debug(operationID, "remain ", remain, "insertCounter ", insertCounter, "currentMaxSeq ", currentMaxSeq, userID, len(msgList))
//4998 remain ==1
//4999
for _, m := range msgList {
log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
currentMaxSeq++
sMsg := MsgInfo{}
sMsg.SendTime = m.MsgData.SendTime
m.MsgData.Seq = uint32(currentMaxSeq)
if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
return utils.Wrap(err, "")
}
if isInit {
msgListToMongoNext = append(msgListToMongoNext, sMsg)
seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
continue
}
if insertCounter < remain {
msgListToMongo = append(msgListToMongo, sMsg)
insertCounter++
seqUid = getSeqUid(userID, uint32(currentMaxSeq))
log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
} else {
msgListToMongoNext = append(msgListToMongoNext, sMsg)
seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
}
}
// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
ctx := context.Background()
c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
if seqUid != "" {
filter := bson.M{"uid": seqUid}
log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo)
err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err()
if err != nil {
log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
return utils.Wrap(err, "")
}
}
if seqUidNext != "" {
filter := bson.M{"uid": seqUidNext}
sChat := UserChat{}
sChat.UID = seqUidNext
sChat.Msg = msgListToMongoNext
log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext)
if _, err = c.InsertOne(ctx, &sChat); err != nil {
log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
return utils.Wrap(err, "")
}
}
log.NewWarn(operationID, "batch mgo cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
return utils.Wrap(d.SetUserMaxSeq(userID, uint64(currentMaxSeq)), "")
}
//
//func (d *DataBases) BatchInsertChat(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) error {
// newTime := getCurrentTimestampByMill()
// if len(msgList) > GetSingleGocMsgNum() {
// return errors.New("too large")
// }
// isInit := false
// currentMaxSeq, err := d.GetUserMaxSeq(userID)
// if err == nil {
//
// } else if err == go_redis.Nil {
// isInit = true
// currentMaxSeq = 0
// } else {
// return utils.Wrap(err, "")
// }
// var remain uint64
// //if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
// // remain = uint64(GetSingleGocMsgNum()-1) - (currentMaxSeq % uint64(GetSingleGocMsgNum()))
// //} else {
// // remain = uint64(GetSingleGocMsgNum()) - ((currentMaxSeq - (uint64(GetSingleGocMsgNum()) - 1)) % uint64(GetSingleGocMsgNum()))
// //}
//
// blk0 := uint64(GetSingleGocMsgNum() - 1)
// if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
// remain = blk0 - currentMaxSeq
// } else {
// excludeBlk0 := currentMaxSeq - blk0
// remain = (uint64(GetSingleGocMsgNum()) - (excludeBlk0 % uint64(GetSingleGocMsgNum()))) % uint64(GetSingleGocMsgNum())
// }
//
// insertCounter := uint64(0)
// msgListToMongo := make([]MsgInfo, 0)
// msgListToMongoNext := make([]MsgInfo, 0)
// seqUid := ""
// seqUidNext := ""
// log.Debug(operationID, "remain ", remain, "insertCounter ", insertCounter, "currentMaxSeq ", currentMaxSeq, userID, len(msgList))
// //4998 remain ==1
// //4999
// for _, m := range msgList {
// log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
// currentMaxSeq++
// sMsg := MsgInfo{}
// sMsg.SendTime = m.MsgData.SendTime
// m.MsgData.Seq = uint32(currentMaxSeq)
// if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
// return utils.Wrap(err, "")
// }
// if isInit {
// msgListToMongoNext = append(msgListToMongoNext, sMsg)
// seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
// log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
// continue
// }
// if insertCounter < remain {
// msgListToMongo = append(msgListToMongo, sMsg)
// insertCounter++
// seqUid = getSeqUid(userID, uint32(currentMaxSeq))
// log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
// } else {
// msgListToMongoNext = append(msgListToMongoNext, sMsg)
// seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
// log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
// }
// }
// // ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
//
// ctx := context.Background()
// c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
//
// if seqUid != "" {
// filter := bson.M{"uid": seqUid}
// log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo)
// err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err()
// if err != nil {
// log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
// return utils.Wrap(err, "")
// }
// }
// if seqUidNext != "" {
// filter := bson.M{"uid": seqUidNext}
// sChat := UserChat{}
// sChat.UID = seqUidNext
// sChat.Msg = msgListToMongoNext
// log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext)
// if _, err = c.InsertOne(ctx, &sChat); err != nil {
// log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
// return utils.Wrap(err, "")
// }
// }
// log.NewWarn(operationID, "batch mgo cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
// return utils.Wrap(d.SetUserMaxSeq(userID, uint64(currentMaxSeq)), "")
//}
//func (d *DataBases)setMessageToCache(msgList []*pbMsg.MsgDataToMQ, uid string) (err error) {
//
//}
func (d *DataBases) GetFromCacheAndInsertDB(msgUserIDPrefix string) {
//get value from redis
//batch insert to db
}

View File

@ -58,15 +58,30 @@ func init() {
// example: mongodb://$user:$password@mongo1.mongo:27017,mongo2.mongo:27017,mongo3.mongo:27017/$DBDatabase/?replicaSet=rs0&readPreference=secondary&authSource=admin&maxPoolSize=$DBMaxPoolSize
uri = config.Config.Mongo.DBUri
} else {
//mongodb://mongodb1.example.com:27317,mongodb2.example.com:27017/?replicaSet=mySet&authSource=authDB
mongodbHosts := ""
for i, v := range config.Config.Mongo.DBAddress {
if i == len(config.Config.Mongo.DBAddress)-1 {
mongodbHosts += v
} else {
mongodbHosts += v + ","
}
}
if config.Config.Mongo.DBPassword != "" && config.Config.Mongo.DBUserName != "" {
uri = fmt.Sprintf("mongodb://%s:%s@%s/%s?maxPoolSize=%d&authSource=admin", config.Config.Mongo.DBUserName, config.Config.Mongo.DBPassword, config.Config.Mongo.DBAddress,
// clientOpts := options.Client().ApplyURI("mongodb://localhost:27017,localhost:27018/?replicaSet=replset")
//mongodb://[username:password@]host1[:port1][,...hostN[:portN]][/[defaultauthdb][?options]]
//uri = fmt.Sprintf("mongodb://%s:%s@%s/%s?maxPoolSize=%d&authSource=admin&replicaSet=replset",
uri = fmt.Sprintf("mongodb://%s:%s@%s/%s?maxPoolSize=%d&authSource=admin",
config.Config.Mongo.DBUserName, config.Config.Mongo.DBPassword, mongodbHosts,
config.Config.Mongo.DBDatabase, config.Config.Mongo.DBMaxPoolSize)
} else {
uri = fmt.Sprintf("mongodb://%s/%s/?maxPoolSize=%d&authSource=admin",
config.Config.Mongo.DBAddress, config.Config.Mongo.DBDatabase,
mongodbHosts, config.Config.Mongo.DBDatabase,
config.Config.Mongo.DBMaxPoolSize)
}
}
mongoClient, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(uri))
if err != nil {
fmt.Println(" mongo.Connect failed, try ", utils.GetSelfFuncName(), err.Error(), uri)
@ -77,7 +92,7 @@ func init() {
panic(err1.Error())
}
}
fmt.Println("0", utils.GetSelfFuncName(), "mongo driver client init success: ", uri)
fmt.Println("mongo driver client init success: ", uri)
// mongodb create index
if err := createMongoIndex(mongoClient, cSendLog, false, "send_id", "-send_time"); err != nil {
fmt.Println("send_id", "-send_time", "index create failed", err.Error())
@ -100,7 +115,7 @@ func init() {
if err := createMongoIndex(mongoClient, cTag, true, "tag_id"); err != nil {
fmt.Println("tag_id", "index create failed", err.Error())
}
fmt.Println("create index success")
fmt.Println("createMongoIndex success")
DB.mongoClient = mongoClient
// redis pool init
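The manual host-joining loop above is equivalent to strings.Join; a sketch assuming the same config fields:
import (
	"fmt"
	"strings"
)
func buildMongoURI(user, pass string, hosts []string, database string, maxPool int) string {
	hostStr := strings.Join(hosts, ",") // host1:port1,host2:port2,...
	if user != "" && pass != "" {
		return fmt.Sprintf("mongodb://%s:%s@%s/%s?maxPoolSize=%d&authSource=admin",
			user, pass, hostStr, database, maxPool)
	}
	return fmt.Sprintf("mongodb://%s/%s?maxPoolSize=%d&authSource=admin",
		hostStr, database, maxPool)
}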

View File

@ -23,6 +23,7 @@ func (w Writer) Printf(format string, args ...interface{}) {
}
func initMysqlDB() {
fmt.Println("init mysqlDB start")
//When the openIM database does not exist yet, connect to MySQL's built-in database so it can be created
dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=true&loc=Local",
config.Config.Mysql.DBUserName, config.Config.Mysql.DBPassword, config.Config.Mysql.DBAddress[0], "mysql")
@ -30,42 +31,43 @@ func initMysqlDB() {
var err1 error
db, err := gorm.Open(mysql.Open(dsn), nil)
if err != nil {
fmt.Println("0", "Open failed ", err.Error(), dsn)
fmt.Println("Open failed ", err.Error(), dsn)
}
if err != nil {
time.Sleep(time.Duration(30) * time.Second)
db, err1 = gorm.Open(mysql.Open(dsn), nil)
if err1 != nil {
fmt.Println("0", "Open failed ", err1.Error(), dsn)
fmt.Println("Open failed ", err1.Error(), dsn)
panic(err1.Error())
}
}
//Check the database and table during initialization
sql := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s default charset utf8 COLLATE utf8_general_ci;", config.Config.Mysql.DBDatabaseName)
fmt.Println("exec sql: ", sql, " begin")
err = db.Exec(sql).Error
if err != nil {
fmt.Println("0", "Exec failed ", err.Error(), sql)
fmt.Println("Exec failed ", err.Error(), sql)
panic(err.Error())
}
fmt.Println("exec sql: ", sql, " end")
dsn = fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=true&loc=Local",
config.Config.Mysql.DBUserName, config.Config.Mysql.DBPassword, config.Config.Mysql.DBAddress[0], config.Config.Mysql.DBDatabaseName)
newLogger := logger.New(
Writer{},
logger.Config{
SlowThreshold: 200 * time.Millisecond, // Slow SQL threshold
LogLevel: logger.Warn, // Log level
IgnoreRecordNotFoundError: true, // Ignore ErrRecordNotFound error for logger
Colorful: true, // Disable color
SlowThreshold: time.Duration(config.Config.Mysql.SlowThreshold) * time.Millisecond, // Slow SQL threshold
LogLevel: logger.LogLevel(config.Config.Mysql.LogLevel), // Log level
IgnoreRecordNotFoundError: true, // Ignore ErrRecordNotFound error for logger
Colorful: true, // Enable color
},
)
db, err = gorm.Open(mysql.Open(dsn), &gorm.Config{
Logger: newLogger,
})
if err != nil {
fmt.Println("0", "Open failed ", err.Error(), dsn)
fmt.Println("Open failed ", err.Error(), dsn)
panic(err.Error())
}
@ -78,7 +80,7 @@ func initMysqlDB() {
sqlDB.SetMaxOpenConns(config.Config.Mysql.DBMaxOpenConns)
sqlDB.SetMaxIdleConns(config.Config.Mysql.DBMaxIdleConns)
fmt.Println("open db ok ", dsn)
fmt.Println("open mysql ok ", dsn)
db.AutoMigrate(
&Register{},
&Friend{},
@ -88,7 +90,7 @@ func initMysqlDB() {
&GroupRequest{},
&User{},
&Black{}, &ChatLog{}, &Register{}, &Conversation{}, &AppVersion{}, &Department{}, &BlackList{}, &IpLimit{}, &UserIpLimit{}, &Invitation{}, &RegisterAddFriend{},
&ClientInitConfig{})
&ClientInitConfig{}, &UserIpRecord{})
db.Set("gorm:table_options", "CHARSET=utf8")
db.Set("gorm:table_options", "collation=utf8_unicode_ci")
@ -179,6 +181,11 @@ func initMysqlDB() {
db.Migrator().CreateTable(&ClientInitConfig{})
}
if !db.Migrator().HasTable(&UserIpRecord{}) {
fmt.Println("CreateTable UserIpRecord")
db.Migrator().CreateTable(&UserIpRecord{})
}
DB.MysqlDB.db = db
return
}
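The gorm logger is now driven by config instead of hard-coded values. gorm's levels are Silent=1, Error=2, Warn=3, Info=4, so config.Config.Mysql.LogLevel is assumed to hold one of those integers; a sketch of the same construction:
import (
	"time"

	"gorm.io/gorm/logger"
)
func newGormLogger(w logger.Writer, slowMs, level int) logger.Interface {
	return logger.New(w, logger.Config{
		SlowThreshold:             time.Duration(slowMs) * time.Millisecond, // queries slower than this are flagged
		LogLevel:                  logger.LogLevel(level),
		IgnoreRecordNotFoundError: true, // ErrRecordNotFound is routine, not an error
		Colorful:                  true,
	})
}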

View File

@ -2,7 +2,10 @@ package im_mysql_model
import (
"Open_IM/pkg/common/db"
"Open_IM/pkg/utils"
"time"
"gorm.io/gorm"
)
func IsLimitRegisterIp(RegisterIp string) (bool, error) {
@ -23,13 +26,22 @@ func IsLimitLoginIp(LoginIp string) (bool, error) {
return count > 0, nil
}
func IsLimitUserLoginIp(userID string, LoginIp string) (bool, error) {
func IsLimitUserLoginIp(userID string, loginIp string) (limit bool, err error) {
//allow the login if a matching record already exists
var count int64
if err := db.DB.MysqlDB.DefaultGormDB().Table("user_ip_limits").Where("ip=? and user_id=?", LoginIp, userID).Count(&count).Error; err != nil {
return false, err
result := db.DB.MysqlDB.DefaultGormDB().Table("user_ip_limits").Where("user_id=?", userID).Count(&count)
if err := result.Error; err != nil {
return true, err
}
return count == 0, nil
if count < 1 {
return false, nil
}
result = db.DB.MysqlDB.DefaultGormDB().Table("user_ip_limits").Where("user_id=? and ip = ?", userID, loginIp).Count(&count)
if err := result.Error; err != nil {
return true, err
}
return count > 0, nil
}
func QueryIPLimits(ip string) (*db.IpLimit, error) {
@ -83,12 +95,12 @@ func InsertIpRecord(userID, createIp string) error {
func UpdateIpReocord(userID, ip string) (err error) {
record := &db.UserIpRecord{UserID: userID, LastLoginIp: ip, LastLoginTime: time.Now()}
result := db.DB.MysqlDB.DefaultGormDB().Model(&db.UserIpRecord{}).Where("user_id=?", userID).Updates(record).Updates("login_times = login_times + 1")
result := db.DB.MysqlDB.DefaultGormDB().Model(&db.UserIpRecord{}).Where("user_id=?", userID).Updates(record).Update("login_times", gorm.Expr("login_times+?", 1))
if result.Error != nil {
return result.Error
return utils.Wrap(result.Error, "")
}
if result.RowsAffected == 0 {
err = InsertIpRecord(userID, ip)
}
return err
return utils.Wrap(err, "")
}
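UpdateIpReocord above is an update-then-insert upsert with an atomic counter bump via gorm.Expr. The same flow in isolation, sketched against a hypothetical user_ip_records table:
import "gorm.io/gorm"
func recordLogin(db *gorm.DB, userID, ip string) error {
	res := db.Table("user_ip_records").
		Where("user_id = ?", userID).
		Updates(map[string]interface{}{
			"last_login_ip": ip,
			"login_times":   gorm.Expr("login_times + ?", 1), // increment in SQL, no read-modify-write race
		})
	if res.Error != nil {
		return res.Error
	}
	if res.RowsAffected == 0 { // first login for this user: fall back to insert
		return db.Table("user_ip_records").
			Create(map[string]interface{}{"user_id": userID, "last_login_ip": ip, "login_times": 1}).Error
	}
	return nil
}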

View File

@ -12,11 +12,9 @@ import (
)
func init() {
//init managers
for k, v := range config.Config.Manager.AppManagerUid {
user, err := GetUserByUserID(v)
_, err := GetUserByUserID(v)
if err != nil {
fmt.Println("GetUserByUserID failed ", err.Error(), v, user)
} else {
continue
}
@ -30,9 +28,10 @@ func init() {
appMgr.AppMangerLevel = constant.AppAdmin
err = UserRegister(appMgr)
if err != nil {
fmt.Println("AppManager insert error", err.Error(), appMgr, "time: ", appMgr.Birth.Unix())
fmt.Println("AppManager insert error ", err.Error(), appMgr)
} else {
fmt.Println("AppManager insert ", appMgr)
}
}
}

View File

@ -71,7 +71,7 @@ func Test_NewSetMessageToCache(t *testing.T) {
data.AtUserIDList = []string{"1212", "23232"}
msg.MsgData = &data
messageList := []*pbChat.MsgDataToMQ{&msg}
err := DB.SetMessageToCache(messageList, uid, "cacheTest")
err, _ := DB.SetMessageToCache(messageList, uid, "cacheTest")
assert.Nil(t, err)
}

View File

@ -433,6 +433,13 @@ func DelJoinedSuperGroupIDListFromCache(userID string) error {
func GetGroupMemberListHashFromCache(groupID string) (uint64, error) {
generateHash := func() (string, error) {
groupInfo, err := GetGroupInfoFromCache(groupID)
if err != nil {
return "0", utils.Wrap(err, "GetGroupInfoFromCache failed")
}
if groupInfo.Status == constant.GroupStatusDismissed {
return "0", nil
}
groupMemberIDList, err := GetGroupMemberIDListFromCache(groupID)
if err != nil {
return "", utils.Wrap(err, "GetGroupMemberIDListFromCache failed")
@ -447,6 +454,9 @@ func GetGroupMemberListHashFromCache(groupID string) (uint64, error) {
return strconv.Itoa(int(bi.Uint64())), nil
}
hashCode, err := db.DB.Rc.Fetch(groupMemberListHashCache+groupID, time.Second*30*60, generateHash)
if err != nil {
return 0, utils.Wrap(err, "fetch failed")
}
hashCodeUint64, err := strconv.Atoi(hashCode)
return uint64(hashCodeUint64), err
}
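One way to produce such a member-list hash (an illustrative sketch, not necessarily the exact scheme behind generateHash): sort the IDs so ordering cannot change the result, digest the joined string, and keep the top 8 bytes as a uint64 stamp.
import (
	"crypto/md5"
	"encoding/binary"
	"sort"
	"strings"
)
func memberListHash(userIDs []string) uint64 {
	sort.Strings(userIDs) // order-independent
	sum := md5.Sum([]byte(strings.Join(userIDs, ";")))
	return binary.BigEndian.Uint64(sum[:8])
}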

View File

@ -1,8 +1,10 @@
package kafka
import (
"github.com/Shopify/sarama"
"Open_IM/pkg/common/config"
"sync"
"github.com/Shopify/sarama"
)
type Consumer struct {
@ -17,8 +19,13 @@ func NewKafkaConsumer(addr []string, topic string) *Consumer {
p := Consumer{}
p.Topic = topic
p.addr = addr
consumer, err := sarama.NewConsumer(p.addr, nil)
consumerConfig := sarama.NewConfig()
if config.Config.Kafka.SASLUserName != "" && config.Config.Kafka.SASLPassword != "" {
consumerConfig.Net.SASL.Enable = true
consumerConfig.Net.SASL.User = config.Config.Kafka.SASLUserName
consumerConfig.Net.SASL.Password = config.Config.Kafka.SASLPassword
}
consumer, err := sarama.NewConsumer(p.addr, consumerConfig)
if err != nil {
panic(err.Error())
return nil
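The SASL gate above, factored into a helper; a sketch assuming Shopify/sarama with SASL/PLAIN and no TLS, matching the producer change below:
import "github.com/Shopify/sarama"
func newSaramaConfig(saslUser, saslPassword string) *sarama.Config {
	cfg := sarama.NewConfig()
	if saslUser != "" && saslPassword != "" { // credentials present: turn SASL on
		cfg.Net.SASL.Enable = true
		cfg.Net.SASL.User = saslUser
		cfg.Net.SASL.Password = saslPassword
	}
	return cfg
}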

View File

@ -1,11 +1,15 @@
package kafka
import (
"Open_IM/pkg/common/config"
log "Open_IM/pkg/common/log"
"Open_IM/pkg/utils"
"errors"
"github.com/Shopify/sarama"
"github.com/golang/protobuf/proto"
promePkg "Open_IM/pkg/common/prometheus"
)
type Producer struct {
@ -22,7 +26,11 @@ func NewKafkaProducer(addr []string, topic string) *Producer {
p.config.Producer.Return.Errors = true
p.config.Producer.RequiredAcks = sarama.WaitForAll //Set the producer acknowledgement level: 0, 1, or all
p.config.Producer.Partitioner = sarama.NewHashPartitioner //Use the hash partitioner: the message key chosen at send time determines the partition; messages without a key get a random partition
if config.Config.Kafka.SASLUserName != "" && config.Config.Kafka.SASLPassword != "" {
p.config.Net.SASL.Enable = true
p.config.Net.SASL.User = config.Config.Kafka.SASLUserName
p.config.Net.SASL.Password = config.Config.Kafka.SASLPassword
}
p.addr = addr
p.topic = topic
@ -57,5 +65,8 @@ func (p *Producer) SendMessage(m proto.Message, key string, operationID string)
}
a, b, c := p.producer.SendMessage(kMsg)
log.Info(operationID, "ByteEncoder SendMessage end", "key ", kMsg.Key.Length(), kMsg.Value.Length(), p.producer)
if c == nil {
promePkg.PromeInc(promePkg.SendMsgCounter)
}
return a, b, utils.Wrap(c, "")
}
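A hypothetical caller of SendMessage above; keying by the receiver ID keeps one user's messages ordered within a single partition under the hash partitioner. msg stands in for a real pbChat.MsgDataToMQ:
import (
	"Open_IM/pkg/common/log"

	"github.com/golang/protobuf/proto"
)
func publish(p *Producer, msg proto.Message, recvID, operationID string) error {
	partition, offset, err := p.SendMessage(msg, recvID, operationID)
	if err != nil {
		return err
	}
	log.Info(operationID, "kafka ack ", "partition: ", partition, " offset: ", offset)
	return nil
}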

View File

@ -0,0 +1,426 @@
package prometheus
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
//auth rpc
UserLoginCounter prometheus.Counter
UserRegisterCounter prometheus.Counter
//seq
SeqGetSuccessCounter prometheus.Counter
SeqGetFailedCounter prometheus.Counter
SeqSetSuccessCounter prometheus.Counter
SeqSetFailedCounter prometheus.Counter
//msg-db
MsgInsertRedisSuccessCounter prometheus.Counter
MsgInsertRedisFailedCounter prometheus.Counter
MsgInsertMongoSuccessCounter prometheus.Counter
MsgInsertMongoFailedCounter prometheus.Counter
MsgPullFromRedisSuccessCounter prometheus.Counter
MsgPullFromRedisFailedCounter prometheus.Counter
MsgPullFromMongoSuccessCounter prometheus.Counter
MsgPullFromMongoFailedCounter prometheus.Counter
//msg-ws
MsgRecvTotalCounter prometheus.Counter
GetNewestSeqTotalCounter prometheus.Counter
PullMsgBySeqListTotalCounter prometheus.Counter
SingleChatMsgRecvSuccessCounter prometheus.Counter
GroupChatMsgRecvSuccessCounter prometheus.Counter
WorkSuperGroupChatMsgRecvSuccessCounter prometheus.Counter
OnlineUserGauge prometheus.Gauge
//msg-msg
SingleChatMsgProcessSuccessCounter prometheus.Counter
SingleChatMsgProcessFailedCounter prometheus.Counter
GroupChatMsgProcessSuccessCounter prometheus.Counter
GroupChatMsgProcessFailedCounter prometheus.Counter
WorkSuperGroupChatMsgProcessSuccessCounter prometheus.Counter
WorkSuperGroupChatMsgProcessFailedCounter prometheus.Counter
//msg-push
MsgOnlinePushSuccessCounter prometheus.Counter
MsgOfflinePushSuccessCounter prometheus.Counter
MsgOfflinePushFailedCounter prometheus.Counter
// api
ApiRequestCounter prometheus.Counter
ApiRequestSuccessCounter prometheus.Counter
ApiRequestFailedCounter prometheus.Counter
// grpc
GrpcRequestCounter prometheus.Counter
GrpcRequestSuccessCounter prometheus.Counter
GrpcRequestFailedCounter prometheus.Counter
SendMsgCounter prometheus.Counter
)
func NewUserLoginCounter() {
if UserLoginCounter != nil {
return
}
UserLoginCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "user_login",
Help: "The number of user logins",
})
}
func NewUserRegisterCounter() {
if UserRegisterCounter != nil {
return
}
UserRegisterCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "user_register",
Help: "The number of user registrations",
})
}
func NewSeqGetSuccessCounter() {
if SeqGetSuccessCounter != nil {
return
}
SeqGetSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_get_success",
Help: "The number of successful seq gets",
})
}
func NewSeqGetFailedCounter() {
if SeqGetFailedCounter != nil {
return
}
SeqGetFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_get_failed",
Help: "The number of failed seq gets",
})
}
func NewSeqSetSuccessCounter() {
if SeqSetSuccessCounter != nil {
return
}
SeqSetSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_set_success",
Help: "The number of successful seq sets",
})
}
func NewSeqSetFailedCounter() {
if SeqSetFailedCounter != nil {
return
}
SeqSetFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "seq_set_failed",
Help: "The number of failed seq sets",
})
}
func NewApiRequestCounter() {
if ApiRequestCounter != nil {
return
}
ApiRequestCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "api_request",
Help: "The number of api requests",
})
}
func NewApiRequestSuccessCounter() {
if ApiRequestSuccessCounter != nil {
return
}
ApiRequestSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "api_request_success",
Help: "The number of successful api requests",
})
}
func NewApiRequestFailedCounter() {
if ApiRequestFailedCounter != nil {
return
}
ApiRequestFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "api_request_failed",
Help: "The number of failed api requests",
})
}
func NewGrpcRequestCounter() {
if GrpcRequestCounter != nil {
return
}
GrpcRequestCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "grpc_request",
Help: "The number of grpc requests",
})
}
func NewGrpcRequestSuccessCounter() {
if GrpcRequestSuccessCounter != nil {
return
}
GrpcRequestSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "grpc_request_success",
Help: "The number of successful grpc requests",
})
}
func NewGrpcRequestFailedCounter() {
if GrpcRequestFailedCounter != nil {
return
}
GrpcRequestFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "grpc_request_failed",
Help: "The number of failed grpc requests",
})
}
func NewSendMsgCount() {
if SendMsgCounter != nil {
return
}
SendMsgCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "send_msg",
Help: "The number of msgs sent",
})
}
func NewMsgInsertRedisSuccessCounter() {
if MsgInsertRedisSuccessCounter != nil {
return
}
MsgInsertRedisSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_redis_success",
Help: "The number of msgs successfully inserted into redis",
})
}
func NewMsgInsertRedisFailedCounter() {
if MsgInsertRedisFailedCounter != nil {
return
}
MsgInsertRedisFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_redis_failed",
Help: "The number of msgs that failed to insert into redis",
})
}
func NewMsgInsertMongoSuccessCounter() {
if MsgInsertMongoSuccessCounter != nil {
return
}
MsgInsertMongoSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_mongo_success",
Help: "The number of msgs successfully inserted into mongo",
})
}
func NewMsgInsertMongoFailedCounter() {
if MsgInsertMongoFailedCounter != nil {
return
}
MsgInsertMongoFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_insert_mongo_failed",
Help: "The number of msgs that failed to insert into mongo",
})
}
func NewMsgPullFromRedisSuccessCounter() {
if MsgPullFromRedisSuccessCounter != nil {
return
}
MsgPullFromRedisSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_redis_success",
Help: "The number of msgs successfully pulled from redis",
})
}
func NewMsgPullFromRedisFailedCounter() {
if MsgPullFromRedisFailedCounter != nil {
return
}
MsgPullFromRedisFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_redis_failed",
Help: "The number of msgs that failed to be pulled from redis",
})
}
func NewMsgPullFromMongoSuccessCounter() {
if MsgPullFromMongoSuccessCounter != nil {
return
}
MsgPullFromMongoSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_mongo_success",
Help: "The number of msgs successfully pulled from mongo",
})
}
func NewMsgPullFromMongoFailedCounter() {
if MsgPullFromMongoFailedCounter != nil {
return
}
MsgPullFromMongoFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_pull_from_mongo_failed",
Help: "The number of msgs that failed to be pulled from mongo",
})
}
func NewMsgRecvTotalCounter() {
if MsgRecvTotalCounter != nil {
return
}
MsgRecvTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_recv_total",
Help: "The number of msgs received",
})
}
func NewGetNewestSeqTotalCounter() {
if GetNewestSeqTotalCounter != nil {
return
}
GetNewestSeqTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "get_newest_seq_total",
Help: "The number of get newest seq calls",
})
}
func NewPullMsgBySeqListTotalCounter() {
if PullMsgBySeqListTotalCounter != nil {
return
}
PullMsgBySeqListTotalCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "pull_msg_by_seq_list_total",
Help: "The number of pull msg by seq list calls",
})
}
func NewSingleChatMsgRecvSuccessCounter() {
if SingleChatMsgRecvSuccessCounter != nil {
return
}
SingleChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_recv_success",
Help: "The number of single chat msgs successfully received",
})
}
func NewGroupChatMsgRecvSuccessCounter() {
if GroupChatMsgRecvSuccessCounter != nil {
return
}
GroupChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_recv_success",
Help: "The number of group chat msgs successfully received",
})
}
func NewWorkSuperGroupChatMsgRecvSuccessCounter() {
if WorkSuperGroupChatMsgRecvSuccessCounter != nil {
return
}
WorkSuperGroupChatMsgRecvSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "work_super_group_chat_msg_recv_success",
Help: "The number of work/super group chat msgs successfully received",
})
}
func NewOnlineUserGauges() {
if OnlineUserGauge != nil {
return
}
OnlineUserGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "online_user_num",
Help: "The number of online users",
})
}
func NewSingleChatMsgProcessSuccessCounter() {
if SingleChatMsgProcessSuccessCounter != nil {
return
}
SingleChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_process_success",
Help: "The number of single chat msgs successfully processed",
})
}
func NewSingleChatMsgProcessFailedCounter() {
if SingleChatMsgProcessFailedCounter != nil {
return
}
SingleChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "single_chat_msg_process_failed",
Help: "The number of single chat msgs that failed processing",
})
}
func NewGroupChatMsgProcessSuccessCounter() {
if GroupChatMsgProcessSuccessCounter != nil {
return
}
GroupChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_process_success",
Help: "The number of group chat msgs successfully processed",
})
}
func NewGroupChatMsgProcessFailedCounter() {
if GroupChatMsgProcessFailedCounter != nil {
return
}
GroupChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "group_chat_msg_process_failed",
Help: "The number of group chat msgs that failed processing",
})
}
func NewWorkSuperGroupChatMsgProcessSuccessCounter() {
if WorkSuperGroupChatMsgProcessSuccessCounter != nil {
return
}
WorkSuperGroupChatMsgProcessSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "work_super_group_chat_msg_process_success",
Help: "The number of work/super group chat msgs successfully processed",
})
}
func NewWorkSuperGroupChatMsgProcessFailedCounter() {
if WorkSuperGroupChatMsgProcessFailedCounter != nil {
return
}
WorkSuperGroupChatMsgProcessFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "work_super_group_chat_msg_process_failed",
Help: "The number of work/super group chat msgs that failed processing",
})
}
func NewMsgOnlinePushSuccessCounter() {
if MsgOnlinePushSuccessCounter != nil {
return
}
MsgOnlinePushSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_online_push_success",
Help: "The number of msgs successfully pushed online",
})
}
func NewMsgOfflinePushSuccessCounter() {
if MsgOfflinePushSuccessCounter != nil {
return
}
MsgOfflinePushSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_offline_push_success",
Help: "The number of msgs successfully pushed offline",
})
}
func NewMsgOfflinePushFailedCounter() {
if MsgOfflinePushFailedCounter != nil {
return
}
MsgOfflinePushFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "msg_offline_push_failed",
Help: "The number of msgs that failed offline push",
})
}
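The constructors above are deliberately idempotent: promauto.NewCounter panics on a duplicate registration, so each New*Counter guards with a nil check and stays safe to call from several init paths. A minimal sketch of the pattern, with a made-up metric name:
import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)
var demoCounter prometheus.Counter
func NewDemoCounter() {
	if demoCounter != nil {
		return // already registered; a second promauto.NewCounter would panic
	}
	demoCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "demo_total",
		Help: "Illustrative counter, not part of the real metric set",
	})
}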

View File

@ -0,0 +1,35 @@
package prometheus
import (
"context"
"encoding/json"
"time"
"Open_IM/pkg/common/log"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
)
func UnaryServerInterceptorProme(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
remote, _ := peer.FromContext(ctx)
remoteAddr := remote.Addr.String()
in, _ := json.Marshal(req)
inStr := string(in)
log.NewInfo("ip", remoteAddr, "access_start", info.FullMethod, "in", inStr)
start := time.Now()
defer func() {
out, _ := json.Marshal(resp)
outStr := string(out)
duration := int64(time.Since(start) / time.Millisecond)
log.NewInfo("ip", remoteAddr, "access_end", info.FullMethod, "in", inStr, "out", outStr, "err", err, "duration/ms", duration)
}()
resp, err = handler(ctx, req)
return
}
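If both the logging interceptor above and the grpc-prometheus one are wanted on the same server, grpc.ChainUnaryInterceptor (available since grpc-go 1.28) runs them in order; a sketch:
import (
	grpcPrometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)
func newChainedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			UnaryServerInterceptorProme,           // logs peer, payload, latency
			grpcPrometheus.UnaryServerInterceptor, // RPC metrics
		),
	)
}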

View File

@ -2,10 +2,12 @@ package prometheus
import (
"Open_IM/pkg/common/config"
"bytes"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
@ -24,3 +26,57 @@ func PrometheusHandler() gin.HandlerFunc {
h.ServeHTTP(c.Writer, c.Request)
}
}
type responseBodyWriter struct {
gin.ResponseWriter
body *bytes.Buffer
}
func (r responseBodyWriter) Write(b []byte) (int, error) {
r.body.Write(b)
return r.ResponseWriter.Write(b)
}
func PromeTheusMiddleware(c *gin.Context) {
PromeInc(ApiRequestCounter)
w := &responseBodyWriter{body: &bytes.Buffer{}, ResponseWriter: c.Writer}
c.Writer = w
c.Next()
if c.Writer.Status() == http.StatusOK {
PromeInc(ApiRequestSuccessCounter)
} else {
PromeInc(ApiRequestFailedCounter)
}
}
func PromeInc(counter prometheus.Counter) {
if config.Config.Prometheus.Enable {
if counter != nil {
counter.Inc()
}
}
}
func PromeAdd(counter prometheus.Counter, add int) {
if config.Config.Prometheus.Enable {
if counter != nil {
counter.Add(float64(add))
}
}
}
func PromeGaugeInc(gauges prometheus.Gauge) {
if config.Config.Prometheus.Enable {
if gauges != nil {
gauges.Inc()
}
}
}
func PromeGaugeDec(gauges prometheus.Gauge) {
if config.Config.Prometheus.Enable {
if gauges != nil {
gauges.Dec()
}
}
}
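A minimal sketch of how the body-capturing writer above can be used: every write is teed into the buffer, so middleware can inspect the full response after c.Next(). captureBody is a hypothetical helper, not part of this commit:
import (
	"bytes"

	"github.com/gin-gonic/gin"
)
func captureBody(c *gin.Context) string {
	w := &responseBodyWriter{body: &bytes.Buffer{}, ResponseWriter: c.Writer}
	c.Writer = w
	c.Next()
	return w.body.String() // the complete response body
}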

View File

@ -6,7 +6,6 @@ import (
"Open_IM/pkg/utils"
"context"
"fmt"
"go.etcd.io/etcd/api/v3/mvccpb"
clientv3 "go.etcd.io/etcd/client/v3"
@ -39,6 +38,8 @@ var (
func NewResolver(schema, etcdAddr, serviceName string, operationID string) (*Resolver, error) {
etcdCli, err := clientv3.New(clientv3.Config{
Endpoints: strings.Split(etcdAddr, ","),
Username: config.Config.Etcd.UserName,
Password: config.Config.Etcd.Password,
})
if err != nil {
log.Error(operationID, "etcd client v3 failed")
@ -269,8 +270,38 @@ func (r *Resolver) watch(prefix string, addrList []resolver.Address) {
}
}
var Conn4UniqueList []*grpc.ClientConn
var Conn4UniqueListMtx sync.RWMutex
var IsUpdateStart bool
var IsUpdateStartMtx sync.RWMutex
func GetDefaultGatewayConn4Unique(schema, etcdaddr, operationID string) []*grpc.ClientConn {
grpcConns := getConn4Unique(schema, etcdaddr, config.Config.RpcRegisterName.OpenImRelayName)
IsUpdateStartMtx.Lock()
if !IsUpdateStart {
Conn4UniqueList = getConn4Unique(schema, etcdaddr, config.Config.RpcRegisterName.OpenImRelayName)
go func() {
for {
select {
case <-time.After(time.Second * time.Duration(30)):
Conn4UniqueListMtx.Lock()
Conn4UniqueList = getConn4Unique(schema, etcdaddr, config.Config.RpcRegisterName.OpenImRelayName)
Conn4UniqueListMtx.Unlock()
}
}
}()
}
IsUpdateStart = true
IsUpdateStartMtx.Unlock()
Conn4UniqueListMtx.Lock()
var clientConnList []*grpc.ClientConn
for _, v := range Conn4UniqueList {
clientConnList = append(clientConnList, v)
}
Conn4UniqueListMtx.Unlock()
//grpcConns := getConn4Unique(schema, etcdaddr, config.Config.RpcRegisterName.OpenImRelayName)
grpcConns := clientConnList
if len(grpcConns) > 0 {
return grpcConns
}
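The refresh loop above keeps a periodically updated snapshot of gateway connections behind a mutex. The same pattern sketched with a ticker and an RWMutex, so readers take only the read lock; refresh stands in for getConn4Unique:
import (
	"sync"
	"time"

	"google.golang.org/grpc"
)
var (
	connsMtx sync.RWMutex
	conns    []*grpc.ClientConn
)
func startConnRefresher(refresh func() []*grpc.ClientConn, every time.Duration) {
	conns = refresh() // prime once before any reader arrives
	go func() {
		t := time.NewTicker(every)
		defer t.Stop()
		for range t.C {
			fresh := refresh()
			connsMtx.Lock()
			conns = fresh
			connsMtx.Unlock()
		}
	}()
}
func snapshotConns() []*grpc.ClientConn {
	connsMtx.RLock()
	defer connsMtx.RUnlock()
	return append([]*grpc.ClientConn(nil), conns...) // copy so callers never race the refresher
}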

View File

@ -2,7 +2,6 @@ package utils
import (
"errors"
"fmt"
"net"
)
@ -11,14 +10,12 @@ var ServerIP = ""
func GetLocalIP() (string, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", err
}
for _, address := range addrs {
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
fmt.Println(ipnet.IP.String())
return ipnet.IP.String(), nil
}
}

View File

@ -22,7 +22,12 @@ sleep 1
cd ${msg_transfer_binary_root}
for ((i = 0; i < ${msg_transfer_service_num}; i++)); do
nohup ./${msg_transfer_name} -prometheus_port ${prome_ports[$i]} >>../logs/openIM.log 2>&1 &
prome_port=${prome_ports[$i]}
cmd="nohup ./${msg_transfer_name}"
if [ "$prome_port" != "" ]; then
cmd="$cmd -prometheus_port $prome_port"
fi
$cmd >>../logs/openIM.log 2>&1 &
done
#Check launched service process

View File

@ -80,10 +80,8 @@ for ((i = 0; i < ${#service_filename[*]}; i++)); do
list_to_string $portList2
prome_ports=($ports_array)
#Start related rpc services based on the number of ports
# for j in ${service_ports}; do
for ((j = 0; j < ${#service_ports[*]}; j++)); do
#Start the service in the background
# ./${service_filename[$i]} -port $j &
cmd="./${service_filename[$i]} -port ${service_ports[$j]} -prometheus_port ${prome_ports[$j]}"
if [ $i -eq 0 -o $i -eq 1 ]; then
cmd="./${service_filename[$i]} -port ${service_ports[$j]}"