feat: Prometheus can auto set port (#2943)

* feat: config

* feat: prometheus auto set port
icey-yu 2024-12-10 11:43:19 +08:00 committed by GitHub
parent 54be837765
commit 6a7ae690ee
29 changed files with 521 additions and 164 deletions


@@ -10,7 +10,10 @@ api:
 prometheus:
   # Whether to enable prometheus
   enable: true
+  # autoSetPorts indicates whether to automatically set the ports
+  autoSetPorts: true
   # Prometheus listening ports, must match the number of api.ports
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12002 ]
   # This address can be accessed via a browser
   grafanaURL: http://127.0.0.1:13000/
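For context, the auto-set behavior introduced here is implemented later in this commit by a getAutoPort helper that binds to port "0" and lets the OS choose a free port. A minimal standalone Go sketch of that mechanism (illustration only, not part of the commit; the loopback host is a placeholder):

package main

import (
    "fmt"
    "net"
    "strconv"
)

func main() {
    // Asking for port "0" tells the OS to pick any free port.
    listener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", "0"))
    if err != nil {
        panic(err)
    }
    defer listener.Close()

    // The chosen port is read back from the listener's address,
    // exactly as the commit's getAutoPort helper does.
    _, portStr, _ := net.SplitHostPort(listener.Addr().String())
    port, _ := strconv.Atoi(portStr)
    fmt.Println("OS-assigned port:", port)
}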


@@ -12,6 +12,7 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155 ]
   # IP address that the RPC/WebSocket service listens on; setting to 0.0.0.0 listens on both internal and external IPs. If left blank, it automatically uses the internal network IP


@@ -1,6 +1,8 @@
 prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
+  # autoSetPorts indicates whether to automatically set the ports
+  autoSetPorts: true
   # List of ports that Prometheus listens on; each port corresponds to an instance of monitoring. Ensure these are managed accordingly
-  # Because four instances have been launched, four ports need to be specified
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035 ]


@@ -15,6 +15,7 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12182, 12183, 12184, 12185, 12186 ]
 maxConcurrentWorkers: 3


@@ -15,6 +15,7 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12200 ]
 tokenPolicy:


@@ -14,4 +14,5 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12220 ]


@@ -14,4 +14,5 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12240 ]


@@ -14,6 +14,7 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12260 ]


@@ -14,6 +14,7 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12280 ]


@@ -14,6 +14,7 @@ prometheus:
   # Enable or disable Prometheus monitoring
   enable: true
   # List of ports that Prometheus listens on; these must match the number of rpc.ports to ensure correct monitoring setup
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12300 ]


@@ -14,4 +14,5 @@ prometheus:
   # Whether to enable prometheus
   enable: true
   # Prometheus listening ports, must be consistent with the number of rpc.ports
+  # It will only take effect when autoSetPorts is set to false.
   ports: [ 12320 ]


@@ -26,61 +26,94 @@ scrape_configs:
   - job_name: node_exporter
     static_configs:
       - targets: [ internal_ip:20500 ]
   - job_name: openimserver-openim-api
-    static_configs:
-      - targets: [ internal_ip:12002 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/api"
+#    static_configs:
+#      - targets: [ internal_ip:12002 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-msggateway
-    static_configs:
-      - targets: [ internal_ip:12140 ]
-#      - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/msg_gateway"
+#    static_configs:
+#      - targets: [ internal_ip:12140 ]
+# #      - targets: [ internal_ip:12140, internal_ip:12141, internal_ip:12142, internal_ip:12143, internal_ip:12144, internal_ip:12145, internal_ip:12146, internal_ip:12147, internal_ip:12148, internal_ip:12149, internal_ip:12150, internal_ip:12151, internal_ip:12152, internal_ip:12153, internal_ip:12154, internal_ip:12155 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-msgtransfer
-    static_configs:
-      - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ]
-#      - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/msg_transfer"
+#    static_configs:
+#      - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027 ]
+# #      - targets: [ internal_ip:12020, internal_ip:12021, internal_ip:12022, internal_ip:12023, internal_ip:12024, internal_ip:12025, internal_ip:12026, internal_ip:12027, internal_ip:12028, internal_ip:12029, internal_ip:12030, internal_ip:12031, internal_ip:12032, internal_ip:12033, internal_ip:12034, internal_ip:12035 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-push
-    static_configs:
-      - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ]
-#      - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185, internal_ip:12186 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/push"
+#    static_configs:
+#      - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177 ]
+## - targets: [ internal_ip:12170, internal_ip:12171, internal_ip:12172, internal_ip:12173, internal_ip:12174, internal_ip:12175, internal_ip:12176, internal_ip:12177, internal_ip:12178, internal_ip:12179, internal_ip:12180, internal_ip:12182, internal_ip:12183, internal_ip:12184, internal_ip:12185, internal_ip:12186 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-rpc-auth
-    static_configs:
-      - targets: [ internal_ip:12200 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/auth"
+#    static_configs:
+#      - targets: [ internal_ip:12200 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-rpc-conversation
-    static_configs:
-      - targets: [ internal_ip:12220 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/conversation"
+#    static_configs:
+#      - targets: [ internal_ip:12220 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-rpc-friend
-    static_configs:
-      - targets: [ internal_ip:12240 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/friend"
+#    static_configs:
+#      - targets: [ internal_ip:12240 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-rpc-group
-    static_configs:
-      - targets: [ internal_ip:12260 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/group"
+#    static_configs:
+#      - targets: [ internal_ip:12260 ]
+#        labels:
+#          namespace: default.
   - job_name: openimserver-openim-rpc-msg
-    static_configs:
-      - targets: [ internal_ip:12280 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/msg"
+#    static_configs:
+#      - targets: [ internal_ip:12280 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-rpc-third
-    static_configs:
-      - targets: [ internal_ip:12300 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/third"
+#    static_configs:
+#      - targets: [ internal_ip:12300 ]
+#        labels:
+#          namespace: default
   - job_name: openimserver-openim-rpc-user
-    static_configs:
-      - targets: [ internal_ip:12320 ]
-        labels:
-          namespace: default
+    http_sd_configs:
+      - url: "http://internal_ip:10002/prometheus_discovery/user"
+#    static_configs:
+#      - targets: [ internal_ip:12320 ]
+#        labels:
+#          namespace: default
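For reference, Prometheus http_sd_configs expects each discovery URL to return a JSON array of target groups with "targets" and "labels" fields, which is the shape of the RespTarget type added in pkg/common/prommetrics by this commit. A small illustrative sketch of that payload (the target address below is hypothetical):

package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the RespTarget type added by this commit.
type RespTarget struct {
    Targets []string          `json:"targets"`
    Labels  map[string]string `json:"labels"`
}

func main() {
    body, _ := json.MarshalIndent([]RespTarget{{
        Targets: []string{"internal_ip:12002"}, // hypothetical auto-assigned API metrics address
        Labels:  map[string]string{"namespace": "default"},
    }}, "", "  ")
    fmt.Println(string(body))
}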


@@ -146,49 +146,49 @@ services:
     networks:
       - openim
-#  prometheus:
-#    image: ${PROMETHEUS_IMAGE}
-#    container_name: prometheus
-#    restart: always
-#    user: root
-#    volumes:
-#      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
-#      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
-#      - ${DATA_DIR}/components/prometheus/data:/prometheus
-#    command:
-#      - '--config.file=/etc/prometheus/prometheus.yml'
-#      - '--storage.tsdb.path=/prometheus'
-#    ports:
-#      - "19091:9090"
-#    networks:
-#      - openim
-#
-#  alertmanager:
-#    image: ${ALERTMANAGER_IMAGE}
-#    container_name: alertmanager
-#    restart: always
-#    volumes:
-#      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
-#      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
-#    ports:
-#      - "19093:9093"
-#    networks:
-#      - openim
-#
-#  grafana:
-#    image: ${GRAFANA_IMAGE}
-#    container_name: grafana
-#    user: root
-#    restart: always
-#    environment:
-#      - GF_SECURITY_ALLOW_EMBEDDING=true
-#      - GF_SESSION_COOKIE_SAMESITE=none
-#      - GF_SESSION_COOKIE_SECURE=true
-#      - GF_AUTH_ANONYMOUS_ENABLED=true
-#      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
-#    ports:
-#      - "13000:3000"
-#    volumes:
-#      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
-#    networks:
-#      - openim
+  prometheus:
+    image: ${PROMETHEUS_IMAGE}
+    container_name: prometheus
+    restart: always
+    user: root
+    volumes:
+      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
+      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
+      - ${DATA_DIR}/components/prometheus/data:/prometheus
+    command:
+      - '--config.file=/etc/prometheus/prometheus.yml'
+      - '--storage.tsdb.path=/prometheus'
+    ports:
+      - "19091:9090"
+    networks:
+      - openim
+
+  alertmanager:
+    image: ${ALERTMANAGER_IMAGE}
+    container_name: alertmanager
+    restart: always
+    volumes:
+      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
+      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
+    ports:
+      - "19093:9093"
+    networks:
+      - openim
+
+  grafana:
+    image: ${GRAFANA_IMAGE}
+    container_name: grafana
+    user: root
+    restart: always
+    environment:
+      - GF_SECURITY_ALLOW_EMBEDDING=true
+      - GF_SESSION_COOKIE_SAMESITE=none
+      - GF_SESSION_COOKIE_SECURE=true
+      - GF_AUTH_ANONYMOUS_ENABLED=true
+      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+    ports:
+      - "13000:3000"
+    volumes:
+      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
+    networks:
+      - openim

go.mod

@@ -15,7 +15,7 @@ require (
     github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
     github.com/mitchellh/mapstructure v1.5.0
     github.com/openimsdk/protocol v0.0.72-alpha.61
-    github.com/openimsdk/tools v0.0.50-alpha.46
+    github.com/openimsdk/tools v0.0.50-alpha.47
     github.com/pkg/errors v0.9.1 // indirect
     github.com/prometheus/client_golang v1.18.0
     github.com/stretchr/testify v1.9.0
@@ -42,6 +42,7 @@ require (
     github.com/robfig/cron/v3 v3.0.1
     github.com/shirou/gopsutil v3.21.11+incompatible
     github.com/spf13/viper v1.18.2
+    go.etcd.io/etcd/client/v3 v3.5.13
     go.uber.org/automaxprocs v1.5.3
     golang.org/x/exp v0.0.0-20230905200255-921286631fa9
     golang.org/x/sync v0.8.0
@@ -179,7 +180,6 @@ require (
     github.com/yusufpapurcu/wmi v1.2.4 // indirect
     go.etcd.io/etcd/api/v3 v3.5.13 // indirect
     go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
-    go.etcd.io/etcd/client/v3 v3.5.13 // indirect
     go.opencensus.io v0.24.0 // indirect
     go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
     go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect

go.sum

@@ -349,8 +349,8 @@ github.com/openimsdk/gomake v0.0.15-alpha.2 h1:5Q8yl8ezy2yx+q8/ucU/t4kJnDfCzNOrk
 github.com/openimsdk/gomake v0.0.15-alpha.2/go.mod h1:PndCozNc2IsQIciyn9mvEblYWZwJmAI+06z94EY+csI=
 github.com/openimsdk/protocol v0.0.72-alpha.61 h1:RuZR9/Sg3p6Bpb2CKPjPoA2AUmTvHITmhZ3PT/RbWMs=
 github.com/openimsdk/protocol v0.0.72-alpha.61/go.mod h1:Iet+piS/jaS+kWWyj6EEr36mk4ISzIRYjoMSVA4dq2M=
-github.com/openimsdk/tools v0.0.50-alpha.46 h1:j3HxPxhDptVHwr7eChL2rCH8mKfpUEcr4nHi5k4yDME=
-github.com/openimsdk/tools v0.0.50-alpha.46/go.mod h1:muCtxguNJv8lFwLei27UASu2Nvg4ERSeN0R4K5tivk0=
+github.com/openimsdk/tools v0.0.50-alpha.47 h1:Cfe2va/g6WhLjOoQqZkjrdlEDq1dUsfcQsdUB5oADVA=
+github.com/openimsdk/tools v0.0.50-alpha.47/go.mod h1:muCtxguNJv8lFwLei27UASu2Nvg4ERSeN0R4K5tivk0=
 github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
 github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
 github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=


@@ -16,6 +16,7 @@ package api
 import (
     "context"
+    "errors"
     "fmt"
     "net"
     "net/http"
@@ -26,16 +27,17 @@ import (
     "time"

     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
-    "github.com/openimsdk/tools/utils/datautil"
-    "github.com/openimsdk/tools/utils/network"
-    "github.com/openimsdk/tools/utils/runtimeenv"
     kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
     "github.com/openimsdk/tools/discovery"
+    "github.com/openimsdk/tools/discovery/etcd"
     "github.com/openimsdk/tools/errs"
     "github.com/openimsdk/tools/log"
     "github.com/openimsdk/tools/system/program"
+    "github.com/openimsdk/tools/utils/datautil"
+    "github.com/openimsdk/tools/utils/jsonutil"
+    "github.com/openimsdk/tools/utils/network"
+    "github.com/openimsdk/tools/utils/runtimeenv"
 )

 type Config struct {
@@ -68,16 +70,57 @@ func Start(ctx context.Context, index int, config *Config) error {
         prometheusPort int
     )
-    router := newGinRouter(client, config)
+    registerIP, err := network.GetRpcRegisterIP("")
+    if err != nil {
+        return err
+    }
+
+    getAutoPort := func() (net.Listener, int, error) {
+        registerAddr := net.JoinHostPort(registerIP, "0")
+        listener, err := net.Listen("tcp", registerAddr)
+        if err != nil {
+            return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
+        }
+        _, portStr, _ := net.SplitHostPort(listener.Addr().String())
+        port, _ := strconv.Atoi(portStr)
+        return listener, port, nil
+    }
+
+    if config.API.Prometheus.AutoSetPorts && config.Discovery.Enable != kdisc.Etcd {
+        return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
+    }
+
+    router := newGinRouter(client, config, client)
     if config.API.Prometheus.Enable {
-        go func() {
+        var (
+            listener net.Listener
+        )
+        if config.API.Prometheus.AutoSetPorts {
+            listener, prometheusPort, err = getAutoPort()
+            if err != nil {
+                return err
+            }
+            etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
+            _, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
+            if err != nil {
+                return errs.WrapMsg(err, "etcd put err")
+            }
+        } else {
             prometheusPort, err = datautil.GetElemByIndex(config.API.Prometheus.Ports, index)
             if err != nil {
-                netErr = err
-                netDone <- struct{}{}
-                return
+                return err
             }
-            if err := prommetrics.ApiInit(prometheusPort); err != nil && err != http.ErrServerClosed {
+            listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
+            if err != nil {
+                return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
+            }
+        }
+        go func() {
+            if err := prommetrics.ApiInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
                 netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort))
                 netDone <- struct{}{}
             }
@@ -90,7 +133,7 @@ func Start(ctx context.Context, index int, config *Config) error {
     log.CInfo(ctx, "API server is initializing", "runtimeEnv", config.RuntimeEnv, "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
     go func() {
         err = server.ListenAndServe()
-        if err != nil && err != http.ErrServerClosed {
+        if err != nil && !errors.Is(err, http.ErrServerClosed) {
             netErr = errs.WrapMsg(err, fmt.Sprintf("api start err: %s", server.Addr))
             netDone <- struct{}{}


@@ -0,0 +1,113 @@
package api

import (
    "encoding/json"
    "github.com/gin-gonic/gin"
    "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
    "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
    "github.com/openimsdk/tools/apiresp"
    "github.com/openimsdk/tools/discovery"
    "github.com/openimsdk/tools/discovery/etcd"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/log"
    clientv3 "go.etcd.io/etcd/client/v3"
    "net/http"
)

type PrometheusDiscoveryApi struct {
    config *Config
    client *clientv3.Client
}

func NewPrometheusDiscoveryApi(config *Config, client discovery.SvcDiscoveryRegistry) *PrometheusDiscoveryApi {
    api := &PrometheusDiscoveryApi{
        config: config,
    }
    if config.Discovery.Enable == discoveryregister.Etcd {
        api.client = client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
    }
    return api
}

func (p *PrometheusDiscoveryApi) Enable(c *gin.Context) {
    if p.config.Discovery.Enable != discoveryregister.Etcd {
        c.JSON(http.StatusOK, []struct{}{})
        c.Abort()
    }
}

func (p *PrometheusDiscoveryApi) discovery(c *gin.Context, key string) {
    eResp, err := p.client.Get(c, prommetrics.BuildDiscoveryKey(key))
    if err != nil {
        // Log and respond with an error if preparation fails.
        apiresp.GinError(c, errs.WrapMsg(err, "etcd get err"))
        return
    }
    if len(eResp.Kvs) == 0 {
        c.JSON(http.StatusOK, []*prommetrics.Target{})
    }

    var (
        resp = &prommetrics.RespTarget{
            Targets: make([]string, 0, len(eResp.Kvs)),
        }
    )

    for i := range eResp.Kvs {
        var target prommetrics.Target
        err = json.Unmarshal(eResp.Kvs[i].Value, &target)
        if err != nil {
            log.ZError(c, "prometheus unmarshal err", errs.Wrap(err))
        }
        resp.Targets = append(resp.Targets, target.Target)
        if resp.Labels == nil {
            resp.Labels = target.Labels
        }
    }

    c.JSON(200, []*prommetrics.RespTarget{resp})
}

func (p *PrometheusDiscoveryApi) Api(c *gin.Context) {
    p.discovery(c, prommetrics.APIKeyName)
}

func (p *PrometheusDiscoveryApi) User(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.User)
}

func (p *PrometheusDiscoveryApi) Group(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.Group)
}

func (p *PrometheusDiscoveryApi) Msg(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.Msg)
}

func (p *PrometheusDiscoveryApi) Friend(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.Friend)
}

func (p *PrometheusDiscoveryApi) Conversation(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.Conversation)
}

func (p *PrometheusDiscoveryApi) Third(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.Third)
}

func (p *PrometheusDiscoveryApi) Auth(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.Auth)
}

func (p *PrometheusDiscoveryApi) Push(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.Push)
}

func (p *PrometheusDiscoveryApi) MessageGateway(c *gin.Context) {
    p.discovery(c, p.config.Discovery.RpcService.MessageGateway)
}

func (p *PrometheusDiscoveryApi) MessageTransfer(c *gin.Context) {
    p.discovery(c, prommetrics.MessageTransferKeyName)
}
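A hedged usage sketch (not part of the commit): fetching one of the new discovery endpoints by hand and decoding the response, assuming the API server is reachable at internal_ip:10002 as in the prometheus.yml above.

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

type targetGroup struct {
    Targets []string          `json:"targets"`
    Labels  map[string]string `json:"labels"`
}

func main() {
    // "internal_ip" is a placeholder, as in the Prometheus configuration above.
    resp, err := http.Get("http://internal_ip:10002/prometheus_discovery/api")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var groups []targetGroup
    if err := json.NewDecoder(resp.Body).Decode(&groups); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", groups)
}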


@@ -48,7 +48,7 @@ func prommetricsGin() gin.HandlerFunc {
     }
 }

-func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.Engine {
+func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config, client discovery.SvcDiscoveryRegistry) *gin.Engine {
     disCov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
         grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
     gin.SetMode(gin.ReleaseMode)
@@ -78,6 +78,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
     u := NewUserApi(*userRpc)
     m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
     j := jssdk.NewJSSdkApi(userRpc.Client, friendRpc.Client, groupRpc.Client, messageRpc.Client, conversationRpc.Client)
+    pd := NewPrometheusDiscoveryApi(config, client)
     userRouterGroup := r.Group("/user")
     {
         userRouterGroup.POST("/user_register", u.UserRegister)
@@ -254,6 +255,19 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
         jssdk.POST("/get_conversations", j.GetConversations)
         jssdk.POST("/get_active_conversations", j.GetActiveConversations)
+
+    proDiscoveryGroup := r.Group("/prometheus_discovery", pd.Enable)
+    proDiscoveryGroup.GET("/api", pd.Api)
+    proDiscoveryGroup.GET("/user", pd.User)
+    proDiscoveryGroup.GET("/group", pd.Group)
+    proDiscoveryGroup.GET("/msg", pd.Msg)
+    proDiscoveryGroup.GET("/friend", pd.Friend)
+    proDiscoveryGroup.GET("/conversation", pd.Conversation)
+    proDiscoveryGroup.GET("/third", pd.Third)
+    proDiscoveryGroup.GET("/auth", pd.Auth)
+    proDiscoveryGroup.GET("/push", pd.Push)
+    proDiscoveryGroup.GET("/msg_gateway", pd.MessageGateway)
+    proDiscoveryGroup.GET("/msg_transfer", pd.MessageTransfer)
+
     return r
 }


@@ -41,7 +41,7 @@ type Config struct {
 func Start(ctx context.Context, index int, conf *Config) error {
     conf.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()

-    log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", conf.RuntimeEnv, "autoSetPorts", conf.MsgGateway.RPC.AutoSetPorts,
+    log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", conf.RuntimeEnv,
         "rpcPorts", conf.MsgGateway.RPC.Ports,
         "wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
     wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)


@@ -18,11 +18,17 @@ import (
     "context"
     "errors"
     "fmt"
+    "net"
     "net/http"
     "os"
     "os/signal"
+    "strconv"
     "syscall"

+    "github.com/openimsdk/tools/discovery/etcd"
+    "github.com/openimsdk/tools/utils/jsonutil"
+    "github.com/openimsdk/tools/utils/network"
+
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
@@ -33,6 +39,7 @@ import (
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
+    kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
     "github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
     "github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
     "github.com/openimsdk/tools/errs"
@@ -140,21 +147,67 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
         return err
     }
+
+    client, err := kdisc.NewDiscoveryRegister(&config.Discovery, m.runTimeEnv)
+    if err != nil {
+        return errs.WrapMsg(err, "failed to register discovery service")
+    }
+
+    registerIP, err := network.GetRpcRegisterIP("")
+    if err != nil {
+        return err
+    }
+
+    getAutoPort := func() (net.Listener, int, error) {
+        registerAddr := net.JoinHostPort(registerIP, "0")
+        listener, err := net.Listen("tcp", registerAddr)
+        if err != nil {
+            return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
+        }
+        _, portStr, _ := net.SplitHostPort(listener.Addr().String())
+        port, _ := strconv.Atoi(portStr)
+        return listener, port, nil
+    }
+
+    if config.MsgTransfer.Prometheus.AutoSetPorts && config.Discovery.Enable != kdisc.Etcd {
+        return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
+    }
+
     if config.MsgTransfer.Prometheus.Enable {
+        var (
+            listener       net.Listener
+            prometheusPort int
+        )
+        if config.MsgTransfer.Prometheus.AutoSetPorts {
+            listener, prometheusPort, err = getAutoPort()
+            if err != nil {
+                return err
+            }
+            etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
+            _, err = etcdClient.Put(context.TODO(), prommetrics.BuildDiscoveryKey(prommetrics.MessageTransferKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
+            if err != nil {
+                return errs.WrapMsg(err, "etcd put err")
+            }
+        } else {
+            prometheusPort, err = datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
+            if err != nil {
+                return err
+            }
+            listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
+            if err != nil {
+                return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
+            }
+        }
         go func() {
             defer func() {
                 if r := recover(); r != nil {
                     mw.PanicStackToLog(m.ctx, r)
                 }
             }()
-            prometheusPort, err := datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
-            if err != nil {
-                netErr = err
-                netDone <- struct{}{}
-                return
-            }
-            if err := prommetrics.TransferInit(prometheusPort); err != nil && !errors.Is(err, http.ErrServerClosed) {
+            if err := prommetrics.TransferInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
                 netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
                 netDone <- struct{}{}
             }


@@ -18,11 +18,10 @@ import (
     "strings"
     "time"

-    "github.com/openimsdk/tools/s3/aws"
-
     "github.com/openimsdk/tools/db/mongoutil"
     "github.com/openimsdk/tools/db/redisutil"
     "github.com/openimsdk/tools/mq/kafka"
+    "github.com/openimsdk/tools/s3/aws"
     "github.com/openimsdk/tools/s3/cos"
     "github.com/openimsdk/tools/s3/kodo"
     "github.com/openimsdk/tools/s3/minio"
@@ -109,6 +108,7 @@ type API struct {
     } `mapstructure:"api"`
     Prometheus struct {
         Enable bool `mapstructure:"enable"`
+        AutoSetPorts bool `mapstructure:"autoSetPorts"`
         Ports []int `mapstructure:"ports"`
         GrafanaURL string `mapstructure:"grafanaURL"`
     } `mapstructure:"prometheus"`
@@ -193,7 +193,11 @@ type MsgGateway struct {
 }

 type MsgTransfer struct {
-    Prometheus Prometheus `mapstructure:"prometheus"`
+    Prometheus struct {
+        Enable bool `mapstructure:"enable"`
+        AutoSetPorts bool `mapstructure:"autoSetPorts"`
+        Ports []int `mapstructure:"ports"`
+    } `mapstructure:"prometheus"`
 }

 type Push struct {


@@ -83,3 +83,11 @@ func TestLoadOpenIMThirdConfig(t *testing.T) {
     // Environment: IMENV_OPENIM_RPC_THIRD_OBJECT_ENABLE=enabled;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ENDPOINT=https://oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKET=my_bucket_name;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKETURL=https://my_bucket_name.oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYID=AKID1234567890;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYSECRET=abc123xyz789;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_SESSIONTOKEN=session_token_value;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_PUBLICREAD=true
 }
+
+func TestTransferConfig(t *testing.T) {
+    var tran MsgTransfer
+    err := LoadConfig("../../../config/openim-msgtransfer.yml", "IMENV_OPENIM-MSGTRANSFER", &tran)
+    assert.Nil(t, err)
+    assert.Equal(t, true, tran.Prometheus.Enable)
+    assert.Equal(t, true, tran.Prometheus.AutoSetPorts)
+}


@@ -26,6 +26,10 @@ import (
     "github.com/openimsdk/tools/errs"
 )

+const (
+    Etcd = "etcd"
+)
+
 // NewDiscoveryRegister creates a new service discovery and registry client based on the provided environment type.
 func NewDiscoveryRegister(discovery *config.Discovery, runtimeEnv string) (discovery.SvcDiscoveryRegistry, error) {
     if runtimeEnv == "kubernetes" {
@@ -35,7 +39,7 @@ func NewDiscoveryRegister(discovery *config.Discovery, runtimeEnv string) (disco
     switch discovery.Enable {
     case "kubernetes":
         return kubernetes.NewKubernetesConnManager(discovery.Kubernetes.Namespace)
-    case "etcd":
+    case Etcd:
         return etcd.NewSvcDiscoveryRegistry(
             discovery.Etcd.RootDirectory,
             discovery.Etcd.Address,


@@ -3,6 +3,7 @@ package prommetrics
 import (
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promhttp"
+    "net"
     "strconv"
 )

@@ -23,14 +24,14 @@
     )
 )

-func ApiInit(prometheusPort int) error {
+func ApiInit(listener net.Listener) error {
     apiRegistry := prometheus.NewRegistry()
     cs := append(
         baseCollector,
         apiCounter,
         httpCounter,
     )
-    return Init(apiRegistry, prometheusPort, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
+    return Init(apiRegistry, listener, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
 }

 func APICall(path string, method string, apiCode int) {


@@ -0,0 +1,31 @@
package prommetrics

import "fmt"

const (
    APIKeyName             = "api"
    MessageTransferKeyName = "message-transfer"
)

type Target struct {
    Target string            `json:"target"`
    Labels map[string]string `json:"labels"`
}

type RespTarget struct {
    Targets []string          `json:"targets"`
    Labels  map[string]string `json:"labels"`
}

func BuildDiscoveryKey(name string) string {
    return fmt.Sprintf("%s/%s/%s", "openim", "prometheus_discovery", name)
}

func BuildDefaultTarget(host string, ip int) Target {
    return Target{
        Target: fmt.Sprintf("%s:%d", host, ip),
        Labels: map[string]string{
            "namespace": "default",
        },
    }
}
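Based only on the helpers above, this is what an auto-registered entry looks like: the etcd key produced by BuildDiscoveryKey and the JSON document produced by marshalling BuildDefaultTarget. A small sketch (host and port are hypothetical; the import assumes the open-im-server module):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
)

func main() {
    // Key written to etcd for the API service: openim/prometheus_discovery/api
    fmt.Println(prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName))

    // Value stored under that key, e.g. {"target":"10.0.0.5:54321","labels":{"namespace":"default"}}
    b, _ := json.Marshal(prommetrics.BuildDefaultTarget("10.0.0.5", 54321))
    fmt.Println(string(b))
}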


@@ -15,9 +15,9 @@
 package prommetrics

 import (
-    "fmt"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/collectors"
+    "net"
     "net/http"
 )

@@ -30,9 +30,9 @@ var (
     }
 )

-func Init(registry *prometheus.Registry, prometheusPort int, path string, handler http.Handler, cs ...prometheus.Collector) error {
+func Init(registry *prometheus.Registry, listener net.Listener, path string, handler http.Handler, cs ...prometheus.Collector) error {
     registry.MustRegister(cs...)
     srv := http.NewServeMux()
     srv.Handle(path, handler)
-    return http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), srv)
+    return http.Serve(listener, srv)
 }
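The switch from a port number to a net.Listener lets callers bind first, so an OS-assigned port is already known (and can be registered in etcd) before metrics are served. A minimal stdlib sketch of the same pattern (illustration only; the handler is a stand-in):

package main

import (
    "fmt"
    "net"
    "net/http"
)

func main() {
    // Bind first: from this point the (possibly OS-assigned) port is fixed.
    listener, err := net.Listen("tcp", ":0")
    if err != nil {
        panic(err)
    }
    fmt.Println("metrics address:", listener.Addr()) // register this address, then serve

    mux := http.NewServeMux()
    mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "# placeholder metrics output")
    })
    _ = http.Serve(listener, mux) // same tail-call shape as the new Init
}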


@@ -5,6 +5,7 @@ import (
     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promhttp"
+    "net"
     "strconv"
 )

@@ -21,13 +22,13 @@
     )
 )

-func RpcInit(cs []prometheus.Collector, prometheusPort int) error {
+func RpcInit(cs []prometheus.Collector, listener net.Listener) error {
     reg := prometheus.NewRegistry()
     cs = append(append(
         baseCollector,
         rpcCounter,
     ), cs...)
-    return Init(reg, prometheusPort, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
+    return Init(reg, listener, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
 }

 func RPCCall(name string, path string, code int) {


@@ -17,6 +17,7 @@ package prommetrics
 import (
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promhttp"
+    "net"
 )

 var (
@@ -42,7 +43,7 @@
     })
 )

-func TransferInit(prometheusPort int) error {
+func TransferInit(listener net.Listener) error {
     reg := prometheus.NewRegistry()
     cs := append(
         baseCollector,
@@ -52,5 +53,5 @@ func TransferInit(prometheusPort int) error {
         MsgInsertMongoFailedCounter,
         SeqSetFailedCounter,
     )
-    return Init(reg, prometheusPort, commonPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
+    return Init(reg, listener, commonPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
 }


@@ -22,15 +22,17 @@ import (
     "net/http"
     "os"
     "os/signal"
+    "strconv"
     "syscall"
     "time"

     "github.com/openimsdk/open-im-server/v3/pkg/common/config"
+    "github.com/openimsdk/tools/discovery/etcd"
     "github.com/openimsdk/tools/utils/datautil"
-    "github.com/openimsdk/tools/utils/runtimeenv"
+    "github.com/openimsdk/tools/utils/jsonutil"
     "google.golang.org/grpc/status"
-    "strconv"
+    "github.com/openimsdk/tools/utils/runtimeenv"

     kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
     "github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@@ -52,8 +54,14 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
         rpcTcpAddr string
         netDone    = make(chan struct{}, 2)
         netErr     error
+        prometheusPort int
     )

+    registerIP, err := network.GetRpcRegisterIP(registerIP)
+    if err != nil {
+        return err
+    }
+
     runTimeEnv := runtimeenv.PrintRuntimeEnvironment()

     if !autoSetPorts {
@@ -66,6 +74,27 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
         rpcTcpAddr = net.JoinHostPort(network.GetListenIP(listenIP), "0")
     }

+    getAutoPort := func() (net.Listener, int, error) {
+        listener, err := net.Listen("tcp", rpcTcpAddr)
+        if err != nil {
+            return nil, 0, errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
+        }
+        _, portStr, _ := net.SplitHostPort(listener.Addr().String())
+        port, _ := strconv.Atoi(portStr)
+        return listener, port, nil
+    }
+
+    if autoSetPorts && discovery.Enable != kdisc.Etcd {
+        return errs.New("only etcd support autoSetPorts", "rpcRegisterName", rpcRegisterName).Wrap()
+    }
+
+    client, err := kdisc.NewDiscoveryRegister(discovery, runTimeEnv)
+    if err != nil {
+        return err
+    }
+
+    defer client.Close()
+    client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
+
     // var reg *prometheus.Registry
     // var metric *grpcprometheus.ServerMetrics
     if prometheusConfig.Enable {
@@ -78,17 +107,40 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
             prommetricsUnaryInterceptor(rpcRegisterName),
             prommetricsStreamInterceptor(rpcRegisterName),
         )
-        prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
-        if err != nil {
-            return err
-        }
+        var (
+            listener net.Listener
+        )
+        if autoSetPorts {
+            listener, prometheusPort, err = getAutoPort()
+            if err != nil {
+                return err
+            }
+            etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
+            _, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(rpcRegisterName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
+            if err != nil {
+                return errs.WrapMsg(err, "etcd put err")
+            }
+        } else {
+            prometheusPort, err = datautil.GetElemByIndex(prometheusConfig.Ports, index)
+            if err != nil {
+                return err
+            }
+            listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
+            if err != nil {
+                return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
+            }
+        }
         cs := prommetrics.GetGrpcCusMetrics(rpcRegisterName, discovery)
         go func() {
-            if err := prommetrics.RpcInit(cs, prometheusPort); err != nil && !errors.Is(err, http.ErrServerClosed) {
+            if err := prommetrics.RpcInit(cs, listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
                 netErr = errs.WrapMsg(err, fmt.Sprintf("rpc %s prometheus start err: %d", rpcRegisterName, prometheusPort))
                 netDone <- struct{}{}
            }
-            // metric.InitializeMetrics(srv)
+            //metric.InitializeMetrics(srv)
             // Create a HTTP server for prometheus.
             // httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
             // if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
@@ -100,30 +152,15 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
         options = append(options, mw.GrpcServer())
     }

-    listener, err := net.Listen(
-        "tcp",
-        rpcTcpAddr,
-    )
-    if err != nil {
-        return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
-    }
-    _, portStr, _ := net.SplitHostPort(listener.Addr().String())
-    registerIP = network.GetListenIP(registerIP)
-    port, _ := strconv.Atoi(portStr)
-    log.CInfo(ctx, "RPC server is initializing", "runTimeEnv", runTimeEnv, "rpcRegisterName", rpcRegisterName, "rpcPort", portStr,
-        "prometheusPorts", prometheusConfig.Ports)
-    defer listener.Close()
-    client, err := kdisc.NewDiscoveryRegister(discovery, runTimeEnv)
+    listener, port, err := getAutoPort()
     if err != nil {
         return err
     }
-    defer client.Close()
-    client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
+    log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", port,
+        "prometheusPort", prometheusPort)
+    defer listener.Close()
     srv := grpc.NewServer(options...)
     err = rpcFn(ctx, config, client, srv)