Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-11-05 21:02:11 +08:00)
fix: read the prometheus port only when the enable flag is set, preventing failure during startup.
commit 3388513b0d
parent 4f3243d8a7
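The same pattern is applied in every service touched below: instead of resolving the per-instance Prometheus port eagerly in Start (and failing the whole service when the configured port list is missing or too short), the port is looked up inside the metrics goroutine, only after the Prometheus Enable flag has been checked, and any lookup error is reported through the existing netErr/netDone signalling. The following is a minimal, self-contained sketch of that flow; getElemByIndex stands in for datautil.GetElemByIndex and prometheusConf abbreviates the real config types, so the names and shapes here are illustrative, not the project's API.

package main

import (
	"fmt"
	"net/http"
)

// getElemByIndex is a stand-in for datautil.GetElemByIndex: it returns the
// element at index, or an error when the slice is shorter than index+1.
func getElemByIndex(elems []int, index int) (int, error) {
	if index < 0 || index >= len(elems) {
		return 0, fmt.Errorf("index %d out of range, len %d", index, len(elems))
	}
	return elems[index], nil
}

// prometheusConf abbreviates the relevant slice of the service configuration.
type prometheusConf struct {
	Enable bool
	Ports  []int
}

// start mirrors the new control flow: the port lookup happens only when
// metrics are enabled, and lookup/serve errors are surfaced through netDone
// instead of aborting service startup.
func start(index int, conf prometheusConf) error {
	netDone := make(chan struct{}, 1)
	var netErr error

	if conf.Enable {
		go func() {
			prometheusPort, err := getElemByIndex(conf.Ports, index)
			if err != nil {
				netErr = err
				netDone <- struct{}{}
				return
			}
			err = http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
			if err != nil && err != http.ErrServerClosed {
				netErr = err
				netDone <- struct{}{}
			}
		}()
	}

	// The real services go on to start their main listeners and select on
	// netDone together with shutdown signals; here we only poll once.
	select {
	case <-netDone:
		return netErr
	default:
		return nil
	}
}

func main() {
	// With Prometheus disabled, an empty port list is no longer fatal.
	fmt.Println(start(0, prometheusConf{Enable: false, Ports: nil}))
}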
@@ -51,10 +51,6 @@ func Start(ctx context.Context, index int, config *Config) error {
 	if err != nil {
 		return err
 	}
-	prometheusPort, err := datautil.GetElemByIndex(config.RpcConfig.Prometheus.Ports, index)
-	if err != nil {
-		return err
-	}
 
 	var client discovery.SvcDiscoveryRegistry
 
@@ -67,11 +63,18 @@ func Start(ctx context.Context, index int, config *Config) error {
 	var (
 		netDone        = make(chan struct{}, 1)
 		netErr         error
+		prometheusPort int
 	)
 
 	router := newGinRouter(client, config)
 	if config.RpcConfig.Prometheus.Enable {
 		go func() {
+			prometheusPort, err = datautil.GetElemByIndex(config.RpcConfig.Prometheus.Ports, index)
+			if err != nil {
+				netErr = err
+				netDone <- struct{}{}
+				return
+			}
 			p := ginprom.NewPrometheus("app", prommetrics.GetGinCusMetrics("Api"))
 			p.SetListenAddress(fmt.Sprintf(":%d", prometheusPort))
 			if err = p.Use(router); err != nil && err != http.ErrServerClosed {
@@ -47,7 +47,6 @@ func (s *Server) Start(ctx context.Context, index int, conf *Config) error {
 
 type Server struct {
 	rpcPort        int
-	prometheusPort int
 	LongConnServer LongConnServer
 	config         *Config
 	pushTerminal   map[int]struct{}
@@ -57,10 +56,9 @@ func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
 	s.LongConnServer = LongConnServer
 }
 
-func NewServer(rpcPort int, proPort int, longConnServer LongConnServer, conf *Config) *Server {
+func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server {
 	s := &Server{
 		rpcPort:        rpcPort,
-		prometheusPort: proPort,
 		LongConnServer: longConnServer,
 		pushTerminal:   make(map[int]struct{}),
 		config:         conf,
@@ -38,10 +38,6 @@ func Start(ctx context.Context, index int, conf *Config) error {
 	if err != nil {
 		return err
 	}
-	prometheusPort, err := datautil.GetElemByIndex(conf.MsgGateway.Prometheus.Ports, index)
-	if err != nil {
-		return err
-	}
 	rpcPort, err := datautil.GetElemByIndex(conf.MsgGateway.RPC.Ports, index)
 	if err != nil {
 		return err
@@ -57,7 +53,7 @@ func Start(ctx context.Context, index int, conf *Config) error {
 		return err
 	}
 
-	hubServer := NewServer(rpcPort, prometheusPort, longServer, conf)
+	hubServer := NewServer(rpcPort, longServer, conf)
 	netDone := make(chan error)
 	go func() {
 		err = hubServer.Start(ctx, index, conf)
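In the message gateway, the Prometheus port is dropped from the Server struct and from NewServer; Start already receives index and conf, so the port can be looked up there when metrics are actually enabled, matching the pattern sketched above. The following sketch only illustrates the shape of that refactor with placeholder types and names (gwConfig, resolvePort), not the project's real ones.

package main

import "fmt"

// gwConfig is a placeholder for the gateway configuration.
type gwConfig struct {
	PrometheusEnable bool
	PrometheusPorts  []int
}

// Server no longer stores prometheusPort; it keeps only what every code
// path needs regardless of whether metrics are enabled.
type Server struct {
	rpcPort int
	config  *gwConfig
}

// NewServer drops the proPort parameter from its signature.
func NewServer(rpcPort int, conf *gwConfig) *Server {
	return &Server{rpcPort: rpcPort, config: conf}
}

// resolvePort is a placeholder for datautil.GetElemByIndex.
func resolvePort(ports []int, index int) (int, error) {
	if index < 0 || index >= len(ports) {
		return 0, fmt.Errorf("no prometheus port configured for instance %d", index)
	}
	return ports[index], nil
}

// Start resolves the port at the point of use, and only when enabled.
func (s *Server) Start(index int) error {
	if s.config.PrometheusEnable {
		port, err := resolvePort(s.config.PrometheusPorts, index)
		if err != nil {
			return err
		}
		fmt.Printf("metrics would listen on :%d\n", port)
	}
	fmt.Printf("gateway RPC would listen on :%d\n", s.rpcPort)
	return nil
}

func main() {
	s := NewServer(10140, &gwConfig{PrometheusEnable: false})
	_ = s.Start(0)
}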
@@ -44,13 +44,13 @@ import (
 )
 
 type MsgTransfer struct {
-	// This consumer aggregated messages, subscribed to the topic:ws2ms_chat,
+	// This consumer aggregated messages, subscribed to the topic:toRedis,
 	// the modification notification is sent to msg_to_modify topic, the message is stored in redis, Incr Redis,
-	// and then the message is sent to ms2pschat topic for push, and the message is sent to msg_to_mongo topic for persistence
+	// and then the message is sent to ms2pschat topic for push, and the message is sent to toMongo topic for persistence
 	historyCH      *OnlineHistoryRedisConsumerHandler
 	historyMongoCH *OnlineHistoryMongoConsumerHandler
 	// mongoDB batch insert, delete messages in redis after success,
-	// and handle the deletion notification message deleted subscriptions topic: msg_to_mongo
+	// and handle the deletion notification message deleted subscriptions topic: to
 	ctx    context.Context
 	cancel context.CancelFunc
 }
@@ -82,7 +82,6 @@ func Start(ctx context.Context, index int, config *Config) error {
 	}
 	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
 		grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
-	//todo MsgCacheTimeout
 	msgModel := cache.NewMsgCache(rdb, config.RedisConfig.EnablePipeline)
 	seqModel := cache.NewSeqCache(rdb)
 	msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
@@ -111,10 +110,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 }
 
 func (m *MsgTransfer) Start(index int, config *Config) error {
-	prometheusPort, err := datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
-	if err != nil {
-		return err
-	}
 	m.ctx, m.cancel = context.WithCancel(context.Background())
 
 	var (
@@ -124,20 +120,26 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
 
 	go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyCH)
 	go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyMongoCH)
-	err = m.historyCH.redisMessageBatches.Start()
+	err := m.historyCH.redisMessageBatches.Start()
 	if err != nil {
 		return err
 	}
 
 	if config.MsgTransfer.Prometheus.Enable {
 		go func() {
+			prometheusPort, err := datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
+			if err != nil {
+				netErr = err
+				netDone <- struct{}{}
+				return
+			}
 			proreg := prometheus.NewRegistry()
 			proreg.MustRegister(
 				collectors.NewGoCollector(),
 			)
 			proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer", &config.Share)...)
 			http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
-			err := http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
+			err = http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
 			if err != nil && err != http.ErrServerClosed {
 				netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
 				netDone <- struct{}{}
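The msgtransfer hunk keeps its existing metrics wiring (a dedicated prometheus.Registry, the Go runtime collector, and a promhttp handler); only the port lookup moves inside the goroutine. For readers unfamiliar with that wiring, here is a compact standalone sketch of the same registry/handler setup using only the client_golang pieces visible in the diff; the port is hard-coded for illustration and the custom "Transfer" metrics are omitted.

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A private registry instead of prometheus.DefaultRegisterer, so the
	// endpoint exposes exactly the collectors registered here.
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewGoCollector(), // Go runtime metrics, as in the diff
	)
	// The real code additionally registers prommetrics.GetGrpcCusMetrics("Transfer", ...).

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))

	prometheusPort := 12020 // illustrative; the service reads it from config
	err := http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
	if err != nil && err != http.ErrServerClosed {
		fmt.Println("prometheus start error:", err)
	}
}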
@@ -52,12 +52,9 @@ func Start[T any](ctx context.Context, zookeeperConfig *config2.ZooKeeper, prome
 	if err != nil {
 		return err
 	}
-	prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
-	if err != nil {
-		return err
-	}
 	log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort,
-		"prometheusPort", prometheusPort)
+		"prometheusPorts", prometheusConfig.Ports)
 	rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort))
 	listener, err := net.Listen(
 		"tcp",
@@ -117,9 +114,14 @@ func Start[T any](ctx context.Context, zookeeperConfig *config2.ZooKeeper, prome
 		netErr     error
 		httpServer *http.Server
 	)
+	if prometheusConfig.Enable {
 		go func() {
-			if prometheusConfig.Enable && prometheusPort != 0 {
+			prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
+			if err != nil {
+				netErr = err
+				netDone <- struct{}{}
+				return
+			}
 			metric.InitializeMetrics(srv)
 			// Create a HTTP server for prometheus.
 			httpServer = &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: fmt.Sprintf("0.0.0.0:%d", prometheusPort)}
@@ -127,8 +129,8 @@ func Start[T any](ctx context.Context, zookeeperConfig *config2.ZooKeeper, prome
 				netErr = errs.WrapMsg(err, "prometheus start err", httpServer.Addr)
 				netDone <- struct{}{}
 			}
-			}
 		}()
+	}
 
 	go func() {
 		err := srv.Serve(listener)
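In the generic RPC starter, the metrics endpoint is served from the http.Server value stored in httpServer (visible in the hunk above), and the goroutine is now created only when prometheusConfig.Enable is true, so the old prometheusPort != 0 check disappears together with the eager port lookup. Below is a standalone sketch of serving metrics from an http.Server and stopping it on context cancellation; the shutdown wiring is an assumption about typical usage, not a copy of the project's code.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func serveMetrics(ctx context.Context, port int) error {
	reg := prometheus.NewRegistry()
	httpServer := &http.Server{
		Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
		Addr:    fmt.Sprintf("0.0.0.0:%d", port),
	}

	errCh := make(chan error, 1)
	go func() {
		// ListenAndServe returns http.ErrServerClosed after Shutdown.
		errCh <- httpServer.ListenAndServe()
	}()

	select {
	case <-ctx.Done():
		// Assumed shutdown wiring: give in-flight scrapes a moment to finish.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		return httpServer.Shutdown(shutdownCtx)
	case err := <-errCh:
		if err != nil && err != http.ErrServerClosed {
			return err
		}
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(serveMetrics(ctx, 12021)) // illustrative port
}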
@@ -24,7 +24,7 @@ type Config struct {
 	dataBuffer int           // The size of the main data channel
 	worker     int           // Number of coroutines processed in parallel
 	interval   time.Duration // Time of message aggregations
-	syncWait   bool          // Whether to wait synchronously after distributing messages
+	syncWait   bool          // Whether to wait synchronously after distributing messages have been consumed
 }
 
 type Option func(c *Config)
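The last hunk only touches a field comment in the batcher's Config, but the surrounding lines show its configuration style: unexported fields set through functional options (type Option func(c *Config)). A hedged sketch of that pattern follows; WithWorker and WithSyncWait are hypothetical option constructors and the defaults are illustrative, not the batcher's real values.

package main

import (
	"fmt"
	"time"
)

// Config mirrors the batcher fields shown in the diff.
type Config struct {
	dataBuffer int           // The size of the main data channel
	worker     int           // Number of coroutines processed in parallel
	interval   time.Duration // Time of message aggregations
	syncWait   bool          // Whether to wait synchronously after distributing messages have been consumed
}

// Option is the functional-option type from the diff.
type Option func(c *Config)

// WithWorker and WithSyncWait are hypothetical option constructors used
// only to illustrate how Option values are applied.
func WithWorker(n int) Option       { return func(c *Config) { c.worker = n } }
func WithSyncWait(wait bool) Option { return func(c *Config) { c.syncWait = wait } }

// newConfig applies options over illustrative defaults.
func newConfig(opts ...Option) *Config {
	c := &Config{dataBuffer: 500, worker: 50, interval: 100 * time.Millisecond}
	for _, opt := range opts {
		opt(c)
	}
	return c
}

func main() {
	c := newConfig(WithWorker(8), WithSyncWait(true))
	fmt.Printf("%+v\n", *c)
}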