Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-04-25 19:22:46 +08:00)

Commit ebdb875c56: prometheus for statistics
Parent: 266a2b4c76
@ -1,7 +1,6 @@
package main

import "Open_IM/pkg/common/db"

func main() {
	db.DB.BatchInsertChat()
}

//
//func main() {
//	db.DB.BatchInsertChat()
//}
@ -18,13 +18,7 @@ import (
	"github.com/Shopify/sarama"
	"github.com/golang/protobuf/proto"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	msgInsertMysqlCounter prometheus.Counter
	msgInsertFailedMysqlCounter prometheus.Counter
	promePkg "Open_IM/pkg/common/prometheus"
)

type PersistentConsumerHandler struct {
@ -38,32 +32,16 @@ func (pc *PersistentConsumerHandler) Init() {
	pc.persistentConsumerGroup = kfk.NewMConsumerGroup(&kfk.MConsumerGroupConfig{KafkaVersion: sarama.V2_0_0_0,
		OffsetsInitial: sarama.OffsetNewest, IsReturnErr: false}, []string{config.Config.Kafka.Ws2mschat.Topic},
		config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.ConsumerGroupID.MsgToMySql)
	if config.Config.Prometheus.Enable {
		pc.initPrometheus()
	}
	pc.initPrometheus()
}

func (pc *PersistentConsumerHandler) initPrometheus() {
	// counter
	msgInsertMysqlCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "insert_mysql_msg_total",
		Help: "The total number of msg insert mysql events",
	})
	msgInsertFailedMysqlCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "insert_mysql_failed_msg_total",
		Help: "The total number of msg insert mysql events",
	})

	// start a timer
	// requestDurations := prometheus.NewHistogram(prometheus.HistogramOpts{
	//	Name:    "http_request_duration_seconds",
	//	Help:    "A histogram of the HTTP request durations in seconds.",
	//	Buckets: []float64{0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
	// })
	// start
	// timer := prometheus.NewTimer(requestDurations)
	// stop
	// timer.ObserveDuration()
	promePkg.NewSeqGetSuccessCounter()
	promePkg.NewSeqGetFailedCounter()
	promePkg.NewSeqSetSuccessCounter()
	promePkg.NewSeqSetFailedCounter()
	promePkg.NewMsgInsertRedisSuccessCounter()
	promePkg.NewMsgInsertRedisFailedCounter()
}

func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMessage, msgKey string, _ sarama.ConsumerGroupSession) {
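Note that initPrometheus above only registers counters with the default registry via promauto; they only become scrapeable once the process also serves an HTTP /metrics endpoint. A minimal sketch of that wiring (not part of this commit; the listen address is an illustrative placeholder), using client_golang's standard promhttp handler:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Expose everything registered through promauto / the default registry.
	http.Handle("/metrics", promhttp.Handler())
	// ":20500" is an illustrative port, not one defined by this commit.
	if err := http.ListenAndServe(":20500", nil); err != nil {
		panic(err)
	}
}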
@ -97,14 +75,8 @@ func (pc *PersistentConsumerHandler) handleChatWs2Mysql(cMsg *sarama.ConsumerMes
	log.NewInfo(msgFromMQ.OperationID, "msg_transfer msg persisting", string(msg))
	if err = im_mysql_msg_model.InsertMessageToChatLog(msgFromMQ); err != nil {
		log.NewError(msgFromMQ.OperationID, "Message insert failed", "err", err.Error(), "msg", msgFromMQ.String())
		if config.Config.Prometheus.Enable {
			msgInsertFailedMysqlCounter.Inc()
		}
		return
	}
	if config.Config.Prometheus.Enable {
		msgInsertMysqlCounter.Inc()
	}
}

}
@ -205,7 +205,7 @@ func (d *DataBases) GetMessageListBySeq(userID string, seqList []uint32, operati
	return seqMsg, failedSeqList, errResult
}

func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) error {
func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) (error, int) {
	ctx := context.Background()
	pipe := d.RDB.Pipeline()
	var failedList []pbChat.MsgDataToMQ
@ -225,10 +225,10 @@ func (d *DataBases) SetMessageToCache(msgList []*pbChat.MsgDataToMQ, uid string,
		}
	}
	if len(failedList) != 0 {
		return errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList, operationID))
		return errors.New(fmt.Sprintf("set msg to cache failed, failed lists: %q,%s", failedList, operationID)), len(failedList)
	}
	_, err := pipe.Exec(ctx)
	return err
	return err, 0
}
func (d *DataBases) DeleteMessageFromCache(msgList []*pbChat.MsgDataToMQ, uid string, operationID string) error {
	ctx := context.Background()
@ -4,6 +4,7 @@ import (
	"Open_IM/pkg/common/config"
	"Open_IM/pkg/common/constant"
	"Open_IM/pkg/common/log"
	promePkg "Open_IM/pkg/common/prometheus"
	pbMsg "Open_IM/pkg/proto/msg"
	"Open_IM/pkg/utils"
	"context"
@ -129,8 +130,11 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
		log.Debug(operationID, "constant.SingleChatType lastMaxSeq before add ", currentMaxSeq, "userID ", insertID, err)
	}
	if err != nil && err != go_redis.Nil {
		promePkg.PromeInc(promePkg.SeqGetFailedCounter)
		return utils.Wrap(err, ""), 0
	}
	promePkg.PromeInc(promePkg.SeqGetSuccessCounter)
	promePkg.SeqGetSuccessCounter.Inc()

	lastMaxSeq := currentMaxSeq
	for _, m := range msgList {
@ -142,9 +146,12 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
		log.Debug(operationID, "cache msg node ", m.String(), m.MsgData.ClientMsgID, "userID: ", insertID, "seq: ", currentMaxSeq)
	}
	log.Debug(operationID, "SetMessageToCache ", insertID, len(msgList))
	err = d.SetMessageToCache(msgList, insertID, operationID)
	err, failedNum := d.SetMessageToCache(msgList, insertID, operationID)
	if err != nil {
		promePkg.PromeAdd(promePkg.MsgInsertRedisFailedCounter, failedNum)
		log.Error(operationID, "setMessageToCache failed, continue ", err.Error(), len(msgList), insertID)
	} else {
		promePkg.PromeInc(promePkg.MsgInsertRedisSuccessCounter)
	}
	log.Debug(operationID, "batch to redis cost time ", getCurrentTimestampByMill()-newTime, insertID, len(msgList))
	if msgList[0].MsgData.SessionType == constant.SuperGroupChatType {
@ -152,6 +159,11 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
	} else {
		err = d.SetUserMaxSeq(insertID, currentMaxSeq)
	}
	if err != nil {
		promePkg.PromeInc(promePkg.SeqSetFailedCounter)
	} else {
		promePkg.PromeInc(promePkg.SeqSetSuccessCounter)
	}
	return utils.Wrap(err, ""), lastMaxSeq
}
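Instrumentation like the success/failure branches above can be unit tested against the counters themselves. A minimal sketch (my own illustration, not code from this commit) using client_golang's testutil package:

package prometheus_test

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestCounterIncrements(t *testing.T) {
	// A stand-alone counter so the test does not depend on package state.
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "seq_set_success_example",
		Help: "example counter for the test",
	})
	c.Inc()
	if got := testutil.ToFloat64(c); got != 1 {
		t.Fatalf("expected counter to be 1, got %v", got)
	}
}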
@ -171,99 +183,99 @@ func (d *DataBases) BatchInsertChat2Cache(insertID string, msgList []*pbMsg.MsgD
//	}
//	return nil, lastMaxSeq
//}

func (d *DataBases) BatchInsertChat(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) error {
	newTime := getCurrentTimestampByMill()
	if len(msgList) > GetSingleGocMsgNum() {
		return errors.New("too large")
	}
	isInit := false
	currentMaxSeq, err := d.GetUserMaxSeq(userID)
	if err == nil {

	} else if err == go_redis.Nil {
		isInit = true
		currentMaxSeq = 0
	} else {
		return utils.Wrap(err, "")
	}
	var remain uint64
	//if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
	//	remain = uint64(GetSingleGocMsgNum()-1) - (currentMaxSeq % uint64(GetSingleGocMsgNum()))
	//} else {
	//	remain = uint64(GetSingleGocMsgNum()) - ((currentMaxSeq - (uint64(GetSingleGocMsgNum()) - 1)) % uint64(GetSingleGocMsgNum()))
	//}

	blk0 := uint64(GetSingleGocMsgNum() - 1)
	if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
		remain = blk0 - currentMaxSeq
	} else {
		excludeBlk0 := currentMaxSeq - blk0
		remain = (uint64(GetSingleGocMsgNum()) - (excludeBlk0 % uint64(GetSingleGocMsgNum()))) % uint64(GetSingleGocMsgNum())
	}

	insertCounter := uint64(0)
	msgListToMongo := make([]MsgInfo, 0)
	msgListToMongoNext := make([]MsgInfo, 0)
	seqUid := ""
	seqUidNext := ""
	log.Debug(operationID, "remain ", remain, "insertCounter ", insertCounter, "currentMaxSeq ", currentMaxSeq, userID, len(msgList))
	//4998 remain ==1
	//4999
	for _, m := range msgList {
		log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
		currentMaxSeq++
		sMsg := MsgInfo{}
		sMsg.SendTime = m.MsgData.SendTime
		m.MsgData.Seq = uint32(currentMaxSeq)
		if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
			return utils.Wrap(err, "")
		}
		if isInit {
			msgListToMongoNext = append(msgListToMongoNext, sMsg)
			seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
			log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
			continue
		}
		if insertCounter < remain {
			msgListToMongo = append(msgListToMongo, sMsg)
			insertCounter++
			seqUid = getSeqUid(userID, uint32(currentMaxSeq))
			log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
		} else {
			msgListToMongoNext = append(msgListToMongoNext, sMsg)
			seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
			log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
		}
	}
	// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)

	ctx := context.Background()
	c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)

	if seqUid != "" {
		filter := bson.M{"uid": seqUid}
		log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo)
		err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err()
		if err != nil {
			log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
			return utils.Wrap(err, "")
		}
	}
	if seqUidNext != "" {
		filter := bson.M{"uid": seqUidNext}
		sChat := UserChat{}
		sChat.UID = seqUidNext
		sChat.Msg = msgListToMongoNext
		log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext)
		if _, err = c.InsertOne(ctx, &sChat); err != nil {
			log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
			return utils.Wrap(err, "")
		}
	}
	log.NewWarn(operationID, "batch mgo cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
	return utils.Wrap(d.SetUserMaxSeq(userID, uint64(currentMaxSeq)), "")
}
//
//func (d *DataBases) BatchInsertChat(userID string, msgList []*pbMsg.MsgDataToMQ, operationID string) error {
//	newTime := getCurrentTimestampByMill()
//	if len(msgList) > GetSingleGocMsgNum() {
//		return errors.New("too large")
//	}
//	isInit := false
//	currentMaxSeq, err := d.GetUserMaxSeq(userID)
//	if err == nil {
//
//	} else if err == go_redis.Nil {
//		isInit = true
//		currentMaxSeq = 0
//	} else {
//		return utils.Wrap(err, "")
//	}
//	var remain uint64
//	//if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
//	//	remain = uint64(GetSingleGocMsgNum()-1) - (currentMaxSeq % uint64(GetSingleGocMsgNum()))
//	//} else {
//	//	remain = uint64(GetSingleGocMsgNum()) - ((currentMaxSeq - (uint64(GetSingleGocMsgNum()) - 1)) % uint64(GetSingleGocMsgNum()))
//	//}
//
//	blk0 := uint64(GetSingleGocMsgNum() - 1)
//	if currentMaxSeq < uint64(GetSingleGocMsgNum()) {
//		remain = blk0 - currentMaxSeq
//	} else {
//		excludeBlk0 := currentMaxSeq - blk0
//		remain = (uint64(GetSingleGocMsgNum()) - (excludeBlk0 % uint64(GetSingleGocMsgNum()))) % uint64(GetSingleGocMsgNum())
//	}
//
//	insertCounter := uint64(0)
//	msgListToMongo := make([]MsgInfo, 0)
//	msgListToMongoNext := make([]MsgInfo, 0)
//	seqUid := ""
//	seqUidNext := ""
//	log.Debug(operationID, "remain ", remain, "insertCounter ", insertCounter, "currentMaxSeq ", currentMaxSeq, userID, len(msgList))
//	//4998 remain ==1
//	//4999
//	for _, m := range msgList {
//		log.Debug(operationID, "msg node ", m.String(), m.MsgData.ClientMsgID)
//		currentMaxSeq++
//		sMsg := MsgInfo{}
//		sMsg.SendTime = m.MsgData.SendTime
//		m.MsgData.Seq = uint32(currentMaxSeq)
//		if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
//			return utils.Wrap(err, "")
//		}
//		if isInit {
//			msgListToMongoNext = append(msgListToMongoNext, sMsg)
//			seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
//			log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
//			continue
//		}
//		if insertCounter < remain {
//			msgListToMongo = append(msgListToMongo, sMsg)
//			insertCounter++
//			seqUid = getSeqUid(userID, uint32(currentMaxSeq))
//			log.Debug(operationID, "msgListToMongo ", seqUid, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
//		} else {
//			msgListToMongoNext = append(msgListToMongoNext, sMsg)
//			seqUidNext = getSeqUid(userID, uint32(currentMaxSeq))
//			log.Debug(operationID, "msgListToMongoNext ", seqUidNext, m.MsgData.Seq, m.MsgData.ClientMsgID, insertCounter, remain)
//		}
//	}
//	// ctx, _ := context.WithTimeout(context.Background(), time.Duration(config.Config.Mongo.DBTimeout)*time.Second)
//
//	ctx := context.Background()
//	c := d.mongoClient.Database(config.Config.Mongo.DBDatabase).Collection(cChat)
//
//	if seqUid != "" {
//		filter := bson.M{"uid": seqUid}
//		log.NewDebug(operationID, "filter ", seqUid, "list ", msgListToMongo)
//		err := c.FindOneAndUpdate(ctx, filter, bson.M{"$push": bson.M{"msg": bson.M{"$each": msgListToMongo}}}).Err()
//		if err != nil {
//			log.Error(operationID, "FindOneAndUpdate failed ", err.Error(), filter)
//			return utils.Wrap(err, "")
//		}
//	}
//	if seqUidNext != "" {
//		filter := bson.M{"uid": seqUidNext}
//		sChat := UserChat{}
//		sChat.UID = seqUidNext
//		sChat.Msg = msgListToMongoNext
//		log.NewDebug(operationID, "filter ", seqUidNext, "list ", msgListToMongoNext)
//		if _, err = c.InsertOne(ctx, &sChat); err != nil {
//			log.NewError(operationID, "InsertOne failed", filter, err.Error(), sChat)
//			return utils.Wrap(err, "")
//		}
//	}
//	log.NewWarn(operationID, "batch mgo cost time ", getCurrentTimestampByMill()-newTime, userID, len(msgList))
//	return utils.Wrap(d.SetUserMaxSeq(userID, uint64(currentMaxSeq)), "")
//}

//func (d *DataBases)setMessageToCache(msgList []*pbMsg.MsgDataToMQ, uid string) (err error) {
//
@ -71,7 +71,7 @@ func Test_NewSetMessageToCache(t *testing.T) {
	data.AtUserIDList = []string{"1212", "23232"}
	msg.MsgData = &data
	messageList := []*pbChat.MsgDataToMQ{&msg}
	err := DB.SetMessageToCache(messageList, uid, "cacheTest")
	err, _ := DB.SetMessageToCache(messageList, uid, "cacheTest")
	assert.Nil(t, err)

}
@ -6,7 +6,6 @@ import (
)

var (
	// user rpc
	UserLoginCounter prometheus.Counter
	UserRegisterCounter prometheus.Counter
@ -14,6 +13,9 @@ var (
	SeqGetFailedCounter prometheus.Counter
	SeqSetSuccessCounter prometheus.Counter
	SeqSetFailedCounter prometheus.Counter

	MsgInsertRedisSuccessCounter prometheus.Counter
	MsgInsertRedisFailedCounter prometheus.Counter
)

func NewUserLoginCounter() {
@ -22,6 +24,12 @@ func NewUserLoginCounter() {
		Help: "The number of user login",
	})
}
func NewUserRegisterCounter() {
	UserRegisterCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "user_register",
		Help: "The number of user register",
	})
}

func NewSeqGetSuccessCounter() {
	SeqGetSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
@ -48,3 +56,17 @@ func NewSeqSetFailedCounter() {
		Help: "The number of failed set seq",
	})
}

func NewMsgInsertRedisSuccessCounter() {
	MsgInsertRedisSuccessCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "msg_insert_redis_success",
		Help: "The number of successful insert msg to redis",
	})
}

func NewMsgInsertRedisFailedCounter() {
	MsgInsertRedisFailedCounter = promauto.NewCounter(prometheus.CounterOpts{
		Name: "msg_insert_redis_failed",
		Help: "The number of failed insert msg to redis",
	})
}
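Elsewhere in the diff the call sites use promePkg.PromeInc(...) and promePkg.PromeAdd(..., n) rather than calling Inc/Add on the counters directly. Those helpers are not shown in this commit; a plausible nil-safe sketch of their shape (my assumption, not the actual implementation) would be:

package prometheus

import "github.com/prometheus/client_golang/prometheus"

// PromeInc increments a counter, tolerating counters that were never
// initialized (for example when config.Config.Prometheus.Enable is false).
func PromeInc(counter prometheus.Counter) {
	if counter != nil {
		counter.Inc()
	}
}

// PromeAdd adds n to a counter with the same nil guard.
func PromeAdd(counter prometheus.Counter, n int) {
	if counter != nil {
		counter.Add(float64(n))
	}
}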