Mirror of https://github.com/openimsdk/open-im-server.git (synced 2025-12-02 10:18:45 +08:00)

Merge branch 'fix/script' of github.com:luhaoling/open-im-server into fix/script

This commit is contained in: 96024983e2
@@ -53,12 +53,12 @@

## :busts_in_silhouette: Community

+ 📚 [OpenIM Community](https://github.com/OpenIMSDK/community)
+ 💕 [OpenIM Interest Group](https://github.com/Openim-sigs)
+ 💬 [Follow our Twitter account](https://twitter.com/founder_im63606)
+ 👫 [Join our Reddit](https://www.reddit.com/r/OpenIMessaging)
+ 🚀 [Join our Slack community](https://join.slack.com/t/openimsdk/shared_invite/zt-22720d66b-o_FvKxMTGXtcnnnHiMqe9Q)
+ :eyes: [Join our wechat (微信群)](https://openim-1253691595.cos.ap-nanjing.myqcloud.com/WechatIMG20.jpeg)
+ 👫 [Join our Reddit](https://www.reddit.com/r/OpenIMessaging)
+ 💬 [Follow our Twitter account](https://twitter.com/founder_im63606)
+ 📚 [OpenIM Community](https://github.com/OpenIMSDK/community)
+ 💕 [OpenIM Interest Group](https://github.com/Openim-sigs)

## Ⓜ️ About OpenIM
@@ -78,16 +78,17 @@ type Client struct {
 	token      string
 }
 
-func newClient(ctx *UserConnContext, conn LongConn, isCompress bool) *Client {
-	return &Client{
-		w:          new(sync.Mutex),
-		conn:       conn,
-		PlatformID: utils.StringToInt(ctx.GetPlatformID()),
-		IsCompress: isCompress,
-		UserID:     ctx.GetUserID(),
-		ctx:        ctx,
-	}
-}
+// function not used
+// func newClient(ctx *UserConnContext, conn LongConn, isCompress bool) *Client {
+// 	return &Client{
+// 		w:          new(sync.Mutex),
+// 		conn:       conn,
+// 		PlatformID: utils.StringToInt(ctx.GetPlatformID()),
+// 		IsCompress: isCompress,
+// 		UserID:     ctx.GetUserID(),
+// 		ctx:        ctx,
+// 	}
+// }
 
 // ResetClient updates the client's state with new connection and context information.
 func (c *Client) ResetClient(
@@ -108,10 +108,12 @@ func (d *GWebSocket) Dial(urlStr string, requestHeader http.Header) (*http.Respo
 }
 
 func (d *GWebSocket) IsNil() bool {
-	if d.conn != nil {
-		return false
-	}
-	return true
+	return d.conn == nil
+	//
+	// if d.conn != nil {
+	// 	return false
+	// }
+	// return true
 }
 
 func (d *GWebSocket) SetConnNil() {
@@ -61,11 +61,12 @@ type LongConnServer interface {
 	MessageHandler
 }
 
-var bufferPool = sync.Pool{
-	New: func() any {
-		return make([]byte, 1024)
-	},
-}
+// bufferPool is unused
+// var bufferPool = sync.Pool{
+// 	New: func() any {
+// 		return make([]byte, 1024)
+// 	},
+// }
 
 type WsServer struct {
 	port int
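The hunk above comments out an unused bufferPool. For context, a minimal sketch of how such a sync.Pool-backed read buffer is typically used; this is illustrative only and not part of the commit:

```go
package main

import (
	"fmt"
	"sync"
)

// bufferPool hands out reusable 1 KiB byte slices to avoid per-read allocations.
var bufferPool = sync.Pool{
	New: func() any {
		return make([]byte, 1024)
	},
}

func main() {
	buf := bufferPool.Get().([]byte) // take a buffer from the pool (or allocate via New)
	defer bufferPool.Put(buf)        // return it when done so later reads can reuse it

	n := copy(buf, "payload")
	fmt.Println(string(buf[:n]))
}
```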
@@ -58,12 +58,12 @@ func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) {
 func (u *UserMap) Set(key string, v *Client) {
 	allClients, existed := u.m.Load(key)
 	if existed {
-		log.ZDebug(context.Background(), "Set existed", "user_id", key, "client", *v)
+		log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID)
 		oldClients := allClients.([]*Client)
 		oldClients = append(oldClients, v)
 		u.m.Store(key, oldClients)
 	} else {
-		log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client", *v)
+		log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID)
 		var clients []*Client
 		clients = append(clients, v)
 		u.m.Store(key, clients)
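For reference, a compact sketch of the Load/append/Store pattern that UserMap.Set follows, using placeholder types rather than the actual openim structs. Note that Load followed by Store is not an atomic update, so concurrent writers on the same key need their own coordination, as the real UserMap provides around this code:

```go
package main

import (
	"fmt"
	"sync"
)

type client struct{ userID string }

// registry maps a user ID to all of that user's active clients.
type registry struct{ m sync.Map }

// add appends a client to the user's slice, creating the slice on first use.
func (r *registry) add(userID string, c *client) {
	if v, ok := r.m.Load(userID); ok {
		r.m.Store(userID, append(v.([]*client), c))
		return
	}
	r.m.Store(userID, []*client{c})
}

func main() {
	r := &registry{}
	r.add("u100", &client{userID: "u100"})
	r.add("u100", &client{userID: "u100"})
	v, _ := r.m.Load("u100")
	fmt.Println(len(v.([]*client))) // 2
}
```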
@@ -71,7 +71,7 @@ func StartTransfer(prometheusPort int) error {
 		return err
 	}
 
-	if err := client.CreateRpcRootNodes(config.Config.GetServiceNames()); err != nil {
+	if err2 := client.CreateRpcRootNodes(config.Config.GetServiceNames()); err2 != nil {
 		return err
 	}
 	client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
@@ -74,10 +74,10 @@ type OnlineHistoryRedisConsumerHandler struct {
 	chArrays          [ChannelNum]chan Cmd2Value
 	msgDistributionCh chan Cmd2Value
 
-	singleMsgSuccessCount      uint64
-	singleMsgFailedCount       uint64
-	singleMsgSuccessCountMutex sync.Mutex
-	singleMsgFailedCountMutex  sync.Mutex
+	// singleMsgSuccessCount      uint64
+	// singleMsgFailedCount       uint64
+	// singleMsgSuccessCountMutex sync.Mutex
+	// singleMsgFailedCountMutex  sync.Mutex
 
 	msgDatabase           controller.CommonMsgDatabase
 	conversationRpcClient *rpcclient.ConversationRpcClient
@@ -111,62 +111,59 @@ func NewOnlineHistoryRedisConsumerHandler(
 }
 
 func (och *OnlineHistoryRedisConsumerHandler) Run(channelID int) {
-	for {
-		select {
-		case cmd := <-och.chArrays[channelID]:
-			switch cmd.Cmd {
-			case SourceMessages:
-				msgChannelValue := cmd.Value.(MsgChannelValue)
-				ctxMsgList := msgChannelValue.ctxMsgList
-				ctx := msgChannelValue.ctx
-				log.ZDebug(
+	for cmd := range och.chArrays[channelID] {
+		switch cmd.Cmd {
+		case SourceMessages:
+			msgChannelValue := cmd.Value.(MsgChannelValue)
+			ctxMsgList := msgChannelValue.ctxMsgList
+			ctx := msgChannelValue.ctx
+			log.ZDebug(
 				ctx,
 				"msg arrived channel",
 				"channel id",
 				channelID,
 				"msgList length",
 				len(ctxMsgList),
 				"uniqueKey",
 				msgChannelValue.uniqueKey,
 			)
-			storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList, modifyMsgList := och.getPushStorageMsgList(
-				ctxMsgList,
-			)
-			log.ZDebug(
-				ctx,
-				"msg lens",
-				"storageMsgList",
-				len(storageMsgList),
-				"notStorageMsgList",
-				len(notStorageMsgList),
-				"storageNotificationList",
-				len(storageNotificationList),
-				"notStorageNotificationList",
-				len(notStorageNotificationList),
-				"modifyMsgList",
-				len(modifyMsgList),
-			)
-			conversationIDMsg := msgprocessor.GetChatConversationIDByMsg(ctxMsgList[0].message)
-			conversationIDNotification := msgprocessor.GetNotificationConversationIDByMsg(ctxMsgList[0].message)
-			och.handleMsg(ctx, msgChannelValue.uniqueKey, conversationIDMsg, storageMsgList, notStorageMsgList)
-			och.handleNotification(
-				ctx,
-				msgChannelValue.uniqueKey,
-				conversationIDNotification,
-				storageNotificationList,
-				notStorageNotificationList,
-			)
-			if err := och.msgDatabase.MsgToModifyMQ(ctx, msgChannelValue.uniqueKey, conversationIDNotification, modifyMsgList); err != nil {
-				log.ZError(
-					ctx,
-					"msg arrived channel",
-					"channel id",
-					channelID,
-					"msgList length",
-					len(ctxMsgList),
-					"msg to modify mq error",
-					err,
-					"uniqueKey",
-					msgChannelValue.uniqueKey,
-				)
+			storageMsgList, notStorageMsgList, storageNotificationList, notStorageNotificationList, modifyMsgList := och.getPushStorageMsgList(
+				ctxMsgList,
+			)
+			log.ZDebug(
+				ctx,
+				"msg lens",
+				"storageMsgList",
+				len(storageMsgList),
+				"notStorageMsgList",
+				len(notStorageMsgList),
+				"storageNotificationList",
+				len(storageNotificationList),
+				"notStorageNotificationList",
+				len(notStorageNotificationList),
+				"modifyMsgList",
+				len(modifyMsgList),
+				modifyMsgList,
+			)
+			conversationIDMsg := msgprocessor.GetChatConversationIDByMsg(ctxMsgList[0].message)
+			conversationIDNotification := msgprocessor.GetNotificationConversationIDByMsg(ctxMsgList[0].message)
+			och.handleMsg(ctx, msgChannelValue.uniqueKey, conversationIDMsg, storageMsgList, notStorageMsgList)
+			och.handleNotification(
+				ctx,
+				msgChannelValue.uniqueKey,
+				conversationIDNotification,
+				storageNotificationList,
+				notStorageNotificationList,
+			)
+			if err := och.msgDatabase.MsgToModifyMQ(ctx, msgChannelValue.uniqueKey, conversationIDNotification, modifyMsgList); err != nil {
+				log.ZError(
+					ctx,
+					"msg to modify mq error",
+					err,
+					"uniqueKey",
+					msgChannelValue.uniqueKey,
+					"modifyMsgList",
+					modifyMsgList,
+				)
+			}
 		}
 	}
 }
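The rewrite above replaces a single-case for/select loop with a plain range over the channel. A minimal sketch of the difference, using a hypothetical worker rather than the openim handler: ranging over a channel also terminates cleanly when the channel is closed, which the bare select form does not.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	cmds := make(chan string, 3)
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		// Equivalent to `for { select { case cmd := <-cmds: ... } }` for a single
		// channel, but the loop ends automatically once cmds is closed and drained.
		for cmd := range cmds {
			fmt.Println("handled:", cmd)
		}
	}()

	cmds <- "store"
	cmds <- "notify"
	close(cmds) // lets the range loop exit instead of blocking forever
	wg.Wait()
}
```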
@@ -22,7 +22,6 @@ import (
 
 type MsgUtilsCmd struct {
 	cobra.Command
-	msgTool *tools.MsgTool
 }
 
 func (m *MsgUtilsCmd) AddUserIDFlag() {
@@ -38,19 +37,19 @@ func (m *MsgUtilsCmd) AddFixAllFlag() {
 	m.Command.PersistentFlags().BoolP("fixAll", "f", false, "openIM fix all seqs")
 }
 
-func (m *MsgUtilsCmd) getFixAllFlag(cmdLines *cobra.Command) bool {
+/* func (m *MsgUtilsCmd) getFixAllFlag(cmdLines *cobra.Command) bool {
 	fixAll, _ := cmdLines.Flags().GetBool("fixAll")
 	return fixAll
-}
+} */
 
 func (m *MsgUtilsCmd) AddClearAllFlag() {
 	m.Command.PersistentFlags().BoolP("clearAll", "c", false, "openIM clear all seqs")
 }
 
-func (m *MsgUtilsCmd) getClearAllFlag(cmdLines *cobra.Command) bool {
+/* func (m *MsgUtilsCmd) getClearAllFlag(cmdLines *cobra.Command) bool {
 	clearAll, _ := cmdLines.Flags().GetBool("clearAll")
 	return clearAll
-}
+} */
 
 func (m *MsgUtilsCmd) AddSuperGroupIDFlag() {
 	m.Command.PersistentFlags().StringP("superGroupID", "g", "", "openIM superGroupID")
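The getter helpers above are being commented out, but the flag registration pattern stays. For reference, a minimal cobra sketch of defining and reading such a persistent flag; this is a standalone example, not the OpenIM msgutils command:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "seq",
		RunE: func(c *cobra.Command, args []string) error {
			// Equivalent of the commented-out getFixAllFlag helper.
			fixAll, err := c.Flags().GetBool("fixAll")
			if err != nil {
				return err
			}
			fmt.Println("fixAll =", fixAll)
			return nil
		},
	}
	cmd.PersistentFlags().BoolP("fixAll", "f", false, "openIM fix all seqs")

	cmd.SetArgs([]string{"--fixAll"})
	_ = cmd.Execute()
}
```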
@@ -65,19 +64,19 @@ func (m *MsgUtilsCmd) AddBeginSeqFlag() {
 	m.Command.PersistentFlags().Int64P("beginSeq", "b", 0, "openIM beginSeq")
 }
 
-func (m *MsgUtilsCmd) getBeginSeqFlag(cmdLines *cobra.Command) int64 {
+/* func (m *MsgUtilsCmd) getBeginSeqFlag(cmdLines *cobra.Command) int64 {
 	beginSeq, _ := cmdLines.Flags().GetInt64("beginSeq")
 	return beginSeq
-}
+} */
 
 func (m *MsgUtilsCmd) AddLimitFlag() {
 	m.Command.PersistentFlags().Int64P("limit", "l", 0, "openIM limit")
 }
 
-func (m *MsgUtilsCmd) getLimitFlag(cmdLines *cobra.Command) int64 {
+/* func (m *MsgUtilsCmd) getLimitFlag(cmdLines *cobra.Command) int64 {
 	limit, _ := cmdLines.Flags().GetInt64("limit")
 	return limit
-}
+} */
 
 func (m *MsgUtilsCmd) Execute() error {
 	return m.Command.Execute()
@@ -134,6 +133,7 @@ func NewSeqCmd() *SeqCmd {
 	return seqCmd
 }
 
+
 func (s *SeqCmd) GetSeqCmd() *cobra.Command {
 	s.Command.Run = func(cmdLines *cobra.Command, args []string) {
 		_, err := tools.InitMsgTool()
@@ -26,7 +26,10 @@ import (
 
 func FriendPb2DB(friend *sdkws.FriendInfo) *relation.FriendModel {
 	dbFriend := &relation.FriendModel{}
-	utils.CopyStructFields(dbFriend, friend)
+	err := utils.CopyStructFields(dbFriend, friend)
+	if err != nil {
+		return nil
+	}
 	dbFriend.FriendUserID = friend.FriendUser.UserID
 	dbFriend.CreateTime = utils.UnixSecondToTime(friend.CreateTime)
 	return dbFriend
@@ -69,7 +72,11 @@ func FriendsDB2Pb(
 	}
 	for _, friend := range friendsDB {
 		friendPb := &sdkws.FriendInfo{FriendUser: &sdkws.UserInfo{}}
-		utils.CopyStructFields(friendPb, friend)
+		err := utils.CopyStructFields(friendPb, friend)
+		if err != nil {
+			return nil, err
+		}
+
 		friendPb.FriendUser.UserID = users[friend.FriendUserID].UserID
 		friendPb.FriendUser.Nickname = users[friend.FriendUserID].Nickname
 		friendPb.FriendUser.FaceURL = users[friend.FriendUserID].FaceURL
@@ -79,6 +86,7 @@ func FriendsDB2Pb(
 		friendsPb = append(friendsPb, friendPb)
 	}
 	return friendsPb, nil
+
 }
 
 func FriendRequestDB2Pb(
pkg/common/db/cache/conversation.go (vendored, 49 changed lines)
@@ -16,7 +16,6 @@ package cache
 
 import (
 	"context"
 	"errors"
 	"math/big"
 	"strings"
 	"time"
@@ -220,16 +219,16 @@ func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversati
 	return cache
 }
 
-func (c *ConversationRedisCache) getConversationIndex(convsation *relationtb.ConversationModel, keys []string) (int, error) {
-	key := c.getConversationKey(convsation.OwnerUserID, convsation.ConversationID)
-	for _i, _key := range keys {
-		if _key == key {
-			return _i, nil
-		}
-	}
-
-	return 0, errors.New("not found key:" + key + " in keys")
-}
+// func (c *ConversationRedisCache) getConversationIndex(convsation *relationtb.ConversationModel, keys []string) (int, error) {
+// 	key := c.getConversationKey(convsation.OwnerUserID, convsation.ConversationID)
+// 	for _i, _key := range keys {
+// 		if _key == key {
+// 			return _i, nil
+// 		}
+// 	}
+
+// 	return 0, errors.New("not found key:" + key + " in keys")
+// }
 
 func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.ConversationModel, error) {
 	//var keys []string
@@ -333,7 +332,7 @@ func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupI
 	return cache
 }
 
-func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex(conversationID string, conversationIDs []string) (int, error) {
+/* func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex(conversationID string, conversationIDs []string) (int, error) {
 	for _i, _conversationID := range conversationIDs {
 		if _conversationID == conversationID {
 			return _i, nil
@@ -341,21 +340,21 @@ func (c *ConversationRedisCache) getUserAllHasReadSeqsIndex(conversationID strin
 	}
 
 	return 0, errors.New("not found key:" + conversationID + " in keys")
-}
+} */
 
-//func (c *ConversationRedisCache) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
-//	conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
-//	if err != nil {
-//		return nil, err
-//	}
-//	var keys []string
-//	for _, conversarionID := range conversationIDs {
-//		keys = append(keys, c.getConversationHasReadSeqKey(ownerUserID, conversarionID))
-//	}
-//	return batchGetCacheMap(ctx, c.rcClient, keys, conversationIDs, c.expireTime, c.getUserAllHasReadSeqsIndex, func(ctx context.Context) (map[string]int64, error) {
-//		return c.conversationDB.GetUserAllHasReadSeqs(ctx, ownerUserID)
-//	})
-//}
+/* func (c *ConversationRedisCache) GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error) {
+	conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
+	if err != nil {
+		return nil, err
+	}
+	var keys []string
+	for _, conversarionID := range conversationIDs {
+		keys = append(keys, c.getConversationHasReadSeqKey(ownerUserID, conversarionID))
+	}
+	return batchGetCacheMap(ctx, c.rcClient, keys, conversationIDs, c.expireTime, c.getUserAllHasReadSeqsIndex, func(ctx context.Context) (map[string]int64, error) {
+		return c.conversationDB.GetUserAllHasReadSeqs(ctx, ownerUserID)
+	})
+} */
 
 func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache {
 	cache := c.NewCache()
@@ -75,6 +75,7 @@ func (a *authDatabase) CreateToken(ctx context.Context, userID string, platformI
 			return "", err
 		}
 	}
+
 	claims := tokenverify.BuildClaims(userID, platformID, a.accessExpire)
 	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
 	tokenString, err := token.SignedString([]byte(a.accessSecret))
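The CreateToken hunk builds HS256-signed claims. A minimal standalone sketch of that flow using github.com/golang-jwt/jwt/v4; the secret, expiry, and claim fields below are placeholders, not OpenIM's tokenverify.BuildClaims:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func createToken(userID string, platformID int, secret string, expire time.Duration) (string, error) {
	// Placeholder claims; OpenIM builds its own claim struct via tokenverify.BuildClaims.
	claims := jwt.MapClaims{
		"userID":     userID,
		"platformID": platformID,
		"exp":        time.Now().Add(expire).Unix(),
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	return token.SignedString([]byte(secret)) // HMAC keys are passed as []byte
}

func main() {
	s, err := createToken("u100", 1, "example-secret", 24*time.Hour)
	fmt.Println(s, err)
}
```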
@@ -105,7 +105,7 @@ func (c *conversationDatabase) SetUsersConversationFiledTx(ctx context.Context,
 	now := time.Now()
 	for _, v := range NotUserIDs {
 		temp := new(relationtb.ConversationModel)
-		if err := utils.CopyStructFields(temp, conversation); err != nil {
+		if err = utils.CopyStructFields(temp, conversation); err != nil {
 			return err
 		}
 		temp.OwnerUserID = v
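Several hunks in this commit change `if err := ...` to `if err = ...` inside functions that already have an err in scope. A small sketch of why: `:=` inside the if statement declares a new err that shadows the outer one, which linters flag, while `=` reuses the existing variable.

```go
package main

import (
	"errors"
	"fmt"
)

func work(fail bool) error {
	if fail {
		return errors.New("copy failed")
	}
	return nil
}

func run() (err error) {
	// Shadowing form: this err is a new variable local to the if statement.
	if err := work(false); err != nil {
		return err
	}

	// Reusing form (as in the hunks above): assigns to the named return err.
	if err = work(true); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(run()) // copy failed
}
```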
@@ -846,7 +846,7 @@ func (db *commonMsgDatabase) deleteMsgRecursion(ctx context.Context, conversatio
 		}
 	}
 	if len(delMsgIndexs) > 0 {
-		if err := db.msgDocDatabase.DeleteMsgsInOneDocByIndex(ctx, msgDocModel.DocID, delMsgIndexs); err != nil {
+		if err = db.msgDocDatabase.DeleteMsgsInOneDocByIndex(ctx, msgDocModel.DocID, delMsgIndexs); err != nil {
 			log.ZError(ctx, "deleteMsgRecursion DeleteMsgsInOneDocByIndex failed", err, "conversationID", conversationID, "index", index)
 		}
 		delStruct.minSeq = int64(msgDocModel.Msg[delMsgIndexs[len(delMsgIndexs)-1]].Msg.Seq)
@@ -106,7 +106,7 @@ func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64
 		partNumber++
 	}
 	if maxParts > 0 && partNumber > 0 && partNumber < maxParts {
-		return nil, errors.New(fmt.Sprintf("too many parts: %d", partNumber))
+		return nil, fmt.Errorf("too many parts: %d", partNumber)
 	}
 	if info, err := c.StatObject(ctx, c.HashPath(hash)); err == nil {
 		return nil, &HashAlreadyExistsError{Object: info}
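The InitiateUpload hunk swaps errors.New(fmt.Sprintf(...)) for fmt.Errorf, the form static analyzers recommend. A quick sketch of the idiom, including %w for when a cause should stay matchable with errors.Is; the values below are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
)

var errTooLarge = errors.New("object too large")

func check(partNumber, maxParts int) error {
	if partNumber > maxParts {
		// Preferred over errors.New(fmt.Sprintf(...)): one call, no intermediate string step.
		return fmt.Errorf("too many parts: %d", partNumber)
	}
	return nil
}

func checkWrapped(size, limit int64) error {
	if size > limit {
		// %w wraps a sentinel so callers can still match it with errors.Is.
		return fmt.Errorf("size %d exceeds %d: %w", size, limit, errTooLarge)
	}
	return nil
}

func main() {
	fmt.Println(check(11, 10))
	fmt.Println(errors.Is(checkWrapped(2048, 1024), errTooLarge)) // true
}
```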
@@ -52,8 +52,8 @@ const (
 const successCode = http.StatusOK
 
 const (
-	videoSnapshotImagePng = "png"
-	videoSnapshotImageJpg = "jpg"
+	// videoSnapshotImagePng = "png"
+	// videoSnapshotImageJpg = "jpg"
 )
 
 func NewCos() (s3.Interface, error) {
@@ -140,7 +140,7 @@ func (m *Minio) initMinio(ctx context.Context) error {
 		return fmt.Errorf("check bucket exists error: %w", err)
 	}
 	if !exists {
-		if err := m.core.Client.MakeBucket(ctx, conf.Bucket, minio.MakeBucketOptions{}); err != nil {
+		if err = m.core.Client.MakeBucket(ctx, conf.Bucket, minio.MakeBucketOptions{}); err != nil {
 			return fmt.Errorf("make bucket error: %w", err)
 		}
 	}
@@ -77,11 +77,11 @@ if [[ $? -ne 0 ]]; then
   openim::log::error_exit "The service does not start properly, please check the port, query variable definition!"
   echo "+++ https://github.com/openimsdk/open-im-server/tree/main/scripts/install/environment.sh +++"
 else
-  openim::color::echo $COLOR_GREEN "All components depended on by openim are running normally! "
+  openim::log::success "All components depended on by openim are running normally! "
 fi
 
-openim::log::info "\n## Check OpenIM service name: \n ${OPENIM_OUTPUT_HOSTBIN}/openim-msgtransfer"
+openim::log::info "\n## Check openim service name:\n${OPENIM_OUTPUT_HOSTBIN}/openim-msgtransfer"
 result=$(. $(dirname ${BASH_SOURCE})/install/openim-msgtransfer.sh openim::msgtransfer::check)
 if [[ $? -ne 0 ]]; then
   echo "+++ cat openim log file >>> ${LOG_FILE}"
@@ -89,7 +89,7 @@
 fi
 
 
-echo "Check OpenIM service name:"
+echo "Check openim service name:"
 for item in "${OPENIM_ALL_SERVICE_LIBRARIES_NO_TRANSFER[@]}"; do
   echo "$item"
 done
@@ -97,8 +97,9 @@ done
 result=$(openim::util::check_process_names ${OPENIM_ALL_SERVICE_LIBRARIES_NO_TRANSFER[@]})
 if [[ $? -ne 0 ]]; then
   echo "+++ cat openim log file >>> ${LOG_FILE}"
-  openim::log::error "check process failed.\n $result"
+  openim::log::error "check process failed.\n "
+  echo "$result"
 else
-  openim::color::echo $COLOR_GREEN "All openim services are running normally! "
+  openim::log::success "All openim services are running normally! "
 fi
@@ -65,10 +65,11 @@ function openim::msgtransfer::start() {
 
 function openim::msgtransfer::check() {
   PIDS=$(pgrep -f "${OPENIM_OUTPUT_HOSTBIN}/openim-msgtransfer")
-  echo "transsssssssssssssssssssssss" $PIDS
-  echo "$PIDS" | wc -l
-  NUM_PROCESSES=$(echo "$PIDS" | wc -l)
+  echo "tdddddddddddddddddranssssssssssssssssssssssss" $PIDS
+  if [ -z "$PIDS" ]; then
+    NUM_PROCESSES=0
+  else
+    NUM_PROCESSES=$(echo "$PIDS" | wc -l)
+  fi
   if [ "$NUM_PROCESSES" -eq "$OPENIM_MSGGATEWAY_NUM" ]; then
     for PID in $PIDS; do
      if [[ "$OSTYPE" == "linux-gnu"* ]]; then
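The msgtransfer check above guards against counting an empty pgrep result, because piping an empty string through `wc -l` still reports 1. The same rule expressed as a small Go helper, purely for illustration; the shell change itself is the fix:

```go
package main

import (
	"fmt"
	"strings"
)

// countPIDs counts newline-separated PIDs, treating empty output as zero,
// mirroring the `if [ -z "$PIDS" ]` guard added in the script above.
func countPIDs(pgrepOutput string) int {
	trimmed := strings.TrimSpace(pgrepOutput)
	if trimmed == "" {
		return 0 // `wc -l` on an echoed empty string would report 1 here
	}
	return len(strings.Split(trimmed, "\n"))
}

func main() {
	fmt.Println(countPIDs(""))             // 0
	fmt.Println(countPIDs("1234\n5678\n")) // 2
}
```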
@@ -206,22 +206,27 @@ openim::log::status() {
   fi
 
   timestamp=$(date +"[%Y-%m-%d %H:%M:%S %Z]")
-  echo_log "+++ ${timestamp} ${1}"
+  echo_log "${timestamp} ${1}"
   shift
   for message; do
     echo_log "    ${message}"
   done
 }
 
+
 openim::log::success() {
   local V="${V:-0}"
   if [[ ${OPENIM_VERBOSE} < ${V} ]]; then
     return
   fi
-  timestamp=$(date +"%m%d %H:%M:%S")
-  echo_log -e "${COLOR_GREEN}[success ${timestamp}] ${COLOR_SUFFIX}==> " "$@"
+  local timestamp=$(date +"%m%d %H:%M:%S")
+  local reset_color='\033[0m'
+  echo_log -e "${COLOR_GREEN}[success ${timestamp}]${COLOR_SUFFIX}==> ${COLOR_GREEN}$@${reset_color}"
 }
 
+
+
+
 function openim::log::test_log() {
   echo_log "test log"
   openim::log::info "openim::log::info"
@@ -360,8 +360,8 @@ openim::util::check_ports() {
 
   # If any of the processes is not running, return a status of 1.
   if [[ ${#not_started[@]} -ne 0 ]]; then
-    openim::color::echo $COLOR_RED " OpenIM Stdout Log >> cat ${LOG_FILE}"
-    openim::color::echo $COLOR_RED " OpenIM Stderr Log >> cat ${STDERR_LOG_FILE}"
+    openim::color::echo $COLOR_RED "OpenIM Stdout Log >> cat ${LOG_FILE}"
+    openim::color::echo $COLOR_RED "OpenIM Stderr Log >> cat ${STDERR_LOG_FILE}"
     cat "$TMP_LOG_FILE" | awk '{print "\033[31m" $0 "\033[0m"}'
     return 1
   else
@@ -402,7 +402,6 @@ openim::util::check_process_names() {
   local not_started=()
   local started=()
 
   echo "Checking processes: $*"
   # Iterate over each given process name
   for process_name in "$@"; do
     # Use `pgrep` to find process IDs related to the given process name
@@ -432,7 +431,7 @@
   if [[ ${#not_started[@]} -ne 0 ]]; then
     echo "Not started processes:"
     for process_name in "${not_started[@]}"; do
-      openim::log::error "Process $process_name is not started."
+      echo "Process $process_name is not started."
     done
   fi
@@ -446,8 +445,8 @@
 
   # Return status
   if [[ ${#not_started[@]} -ne 0 ]]; then
-    openim::color::echo $COLOR_RED " OpenIM Stdout Log >> cat ${LOG_FILE}"
-    openim::color::echo $COLOR_RED " OpenIM Stderr Log >> cat ${STDERR_LOG_FILE}"
+    openim::color::echo $COLOR_RED "OpenIM Stdout Log >> cat ${LOG_FILE}"
+    openim::color::echo $COLOR_RED "OpenIM Stderr Log >> cat ${STDERR_LOG_FILE}"
     cat "$TMP_LOG_FILE" | awk '{print "\033[31m" $0 "\033[0m"}'
     return 1
   else
@@ -477,7 +476,7 @@ openim::util::check_process_names_for_stop() {
   local not_started=()
   local started=()
 
   echo "Checking processes: $*"
 
   # Iterate over each given process name
   for process_name in "$@"; do
     # Use `pgrep` to find process IDs related to the given process name
@@ -1635,8 +1634,8 @@ openim::util::check_ports() {
 
   # If any of the processes is not running, return a status of 1.
   if [[ ${#not_started[@]} -ne 0 ]]; then
-    openim::color::echo $COLOR_RED " OpenIM Stdout Log >> cat ${LOG_FILE}"
-    openim::color::echo $COLOR_RED " OpenIM Stderr Log >> cat ${STDERR_LOG_FILE}"
+    openim::color::echo $COLOR_RED "OpenIM Stdout Log >> cat ${LOG_FILE}"
+    openim::color::echo $COLOR_RED "OpenIM Stderr Log >> cat ${STDERR_LOG_FILE}"
     echo ""
     cat "$TMP_LOG_FILE" | awk '{print "\033[31m" $0 "\033[0m"}'
     return 1
@@ -2879,7 +2878,7 @@ function openim::util::check_process_names_for_stop() {
       NUM_PROCESSES=$(echo "$PIDS" | wc -l | xargs)
       if [ "$NUM_PROCESSES" -gt 0 ]; then
         all_stopped=false
-        openim::log::error "Found $NUM_PROCESSES processes for ${service}"
+        echo "Found $NUM_PROCESSES processes for ${service}"
         for PID in $PIDS; do
           if [[ "$OSTYPE" == "linux-gnu"* ]]; then
             echo -e "\033[31m$(ps -p $PID -o pid,cmd)\033[0m"
@@ -2889,7 +2888,7 @@ function openim::util::check_process_names_for_stop() {
             openim::log::error "Unsupported OS type: $OSTYPE"
           fi
         done
-        openim::log::error "Processes for ${service} have not been stopped properly."
+        echo "Processes for ${service} have not been stopped properly. " "$NUM_PROCESSES"
       fi
     done
@@ -37,7 +37,7 @@ function execute_start_scripts() {
 
     # Check if the script file exists and is executable.
     if [[ -x "$script_path" ]]; then
-      openim::log::status "Starting script: ${script_path##*/}" # Log the script name.
+      openim::log::info "Starting script: ${script_path##*/}"   # Log the script name.
 
       # Execute the script with the constructed argument.
       result=$("$script_path" "$arg")
@@ -57,7 +57,7 @@ function execute_start_scripts() {
 
 
 openim::log::info "\n# Begin to start all openim service scripts"
 
 
 openim::golang::check_openim_binaries
@@ -75,7 +75,7 @@ echo "You need to start the following scripts in order: ${OPENIM_SERVER_SCRIPTAR
 # TODO Prelaunch tools, simple for now, can abstract functions later
 TOOLS_START_SCRIPTS_PATH=${START_SCRIPTS_PATH}/openim-tools.sh
 
-openim::log::info "\n## Pre Starting OpenIM services"
+openim::log::status "\n## Pre Starting OpenIM services"
 ${TOOLS_START_SCRIPTS_PATH} openim::tools::pre-start
@@ -88,7 +88,7 @@ fi
 
 
-openim::log::info "\n## Starting OpenIM services"
+openim::log::status "\n## Starting openim scripts: "
 execute_start_scripts
 
 sleep 2
@@ -109,7 +109,7 @@ if [[ $? -ne 0 ]]; then
 fi
 
-openim::log::info "\n## Post Starting OpenIM services"
+openim::log::info "\n## Post Starting openim services"
 ${TOOLS_START_SCRIPTS_PATH} openim::tools::post-start
 
-openim::color::echo $COLOR_GREEN "✨ All OpenIM services have been successfully started!"
+openim::log::success "All openim services have been successfully started!"
@@ -26,14 +26,14 @@ OPENIM_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 
 source "${OPENIM_ROOT}/scripts/install/common.sh"
 
-openim::log::info "\n# Begin to stop all openim service"
+openim::log::status "Begin to stop all openim service"
 
-echo -e "\n++ Stop all processes in the path ${OPENIM_OUTPUT_HOSTBIN}"
+openim::log::status "Stop all processes in the path ${OPENIM_OUTPUT_HOSTBIN}"
 
 openim::util::stop_services_with_name "${OPENIM_OUTPUT_HOSTBIN}"
 # todo OPENIM_ALL_SERVICE_LIBRARIES
 
 sleep 1
 
 
 max_retries=15
@@ -44,15 +44,17 @@
   result=$(openim::util::check_process_names_for_stop)
 
   if [[ $? -ne 0 ]]; then
-    echo "+++ cat openim log file >>> ${LOG_FILE}"
-    openim::log::error "stop process failed. continue waiting\n" "${result}"
-    sleep 1
+    if [[ $attempt -ne 0 ]] ; then
+      echo "+++ cat openim log file >>> ${LOG_FILE} " $attempt
+      openim::log::error "stop process failed. continue waiting\n" "${result}"
+    fi
+    sleep 1
     ((attempt++))
   else
-    openim::log::success "✨ All processes to be stopped"
+    openim::log::success " All openim processes to be stopped"
     exit 0
   fi
 done
 
-openim::log::error "✨ openim processes stopped failed"
+openim::log::error "openim processes stopped failed"
 exit 1