update loadConfig and discovery logic in kubernetes.
This commit is contained in:
parent c9e13e645e
commit 0d8f6ddb22
@@ -5,6 +5,9 @@ etcd:
   username: ''
   password: ''
 
+kubernetes:
+  namespace: default
+
 rpcService:
   user: user-rpc-service
   friend: friend-rpc-service
@@ -15,5 +18,3 @@ rpcService:
   auth: auth-rpc-service
   conversation: conversation-rpc-service
   third: third-rpc-service
-
-
@@ -17,9 +17,6 @@ package api
 import (
 	"context"
 	"fmt"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	"github.com/openimsdk/tools/utils/datautil"
-	"github.com/openimsdk/tools/utils/network"
 	"net"
 	"net/http"
 	"os"
@@ -28,6 +25,11 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/tools/utils/network"
+	"github.com/openimsdk/tools/utils/runtimeenv"
+
 	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
 	"github.com/openimsdk/tools/discovery"
@@ -40,6 +42,8 @@ type Config struct {
 	API       config.API
 	Share     config.Share
 	Discovery config.Discovery
+
+	RuntimeEnv string
 }
 
 func Start(ctx context.Context, index int, config *Config) error {
@@ -48,10 +52,12 @@ func Start(ctx context.Context, index int, config *Config) error {
 		return err
 	}
 
+	config.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
+
 	var client discovery.SvcDiscoveryRegistry
 
 	// Determine whether zk is passed according to whether it is a clustered deployment
-	client, err = kdisc.NewDiscoveryRegister(&config.Discovery)
+	client, err = kdisc.NewDiscoveryRegister(&config.Discovery, config.RuntimeEnv)
 	if err != nil {
 		return errs.WrapMsg(err, "failed to register discovery service")
 	}
@@ -81,7 +87,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	address := net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort))
 
 	server := http.Server{Addr: address, Handler: router}
-	log.CInfo(ctx, "API server is initializing", "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
+	log.CInfo(ctx, "API server is initializing", "runtimeEnv", config.RuntimeEnv, "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
 	go func() {
 		err = server.ListenAndServe()
 		if err != nil && err != http.ErrServerClosed {
@@ -16,11 +16,13 @@ package msggateway
 
 import (
 	"context"
+	"time"
+
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/utils/datautil"
-	"time"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 
 	"github.com/openimsdk/tools/log"
 )
@@ -31,11 +33,15 @@ type Config struct {
 	RedisConfig    config.Redis
 	WebhooksConfig config.Webhooks
 	Discovery      config.Discovery
+
+	RuntimeEnv string
 }
 
 // Start run ws server.
 func Start(ctx context.Context, index int, conf *Config) error {
-	log.CInfo(ctx, "MSG-GATEWAY server is initializing", "autoSetPorts", conf.MsgGateway.RPC.AutoSetPorts,
+	conf.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
+
+	log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", conf.RuntimeEnv, "autoSetPorts", conf.MsgGateway.RPC.AutoSetPorts,
 		"rpcPorts", conf.MsgGateway.RPC.Ports,
 		"wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
 	wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)
@@ -29,6 +29,7 @@ import (
 	"github.com/openimsdk/tools/db/mongoutil"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
@@ -51,6 +52,8 @@ type MsgTransfer struct {
 	historyMongoCH *OnlineHistoryMongoConsumerHandler
 	ctx            context.Context
 	cancel         context.CancelFunc
+
+	runTimeEnv string
 }
 
 type Config struct {
@@ -64,7 +67,9 @@ type Config struct {
 }
 
 func Start(ctx context.Context, index int, config *Config) error {
-	log.CInfo(ctx, "MSG-TRANSFER server is initializing", "prometheusPorts",
+	runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
+	log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runTimeEnv, "prometheusPorts",
 		config.MsgTransfer.Prometheus.Ports, "index", index)
 
 	mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
@@ -75,7 +80,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	if err != nil {
 		return err
 	}
-	client, err := discRegister.NewDiscoveryRegister(&config.Discovery)
+	client, err := discRegister.NewDiscoveryRegister(&config.Discovery, runTimeEnv)
 	if err != nil {
 		return err
 	}
@@ -115,6 +120,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	msgTransfer := &MsgTransfer{
 		historyCH:      historyCH,
 		historyMongoCH: historyMongoCH,
+		runTimeEnv:     runTimeEnv,
 	}
 	return msgTransfer.Start(index, config)
 }
@@ -14,6 +14,7 @@ import (
 
 	"github.com/openimsdk/tools/mcontext"
 	"github.com/openimsdk/tools/mw"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
 
@@ -26,14 +27,18 @@ type CronTaskConfig struct {
 	CronTask  config.CronTask
 	Share     config.Share
 	Discovery config.Discovery
+
+	runTimeEnv string
 }
 
 func Start(ctx context.Context, config *CronTaskConfig) error {
-	log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
+	config.runTimeEnv = runtimeenv.PrintRuntimeEnvironment()
+
+	log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", config.runTimeEnv, "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
 	if config.CronTask.RetainChatRecords < 1 {
 		return errs.New("msg destruct time must be greater than 1").Wrap()
 	}
-	client, err := kdisc.NewDiscoveryRegister(&config.Discovery)
+	client, err := kdisc.NewDiscoveryRegister(&config.Discovery, config.runTimeEnv)
 	if err != nil {
 		return errs.WrapMsg(err, "failed to register discovery service")
 	}
@@ -16,12 +16,12 @@ package cmd
 
 import (
 	"fmt"
 	"path/filepath"
 
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/version"
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 	"github.com/spf13/cobra"
 )
-
@@ -105,18 +105,19 @@ func (r *RootCmd) initializeConfiguration(cmd *cobra.Command, opts *CmdOpts) err
 	if err != nil {
 		return err
 	}
+
+	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
 	// Load common configuration file
 	//opts.configMap[ShareFileName] = StructEnvPrefix{EnvPrefix: shareEnvPrefix, ConfigStruct: &r.share}
 	for configFileName, configStruct := range opts.configMap {
-		err := config.LoadConfig(filepath.Join(configDirectory, configFileName),
-			ConfigEnvPrefixMap[configFileName], configStruct)
+		err := config.Load(configDirectory, configFileName, ConfigEnvPrefixMap[configFileName], runtimeEnv, configStruct)
 		if err != nil {
 			return err
 		}
 	}
 	// Load common log configuration file
-	return config.LoadConfig(filepath.Join(configDirectory, LogConfigFileName),
-		ConfigEnvPrefixMap[LogConfigFileName], &r.log)
+	return config.Load(configDirectory, LogConfigFileName, ConfigEnvPrefixMap[LogConfigFileName], runtimeEnv, &r.log)
 }
 
 func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
@@ -15,10 +15,11 @@
 package config
 
 import (
-	"github.com/openimsdk/tools/s3/aws"
 	"strings"
 	"time"
 
+	"github.com/openimsdk/tools/s3/aws"
+
 	"github.com/openimsdk/tools/db/mongoutil"
 	"github.com/openimsdk/tools/db/redisutil"
 	"github.com/openimsdk/tools/mq/kafka"
@@ -474,9 +475,14 @@ type ZooKeeper struct {
 type Discovery struct {
 	Enable     string     `mapstructure:"enable"`
 	Etcd       Etcd       `mapstructure:"etcd"`
+	Kubernetes Kubernetes `mapstructure:"kubernetes"`
 	RpcService RpcService `mapstructure:"rpcService"`
 }
 
+type Kubernetes struct {
+	Namespace string `mapstructure:"namespace"`
+}
+
 type Etcd struct {
 	RootDirectory string   `mapstructure:"rootDirectory"`
 	Address       []string `mapstructure:"address"`
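To make the new mapping concrete, below is a minimal Go sketch (not part of the commit) showing how the kubernetes.namespace key added to the discovery config binds to the Discovery and Kubernetes structs above through their mapstructure tags. The file path and the trimmed struct copies are illustrative; in the server the binding happens through config.Load / loadConfig shown later in this diff.

// Illustrative sketch: trimmed copies of the structs above, unmarshalled with
// viper, which honors the `mapstructure` tags. The config path is an assumption.
package main

import (
	"fmt"
	"log"

	"github.com/spf13/viper"
)

type Kubernetes struct {
	Namespace string `mapstructure:"namespace"`
}

type Discovery struct {
	Enable     string     `mapstructure:"enable"`
	Kubernetes Kubernetes `mapstructure:"kubernetes"`
}

func main() {
	v := viper.New()
	v.SetConfigFile("./config/discovery.yml") // assumed location of the file edited above
	if err := v.ReadInConfig(); err != nil {
		log.Fatalf("read discovery.yml: %v", err)
	}

	var disc Discovery
	if err := v.Unmarshal(&disc); err != nil {
		log.Fatalf("unmarshal discovery config: %v", err)
	}
	fmt.Println("enable:", disc.Enable, "namespace:", disc.Kubernetes.Namespace)
}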
@@ -16,6 +16,12 @@ package config
 
 const ConfKey = "conf"
 
+const (
+	MountConfigFilePath = "CONFIG_PATH"
+	DeploymentType      = "DEPLOYMENT_TYPE"
+	KUBERNETES          = "kubernetes"
+)
+
 const (
 	// DefaultDirPerm is used for creating general directories, allowing the owner to read, write, and execute,
 	// while the group and others can only read and execute.
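These constants are consumed in two places: config.Load (next hunk) reads CONFIG_PATH when running under Kubernetes, and runtimeenv.PrintRuntimeEnvironment (from openimsdk/tools, not shown in this diff) reports the deployment type. A rough sketch of the assumed detection contract follows; the fallback value is an assumption, not something taken from this commit.

// Hypothetical sketch only: assumes PrintRuntimeEnvironment returns
// "kubernetes" when DEPLOYMENT_TYPE=kubernetes is set on the pod, and some
// other label otherwise; the "source" fallback is invented for illustration.
package main

import (
	"fmt"
	"os"
)

const (
	DeploymentType = "DEPLOYMENT_TYPE"
	KUBERNETES     = "kubernetes"
)

func printRuntimeEnvironment() string {
	if os.Getenv(DeploymentType) == KUBERNETES {
		return KUBERNETES
	}
	return "source" // assumed default for non-cluster runs
}

func main() {
	fmt.Println("runtime environment:", printRuntimeEnvironment())
}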
@@ -1,13 +1,29 @@
 package config
 
 import (
+	"os"
+	"path/filepath"
+	"strings"
+
 	"github.com/mitchellh/mapstructure"
 	"github.com/openimsdk/tools/errs"
 	"github.com/spf13/viper"
-	"strings"
 )
 
-func LoadConfig(path string, envPrefix string, config any) error {
+func Load(configDirectory string, configFileName string, envPrefix string, runtimeEnv string, config any) error {
+	if runtimeEnv == KUBERNETES {
+		mountPath := os.Getenv(MountConfigFilePath)
+		if mountPath == "" {
+			return errs.ErrArgs.WrapMsg(MountConfigFilePath + " env is empty")
+		}
+
+		return loadConfig(filepath.Join(mountPath, configFileName), envPrefix, config)
+	}
+
+	return loadConfig(filepath.Join(configDirectory, configFileName), envPrefix, config)
+}
+
+func loadConfig(path string, envPrefix string, config any) error {
 	v := viper.New()
 	v.SetConfigFile(path)
 	v.SetEnvPrefix(envPrefix)
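As a usage illustration (not part of the commit), a service loads its configuration through the new Load like this: outside Kubernetes the file is read from configDirectory, while inside a pod it is read from the directory named by CONFIG_PATH. The file name and env prefix below are illustrative values.

// Hedged usage sketch for config.Load as introduced above.
package main

import (
	"fmt"
	"log"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	"github.com/openimsdk/tools/utils/runtimeenv"
)

func main() {
	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()

	var disc config.Discovery
	// "discovery.yml" and "IMENV_DISCOVERY" are illustrative arguments.
	if err := config.Load("./config", "discovery.yml", "IMENV_DISCOVERY", runtimeEnv, &disc); err != nil {
		log.Fatalf("load discovery config: %v", err)
	}
	fmt.Println("discovery backend:", disc.Enable, "namespace:", disc.Kubernetes.Namespace)
}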
@@ -1,27 +1,51 @@
 package config
 
 import (
-	"github.com/stretchr/testify/assert"
+	"os"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )
 
 func TestLoadLogConfig(t *testing.T) {
 	var log Log
-	err := LoadConfig("../../../config/log.yml", "IMENV_LOG", &log)
+	os.Setenv("IMENV_LOG_REMAINLOGLEVEL", "5")
+	err := Load("../../../config/", "log.yml", "IMENV_LOG", "", &log)
 	assert.Nil(t, err)
-	assert.Equal(t, "../../../../logs/", log.StorageLocation)
+	t.Log(log.RemainLogLevel)
+	// assert.Equal(t, "../../../../logs/", log.StorageLocation)
+}
+
+func TestLoadMongoConfig(t *testing.T) {
+	var mongo Mongo
+	// os.Setenv("DEPLOYMENT_TYPE", "kubernetes")
+	os.Setenv("IMENV_MONGODB_PASSWORD", "openIM1231231")
+	// os.Setenv("IMENV_MONGODB_URI", "openIM123")
+	// os.Setenv("IMENV_MONGODB_USERNAME", "openIM123")
+	err := Load("../../../config/", "mongodb.yml", "IMENV_MONGODB", "", &mongo)
+	// err := LoadConfig("../../../config/mongodb.yml", "IMENV_MONGODB", &mongo)
+
+	assert.Nil(t, err)
+	t.Log(mongo.Password)
+	// assert.Equal(t, "openIM123", mongo.Password)
+	t.Log(os.Getenv("IMENV_MONGODB_PASSWORD"))
+	t.Log(mongo)
+	// //export IMENV_OPENIM_RPC_USER_RPC_LISTENIP="0.0.0.0"
+	// assert.Equal(t, "0.0.0.0", user.RPC.ListenIP)
+	// //export IMENV_OPENIM_RPC_USER_RPC_PORTS="10110,10111,10112"
+	// assert.Equal(t, []int{10110, 10111, 10112}, user.RPC.Ports)
 }
 
 func TestLoadMinioConfig(t *testing.T) {
 	var storageConfig Minio
-	err := LoadConfig("../../../config/minio.yml", "IMENV_MINIO", &storageConfig)
+	err := Load("../../../config/", "minio.yml", "IMENV_MINIO", "", &storageConfig)
 	assert.Nil(t, err)
 	assert.Equal(t, "openim", storageConfig.Bucket)
 }
 
 func TestLoadWebhooksConfig(t *testing.T) {
 	var webhooks Webhooks
-	err := LoadConfig("../../../config/webhooks.yml", "IMENV_WEBHOOKS", &webhooks)
+	err := Load("../../../config/", "webhooks.yml", "IMENV_WEBHOOKS", "", &webhooks)
 	assert.Nil(t, err)
 	assert.Equal(t, 5, webhooks.BeforeAddBlack.Timeout)
@@ -29,7 +53,7 @@ func TestLoadWebhooksConfig(t *testing.T) {
 
 func TestLoadOpenIMRpcUserConfig(t *testing.T) {
 	var user User
-	err := LoadConfig("../../../config/openim-rpc-user.yml", "IMENV_OPENIM_RPC_USER", &user)
+	err := Load("../../../config/", "openim-rpc-user.yml", "IMENV_OPENIM_RPC_USER", "", &user)
 	assert.Nil(t, err)
 	//export IMENV_OPENIM_RPC_USER_RPC_LISTENIP="0.0.0.0"
 	assert.Equal(t, "0.0.0.0", user.RPC.ListenIP)
@@ -39,14 +63,14 @@ func TestLoadOpenIMRpcUserConfig(t *testing.T) {
 
 func TestLoadNotificationConfig(t *testing.T) {
 	var noti Notification
-	err := LoadConfig("../../../config/notification.yml", "IMENV_NOTIFICATION", &noti)
+	err := Load("../../../config/", "notification.yml", "IMENV_NOTIFICATION", "", &noti)
 	assert.Nil(t, err)
 	assert.Equal(t, "Your friend's profile has been changed", noti.FriendRemarkSet.OfflinePush.Title)
 }
 
 func TestLoadOpenIMThirdConfig(t *testing.T) {
 	var third Third
-	err := LoadConfig("../../../config/openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", &third)
+	err := Load("../../../config/", "openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", "", &third)
 	assert.Nil(t, err)
 	assert.Equal(t, "enabled", third.Object.Enable)
 	assert.Equal(t, "https://oss-cn-chengdu.aliyuncs.com", third.Object.Oss.Endpoint)
@@ -15,19 +15,26 @@
 package discoveryregister
 
 import (
+	"time"
+
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister/kubernetes"
 	"github.com/openimsdk/tools/discovery"
+
+	"github.com/openimsdk/tools/discovery/kubernetes"
+
 	"github.com/openimsdk/tools/discovery/etcd"
 	"github.com/openimsdk/tools/errs"
-	"time"
 )
 
 // NewDiscoveryRegister creates a new service discovery and registry client based on the provided environment type.
-func NewDiscoveryRegister(discovery *config.Discovery) (discovery.SvcDiscoveryRegistry, error) {
+func NewDiscoveryRegister(discovery *config.Discovery, runtimeEnv string) (discovery.SvcDiscoveryRegistry, error) {
+	if runtimeEnv == "kubernetes" {
+		discovery.Enable = "kubernetes"
+	}
+
 	switch discovery.Enable {
-	case "k8s":
-		return kubernetes.NewK8sDiscoveryRegister(discovery.RpcService.MessageGateway)
+	case "kubernetes":
+		return kubernetes.NewKubernetesConnManager(discovery.Kubernetes.Namespace)
 	case "etcd":
 		return etcd.NewSvcDiscoveryRegistry(
 			discovery.Etcd.RootDirectory,
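For orientation, a hedged sketch (not from the diff) of how a service start-up wires the runtime environment into service discovery after this change; inside a Kubernetes pod the runtimeEnv argument forces discovery.Enable to "kubernetes", so the Kubernetes connection manager is used instead of etcd.

// Illustrative sketch of calling NewDiscoveryRegister with the new parameter.
package main

import (
	"log"

	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
	"github.com/openimsdk/tools/utils/runtimeenv"
)

func main() {
	disc := &config.Discovery{
		Enable:     "etcd", // value from discovery.yml; overridden when running in Kubernetes
		Kubernetes: config.Kubernetes{Namespace: "default"},
	}

	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
	client, err := kdisc.NewDiscoveryRegister(disc, runtimeEnv)
	if err != nil {
		log.Fatalf("register discovery service: %v", err)
	}

	// client satisfies discovery.SvcDiscoveryRegistry; services dial each other
	// through it, e.g. client.GetConn(ctx, disc.RpcService.MessageGateway).
	_ = client
}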
@@ -16,184 +16,281 @@ package kubernetes
 
 import (
 	"context"
-	"errors"
 	"fmt"
-	"os"
-	"strconv"
-	"strings"
+	"sync"
+	"time"
 
-	"github.com/openimsdk/tools/discovery"
-	"github.com/openimsdk/tools/log"
-	"github.com/stathat/consistent"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
 )
 
-// K8sDR represents the Kubernetes service discovery and registration client.
-type K8sDR struct {
-	options               []grpc.DialOption
-	rpcRegisterAddr       string
-	gatewayHostConsistent *consistent.Consistent
-	gatewayName           string
+type KubernetesConnManager struct {
+	clientset   *kubernetes.Clientset
+	namespace   string
+	dialOptions []grpc.DialOption
+
+	selfTarget string
+
+	mu      sync.RWMutex
+	connMap map[string][]*grpc.ClientConn
 }
 
-func NewK8sDiscoveryRegister(gatewayName string) (discovery.SvcDiscoveryRegistry, error) {
-	gatewayConsistent := consistent.New()
-	gatewayHosts := getMsgGatewayHost(context.Background(), gatewayName)
-	for _, v := range gatewayHosts {
-		gatewayConsistent.Add(v)
-	}
-	return &K8sDR{gatewayHostConsistent: gatewayConsistent}, nil
-}
-
-func (cli *K8sDR) Register(serviceName, host string, port int, opts ...grpc.DialOption) error {
-	if serviceName != cli.gatewayName {
-		cli.rpcRegisterAddr = serviceName
-	} else {
-		cli.rpcRegisterAddr = getSelfHost(context.Background(), cli.gatewayName)
-	}
-
-	return nil
-}
-
-func (cli *K8sDR) UnRegister() error {
-
-	return nil
-}
-
-func (cli *K8sDR) CreateRpcRootNodes(serviceNames []string) error {
-
-	return nil
-}
-
-func (cli *K8sDR) RegisterConf2Registry(key string, conf []byte) error {
-
-	return nil
-}
-
-func (cli *K8sDR) GetConfFromRegistry(key string) ([]byte, error) {
-
-	return nil, nil
-}
-
-func (cli *K8sDR) GetUserIdHashGatewayHost(ctx context.Context, userId string) (string, error) {
-	host, err := cli.gatewayHostConsistent.Get(userId)
+// NewKubernetesConnManager creates a new connection manager that uses Kubernetes services for service discovery.
+func NewKubernetesConnManager(namespace string, options ...grpc.DialOption) (*KubernetesConnManager, error) {
+	config, err := rest.InClusterConfig()
 	if err != nil {
-		log.ZError(ctx, "GetUserIdHashGatewayHost error", err)
-	}
-	return host, err
+		return nil, fmt.Errorf("failed to create in-cluster config: %v", err)
 	}
 
-func getSelfHost(ctx context.Context, gatewayName string) string {
-	port := 88
-	instance := "openimserver"
-	selfPodName := os.Getenv("MY_POD_NAME")
-	ns := os.Getenv("MY_POD_NAMESPACE")
-	statefuleIndex := 0
-	gatewayEnds := strings.Split(gatewayName, ":")
-	if len(gatewayEnds) != 2 {
-		log.ZError(ctx, "msggateway RpcRegisterName is error:config.RpcRegisterName.OpenImMessageGatewayName", errors.New("config error"))
-	} else {
-		port, _ = strconv.Atoi(gatewayEnds[1])
-	}
-	podInfo := strings.Split(selfPodName, "-")
-	instance = podInfo[0]
-	count := len(podInfo)
-	statefuleIndex, _ = strconv.Atoi(podInfo[count-1])
-	host := fmt.Sprintf("%s-openim-msggateway-%d.%s-openim-msggateway-headless.%s.svc.cluster.local:%d", instance, statefuleIndex, instance, ns, port)
-	return host
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create clientset: %v", err)
 	}
 
-// like openimserver-openim-msggateway-0.openimserver-openim-msggateway-headless.openim-lin.svc.cluster.local:88.
-// Replica set in kubernetes environment
-func getMsgGatewayHost(ctx context.Context, gatewayName string) []string {
-	port := 88
-	instance := "openimserver"
-	selfPodName := os.Getenv("MY_POD_NAME")
-	replicas := os.Getenv("MY_MSGGATEWAY_REPLICACOUNT")
-	ns := os.Getenv("MY_POD_NAMESPACE")
-	gatewayEnds := strings.Split(gatewayName, ":")
-	if len(gatewayEnds) != 2 {
-		log.ZError(ctx, "msggateway RpcRegisterName is error:config.RpcRegisterName.OpenImMessageGatewayName", errors.New("config error"))
-	} else {
-		port, _ = strconv.Atoi(gatewayEnds[1])
-	}
-	nReplicas, _ := strconv.Atoi(replicas)
-	podInfo := strings.Split(selfPodName, "-")
-	instance = podInfo[0]
-	var ret []string
-	for i := 0; i < nReplicas; i++ {
-		host := fmt.Sprintf("%s-openim-msggateway-%d.%s-openim-msggateway-headless.%s.svc.cluster.local:%d", instance, i, instance, ns, port)
-		ret = append(ret, host)
-	}
-	log.ZDebug(ctx, "getMsgGatewayHost", "instance", instance, "selfPodName", selfPodName, "replicas", replicas, "ns", ns, "ret", ret)
-	return ret
+	k := &KubernetesConnManager{
+		clientset:   clientset,
+		namespace:   namespace,
+		dialOptions: options,
+		connMap:     make(map[string][]*grpc.ClientConn),
 	}
 
-// GetConns returns the gRPC client connections to the specified service.
-func (cli *K8sDR) GetConns(ctx context.Context, serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) {
+	go k.watchEndpoints()
+
-	// This conditional checks if the serviceName is not the OpenImMessageGatewayName.
-	// It seems to handle a special case for the OpenImMessageGateway.
-	if serviceName != cli.gatewayName {
-		// DialContext creates a client connection to the given target (serviceName) using the specified context.
-		// 'cli.options' are likely default or common options for all connections in this struct.
-		// 'opts...' allows for additional gRPC dial options to be passed and used.
-		conn, err := grpc.DialContext(ctx, serviceName, append(cli.options, opts...)...)
+	return k, nil
 	}
 
-		// The function returns a slice of client connections with the new connection, or an error if occurred.
-		return []*grpc.ClientConn{conn}, err
-	} else {
-		// This block is executed if the serviceName is OpenImMessageGatewayName.
-		// 'ret' will accumulate the connections to return.
-		var ret []*grpc.ClientConn
+func (k *KubernetesConnManager) initializeConns(serviceName string) error {
+	port, err := k.getServicePort(serviceName)
 	if err != nil {
+		return err
 	}
 
-		// getMsgGatewayHost presumably retrieves hosts for the message gateway service.
-		// The context is passed, likely for cancellation and timeout control.
-		gatewayHosts := getMsgGatewayHost(ctx, cli.gatewayName)
+	endpoints, err := k.clientset.CoreV1().Endpoints(k.namespace).Get(context.Background(), serviceName, metav1.GetOptions{})
 	if err != nil {
+		return fmt.Errorf("failed to get endpoints for service %s: %v", serviceName, err)
 	}
 
-		// Iterating over the retrieved gateway hosts.
-		for _, host := range gatewayHosts {
-			// Establishes a connection to each host.
-			// Again, appending cli.options with any additional opts provided.
-			conn, err := grpc.DialContext(ctx, host, append(cli.options, opts...)...)
+	var conns []*grpc.ClientConn
+	for _, subset := range endpoints.Subsets {
+		for _, address := range subset.Addresses {
+			target := fmt.Sprintf("%s:%d", address.IP, port)
+			conn, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
 			if err != nil {
+				return fmt.Errorf("failed to dial endpoint %s: %v", target, err)
 			}
+			conns = append(conns, conn)
 		}
 	}
 
-			// If there's an error while dialing any host, the function returns immediately with the error.
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.connMap[serviceName] = conns
+
+	// go k.watchEndpoints(serviceName)
+
+	return nil
 }
 
+// GetConns returns gRPC client connections for a given Kubernetes service name.
+func (k *KubernetesConnManager) GetConns(ctx context.Context, serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) {
+	k.mu.RLock()
+	conns, exists := k.connMap[serviceName]
+	defer k.mu.RUnlock()
+
+	if exists {
+		return conns, nil
 	}
+
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	// Check if another goroutine has already initialized the connections when we released the read lock
+	conns, exists = k.connMap[serviceName]
+	if exists {
+		return conns, nil
 	}
+
+	if err := k.initializeConns(serviceName); err != nil {
+		return nil, fmt.Errorf("failed to initialize connections for service %s: %v", serviceName, err)
 	}
+
+	return k.connMap[serviceName], nil
 }
 
+// GetConn returns a single gRPC client connection for a given Kubernetes service name.
+func (k *KubernetesConnManager) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	port, err := k.getServicePort(serviceName)
 	if err != nil {
+		return nil, err
-			} else {
-				// If the connection is successful, it is added to the 'ret' slice.
-				ret = append(ret, conn)
			}
		}
-		// After all hosts are processed, the slice of connections is returned.
-		return ret, nil
 	}
-}
 
-func (cli *K8sDR) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	fmt.Println("SVC port:", port)
+
-	return grpc.DialContext(ctx, serviceName, append(cli.options, opts...)...)
+	target := fmt.Sprintf("%s.%s.svc.cluster.local:%d", serviceName, k.namespace, port)
+
+	fmt.Println("SVC target:", target)
+
+	return grpc.DialContext(
+		ctx,
+		target,
+		append([]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}, k.dialOptions...)...,
+	)
 }
 
-func (cli *K8sDR) GetSelfConnTarget() string {
-
-	return cli.rpcRegisterAddr
+// GetSelfConnTarget returns the connection target for the current service.
+func (k *KubernetesConnManager) GetSelfConnTarget() string {
+	return k.selfTarget
 }
 
-func (cli *K8sDR) AddOption(opts ...grpc.DialOption) {
-	cli.options = append(cli.options, opts...)
+// AddOption appends gRPC dial options to the existing options.
+func (k *KubernetesConnManager) AddOption(opts ...grpc.DialOption) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	k.dialOptions = append(k.dialOptions, opts...)
 }
 
-func (cli *K8sDR) CloseConn(conn *grpc.ClientConn) {
+// CloseConn closes a given gRPC client connection.
+func (k *KubernetesConnManager) CloseConn(conn *grpc.ClientConn) {
 	conn.Close()
 }
 
-// do not use this method for call rpc.
-func (cli *K8sDR) GetClientLocalConns() map[string][]*grpc.ClientConn {
-	log.ZError(context.Background(), "should not call this function!", nil)
+// Close closes all gRPC connections managed by KubernetesConnManager.
+func (k *KubernetesConnManager) Close() {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	for _, conns := range k.connMap {
+		for _, conn := range conns {
+			_ = conn.Close()
+		}
+	}
+	k.connMap = make(map[string][]*grpc.ClientConn)
 }
 
+func (k *KubernetesConnManager) Register(serviceName, host string, port int, opts ...grpc.DialOption) error {
 	return nil
 }
+func (k *KubernetesConnManager) UnRegister() error {
+	return nil
+}
 
-func (cli *K8sDR) Close() {
-
+func (k *KubernetesConnManager) GetUserIdHashGatewayHost(ctx context.Context, userId string) (string, error) {
+	return "", nil
 }
 
+func (k *KubernetesConnManager) getServicePort(serviceName string) (int32, error) {
+	svc, err := k.clientset.CoreV1().Services(k.namespace).Get(context.Background(), serviceName, metav1.GetOptions{})
+	if err != nil {
+		fmt.Print("namespace:", k.namespace)
+		return 0, fmt.Errorf("failed to get service %s: %v", serviceName, err)
+	}
+
+	if len(svc.Spec.Ports) == 0 {
+		return 0, fmt.Errorf("service %s has no ports defined", serviceName)
+	}
+
+	return svc.Spec.Ports[0].Port, nil
+}
+
+// watchEndpoints listens for changes in Pod resources.
+func (k *KubernetesConnManager) watchEndpoints() {
+	informerFactory := informers.NewSharedInformerFactory(k.clientset, time.Minute*10)
+	informer := informerFactory.Core().V1().Pods().Informer()
+
+	// Watch for Pod changes (add, update, delete)
+	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			k.handleEndpointChange(obj)
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			k.handleEndpointChange(newObj)
+		},
+		DeleteFunc: func(obj interface{}) {
+			k.handleEndpointChange(obj)
+		},
+	})
+
+	informerFactory.Start(context.Background().Done())
+	<-context.Background().Done() // Block forever
+}
+
+func (k *KubernetesConnManager) handleEndpointChange(obj interface{}) {
+	endpoint, ok := obj.(*v1.Endpoints)
+	if !ok {
+		return
+	}
+	serviceName := endpoint.Name
+	if err := k.initializeConns(serviceName); err != nil {
+		fmt.Printf("Error initializing connections for %s: %v\n", serviceName, err)
+	}
+}
+
+// =================
+
+// initEndpoints initializes connections by fetching all available endpoints in the specified namespace.
+
+// func (k *KubernetesConnManager) initEndpoints() error {
+// 	k.mu.Lock()
+// 	defer k.mu.Unlock()
+
+// 	pods, err := k.clientset.CoreV1().Pods(k.namespace).List(context.TODO(), metav1.ListOptions{})
+// 	if err != nil {
+// 		return fmt.Errorf("failed to list pods: %v", err)
+// 	}
+
+// 	for _, pod := range pods.Items {
+// 		if pod.Status.Phase == v1.PodRunning {
+// 			target := fmt.Sprintf("%s:%d", address.IP, port)
+// 			conn, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
+// 			conn, err := k.createGRPCConnection(pod)
+// 			if err != nil {
+// 				return fmt.Errorf("failed to create GRPC connection for pod %s: %v", pod.Name, err)
+// 			}
+// 			k.connMap[pod.Name] = append(k.connMap[pod.Name], conn)
+// 		}
+// 	}
+
+// 	return nil
+// }
+
+// -----
+
+// func (k *KubernetesConnManager) watchEndpoints1(serviceName string) {
+// 	// watch for changes to the service's endpoints
+// 	informerFactory := informers.NewSharedInformerFactory(k.clientset, time.Minute)
+// 	endpointsInformer := informerFactory.Core().V1().Endpoints().Informer()
+
+// 	endpointsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+// 		AddFunc: func(obj interface{}) {
+// 			eps := obj.(*v1.Endpoints)
+// 			if eps.Name == serviceName {
+// 				k.initializeConns(serviceName)
+// 			}
+// 		},
+// 		UpdateFunc: func(oldObj, newObj interface{}) {
+// 			eps := newObj.(*v1.Endpoints)
+// 			if eps.Name == serviceName {
+// 				k.initializeConns(serviceName)
+// 			}
+// 		},
+// 		DeleteFunc: func(obj interface{}) {
+// 			eps := obj.(*v1.Endpoints)
+// 			if eps.Name == serviceName {
+// 				k.mu.Lock()
+// 				defer k.mu.Unlock()
+// 				for _, conn := range k.connMap[serviceName] {
+// 					_ = conn.Close()
+// 				}
+// 				delete(k.connMap, serviceName)
+// 			}
+// 		},
+// 	})
+
+// 	informerFactory.Start(wait.NeverStop)
+// 	informerFactory.WaitForCacheSync(wait.NeverStop)
+// }
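A hedged usage sketch of the new connection manager (not part of the commit): it resolves a service's endpoints through the Kubernetes API, dials each pod, caches the connections in connMap, and refreshes them from the Pod informer started in NewKubernetesConnManager. The import path is assumed to be the package this hunk edits, and "user-rpc-service" matches the rpcService.user entry added to the discovery config above.

// Illustrative sketch: obtaining pooled gRPC connections inside a cluster.
package main

import (
	"context"
	"fmt"
	"log"

	// Assumed import path of the package edited in this hunk.
	"github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister/kubernetes"
)

func main() {
	mgr, err := kubernetes.NewKubernetesConnManager("default") // namespace from discovery.yml
	if err != nil {
		log.Fatalf("create conn manager: %v", err)
	}
	defer mgr.Close()

	conns, err := mgr.GetConns(context.Background(), "user-rpc-service")
	if err != nil {
		log.Fatalf("get conns: %v", err)
	}
	fmt.Println("connections to user-rpc-service:", len(conns))
}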
@@ -18,9 +18,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
-	"github.com/openimsdk/tools/utils/datautil"
-	"google.golang.org/grpc/status"
 	"net"
 	"net/http"
 	"os"
@@ -28,6 +25,13 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
+	"github.com/openimsdk/tools/utils/datautil"
+	"github.com/openimsdk/tools/utils/runtimeenv"
+	"google.golang.org/grpc/status"
+
+	"strconv"
+
 	kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
 	"github.com/openimsdk/tools/discovery"
@@ -37,7 +41,6 @@ import (
 	"github.com/openimsdk/tools/utils/network"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
-	"strconv"
 )
 
 // Start rpc server.
@@ -51,6 +54,8 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 		netErr error
 	)
 
+	runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
 	if !autoSetPorts {
 		rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
 		if err != nil {
@@ -107,11 +112,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	registerIP = network.GetListenIP(registerIP)
 	port, _ := strconv.Atoi(portStr)
 
-	log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", portStr,
+	log.CInfo(ctx, "RPC server is initializing", "runTimeEnv", runTimeEnv, "rpcRegisterName", rpcRegisterName, "rpcPort", portStr,
 		"prometheusPorts", prometheusConfig.Ports)
 
 	defer listener.Close()
-	client, err := kdisc.NewDiscoveryRegister(discovery)
+	client, err := kdisc.NewDiscoveryRegister(discovery, runTimeEnv)
 	if err != nil {
 		return err
 	}
@@ -33,6 +33,7 @@ import (
 	"github.com/openimsdk/tools/mq/kafka"
 	"github.com/openimsdk/tools/s3/minio"
 	"github.com/openimsdk/tools/system/program"
+	"github.com/openimsdk/tools/utils/runtimeenv"
 )
 
 const maxRetry = 180
@@ -78,35 +79,37 @@ func initConfig(configDir string) (*config.Mongo, *config.Redis, *config.Kafka,
 		discovery   = &config.Discovery{}
 		thirdConfig = &config.Third{}
 	)
-	err := config.LoadConfig(filepath.Join(configDir, cmd.MongodbConfigFileName), cmd.ConfigEnvPrefixMap[cmd.MongodbConfigFileName], mongoConfig)
+	runtimeEnv := runtimeenv.PrintRuntimeEnvironment()
+
+	err := config.Load(configDir, cmd.MongodbConfigFileName, cmd.ConfigEnvPrefixMap[cmd.MongodbConfigFileName], runtimeEnv, mongoConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}
 
-	err = config.LoadConfig(filepath.Join(configDir, cmd.RedisConfigFileName), cmd.ConfigEnvPrefixMap[cmd.RedisConfigFileName], redisConfig)
+	err = config.Load(configDir, cmd.RedisConfigFileName, cmd.ConfigEnvPrefixMap[cmd.RedisConfigFileName], runtimeEnv, redisConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}
 
-	err = config.LoadConfig(filepath.Join(configDir, cmd.KafkaConfigFileName), cmd.ConfigEnvPrefixMap[cmd.KafkaConfigFileName], kafkaConfig)
+	err = config.Load(configDir, cmd.KafkaConfigFileName, cmd.ConfigEnvPrefixMap[cmd.KafkaConfigFileName], runtimeEnv, kafkaConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}
 
-	err = config.LoadConfig(filepath.Join(configDir, cmd.OpenIMRPCThirdCfgFileName), cmd.ConfigEnvPrefixMap[cmd.OpenIMRPCThirdCfgFileName], thirdConfig)
+	err = config.Load(configDir, cmd.OpenIMRPCThirdCfgFileName, cmd.ConfigEnvPrefixMap[cmd.OpenIMRPCThirdCfgFileName], runtimeEnv, thirdConfig)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}
 
 	if thirdConfig.Object.Enable == "minio" {
-		err = config.LoadConfig(filepath.Join(configDir, cmd.MinioConfigFileName), cmd.ConfigEnvPrefixMap[cmd.MinioConfigFileName], minioConfig)
+		err = config.Load(configDir, cmd.MinioConfigFileName, cmd.ConfigEnvPrefixMap[cmd.MinioConfigFileName], runtimeEnv, minioConfig)
 		if err != nil {
 			return nil, nil, nil, nil, nil, err
 		}
 	} else {
 		minioConfig = nil
 	}
-	err = config.LoadConfig(filepath.Join(configDir, cmd.DiscoveryConfigFilename), cmd.ConfigEnvPrefixMap[cmd.DiscoveryConfigFilename], discovery)
+	err = config.Load(configDir, cmd.DiscoveryConfigFilename, cmd.ConfigEnvPrefixMap[cmd.DiscoveryConfigFilename], runtimeEnv, discovery)
 	if err != nil {
 		return nil, nil, nil, nil, nil, err
 	}