networks:
  openim:
    driver: bridge

services:
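  # MongoDB: starts mongod with auth enabled, waits until it answers a ping,
  # then creates the OpenIM application user (if it does not already exist)
  # with readWrite on the OpenIM database, and finally keeps the container alive.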
  mongodb:
    image: "${MONGO_IMAGE}"
    ports:
      - "37017:27017"
    container_name: mongo
    command: >
      bash -c '
      docker-entrypoint.sh mongod --wiredTigerCacheSizeGB $$wiredTigerCacheSizeGB --auth &
      until mongosh -u $$MONGO_INITDB_ROOT_USERNAME -p $$MONGO_INITDB_ROOT_PASSWORD --authenticationDatabase admin --eval "db.runCommand({ ping: 1 })" &>/dev/null; do
        echo "Waiting for MongoDB to start..."
        sleep 1
      done &&
      mongosh -u $$MONGO_INITDB_ROOT_USERNAME -p $$MONGO_INITDB_ROOT_PASSWORD --authenticationDatabase admin --eval "
      db = db.getSiblingDB(\"$$MONGO_INITDB_DATABASE\");
      if (!db.getUser(\"$$MONGO_OPENIM_USERNAME\")) {
        db.createUser({
          user: \"$$MONGO_OPENIM_USERNAME\",
          pwd: \"$$MONGO_OPENIM_PASSWORD\",
          roles: [{role: \"readWrite\", db: \"$$MONGO_INITDB_DATABASE\"}]
        });
        print(\"User created successfully: \");
        print(\"Username: $$MONGO_OPENIM_USERNAME\");
        print(\"Password: $$MONGO_OPENIM_PASSWORD\");
        print(\"Database: $$MONGO_INITDB_DATABASE\");
      } else {
        print(\"User already exists in database: $$MONGO_INITDB_DATABASE, Username: $$MONGO_OPENIM_USERNAME\");
      }
      " &&
      tail -f /dev/null
      '
    volumes:
      - "${DATA_DIR}/components/mongodb/data/db:/data/db"
      - "${DATA_DIR}/components/mongodb/data/logs:/data/logs"
      - "${DATA_DIR}/components/mongodb/data/conf:/etc/mongo"
      - "${MONGO_BACKUP_DIR}:/data/backup"
    environment:
      - TZ=Asia/Shanghai
      - wiredTigerCacheSizeGB=1
      - MONGO_INITDB_ROOT_USERNAME=root
      - MONGO_INITDB_ROOT_PASSWORD=openIM123
      - MONGO_INITDB_DATABASE=openim_v3
      - MONGO_OPENIM_USERNAME=openIM
      - MONGO_OPENIM_PASSWORD=openIM123
    restart: always
    networks:
      - openim

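  # Redis master: password-protected, AOF persistence enabled, with a healthcheck
  # that the sentinel containers below use as a startup dependency. Together with
  # the two replicas and three sentinels it forms a basic high-availability setup.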
  redis:
    image: "${REDIS_IMAGE}"
    container_name: redis
    ports:
      - "16379:6379"
    volumes:
      - "${DATA_DIR}/components/redis/data:/data"
      - "${DATA_DIR}/components/redis/config/redis.conf:/usr/local/redis/config/redis.conf"
    environment:
      TZ: Asia/Shanghai
    restart: always
    sysctls:
      net.core.somaxconn: 1024
    command: redis-server /usr/local/redis/config/redis.conf --requirepass openIM123 --appendonly yes
    healthcheck:
      test: ["CMD", "redis-cli", "-a", "openIM123", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
      start_period: 30s
    networks:
      - openim

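  # Redis replicas: each writes a minimal redis-slave.conf at startup
  # (replicaof the master, masterauth/requirepass, AOF, read-only) and then
  # launches redis-server with it. redis-slave-2 is identical apart from its
  # host port and data directory.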
  redis-slave-1:
    image: "${REDIS_IMAGE}"
    container_name: redis-slave-1
    ports:
      - "16380:6379"
    volumes:
      - "${DATA_DIR}/components/redis/slave1:/data"
    environment:
      TZ: Asia/Shanghai
      REDIS_MASTER_HOST: redis
      REDIS_MASTER_PORT: 6379
      REDIS_MASTER_PASSWORD: openIM123
      REDIS_SLAVE_PASSWORD: openIM123
    restart: always
    command: >
      bash -c '
      echo "port 6379" > /data/redis-slave.conf &&
      echo "replicaof $${REDIS_MASTER_HOST} $${REDIS_MASTER_PORT}" >> /data/redis-slave.conf &&
      echo "masterauth $${REDIS_MASTER_PASSWORD}" >> /data/redis-slave.conf &&
      echo "requirepass $${REDIS_SLAVE_PASSWORD}" >> /data/redis-slave.conf &&
      echo "appendonly yes" >> /data/redis-slave.conf &&
      echo "replica-read-only yes" >> /data/redis-slave.conf &&
      redis-server /data/redis-slave.conf
      '
    depends_on:
      - redis
    networks:
      - openim

  redis-slave-2:
    image: "${REDIS_IMAGE}"
    container_name: redis-slave-2
    ports:
      - "16381:6379"
    volumes:
      - "${DATA_DIR}/components/redis/slave2:/data"
    environment:
      TZ: Asia/Shanghai
      REDIS_MASTER_HOST: redis
      REDIS_MASTER_PORT: 6379
      REDIS_MASTER_PASSWORD: openIM123
      REDIS_SLAVE_PASSWORD: openIM123
    restart: always
    command: >
      bash -c '
      echo "port 6379" > /data/redis-slave.conf &&
      echo "replicaof $${REDIS_MASTER_HOST} $${REDIS_MASTER_PORT}" >> /data/redis-slave.conf &&
      echo "masterauth $${REDIS_MASTER_PASSWORD}" >> /data/redis-slave.conf &&
      echo "requirepass $${REDIS_SLAVE_PASSWORD}" >> /data/redis-slave.conf &&
      echo "appendonly yes" >> /data/redis-slave.conf &&
      echo "replica-read-only yes" >> /data/redis-slave.conf &&
      redis-server /data/redis-slave.conf
      '
    depends_on:
      - redis
    networks:
      - openim

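  # Redis sentinels: each sentinel generates its sentinel.conf from its
  # environment variables (monitored master name and address, quorum,
  # down-after, parallel-syncs, failover timeout) and then runs redis-sentinel.
  # The three sentinel services differ only in container name, host port, and
  # data directory, and all wait for the master to report healthy.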
  redis-sentinel-1:
    image: "${REDIS_IMAGE}"
    container_name: redis-sentinel-1
    ports:
      - "26379:26379"
    volumes:
      - "${DATA_DIR}/components/redis/sentinel1:/data"
    environment:
      TZ: Asia/Shanghai
      SENTINEL_PORT: 26379
      REDIS_MASTER_NAME: redis-master
      REDIS_MASTER_HOST: 127.0.0.1
      REDIS_MASTER_PORT: 16379
      REDIS_MASTER_PASSWORD: openIM123
      SENTINEL_QUORUM: 2
      SENTINEL_DOWN_AFTER: 30000
      SENTINEL_PARALLEL_SYNCS: 1
      SENTINEL_FAILOVER_TIMEOUT: 180000
    restart: always
    command: >
      bash -c '
      echo "port $${SENTINEL_PORT}" > /data/sentinel.conf &&
      echo "sentinel monitor $${REDIS_MASTER_NAME} $${REDIS_MASTER_HOST} $${REDIS_MASTER_PORT} $${SENTINEL_QUORUM}" >> /data/sentinel.conf &&
      echo "sentinel auth-pass $${REDIS_MASTER_NAME} $${REDIS_MASTER_PASSWORD}" >> /data/sentinel.conf &&
      echo "sentinel down-after-milliseconds $${REDIS_MASTER_NAME} $${SENTINEL_DOWN_AFTER}" >> /data/sentinel.conf &&
      echo "sentinel parallel-syncs $${REDIS_MASTER_NAME} $${SENTINEL_PARALLEL_SYNCS}" >> /data/sentinel.conf &&
      echo "sentinel failover-timeout $${REDIS_MASTER_NAME} $${SENTINEL_FAILOVER_TIMEOUT}" >> /data/sentinel.conf &&
      echo "sentinel resolve-hostnames yes" >> /data/sentinel.conf &&
      echo "sentinel announce-hostnames yes" >> /data/sentinel.conf &&
      redis-sentinel /data/sentinel.conf
      '
    depends_on:
      redis:
        condition: service_healthy
      redis-slave-1:
        condition: service_started
      redis-slave-2:
        condition: service_started
    networks:
      - openim

  redis-sentinel-2:
    image: "${REDIS_IMAGE}"
    container_name: redis-sentinel-2
    ports:
      - "26380:26379"
    volumes:
      - "${DATA_DIR}/components/redis/sentinel2:/data"
    environment:
      TZ: Asia/Shanghai
      SENTINEL_PORT: 26379
      REDIS_MASTER_NAME: redis-master
      REDIS_MASTER_HOST: 127.0.0.1
      REDIS_MASTER_PORT: 16379
      REDIS_MASTER_PASSWORD: openIM123
      SENTINEL_QUORUM: 2
      SENTINEL_DOWN_AFTER: 30000
      SENTINEL_PARALLEL_SYNCS: 1
      SENTINEL_FAILOVER_TIMEOUT: 180000
    restart: always
    command: >
      bash -c '
      echo "port $${SENTINEL_PORT}" > /data/sentinel.conf &&
      echo "sentinel monitor $${REDIS_MASTER_NAME} $${REDIS_MASTER_HOST} $${REDIS_MASTER_PORT} $${SENTINEL_QUORUM}" >> /data/sentinel.conf &&
      echo "sentinel auth-pass $${REDIS_MASTER_NAME} $${REDIS_MASTER_PASSWORD}" >> /data/sentinel.conf &&
      echo "sentinel down-after-milliseconds $${REDIS_MASTER_NAME} $${SENTINEL_DOWN_AFTER}" >> /data/sentinel.conf &&
      echo "sentinel parallel-syncs $${REDIS_MASTER_NAME} $${SENTINEL_PARALLEL_SYNCS}" >> /data/sentinel.conf &&
      echo "sentinel failover-timeout $${REDIS_MASTER_NAME} $${SENTINEL_FAILOVER_TIMEOUT}" >> /data/sentinel.conf &&
      echo "sentinel resolve-hostnames yes" >> /data/sentinel.conf &&
      echo "sentinel announce-hostnames yes" >> /data/sentinel.conf &&
      redis-sentinel /data/sentinel.conf
      '
    depends_on:
      redis:
        condition: service_healthy
      redis-slave-1:
        condition: service_started
      redis-slave-2:
        condition: service_started
    networks:
      - openim

  redis-sentinel-3:
    image: "${REDIS_IMAGE}"
    container_name: redis-sentinel-3
    ports:
      - "26381:26379"
    volumes:
      - "${DATA_DIR}/components/redis/sentinel3:/data"
    environment:
      TZ: Asia/Shanghai
      SENTINEL_PORT: 26379
      REDIS_MASTER_NAME: redis-master
      REDIS_MASTER_HOST: 127.0.0.1
      REDIS_MASTER_PORT: 16379
      REDIS_MASTER_PASSWORD: openIM123
      SENTINEL_QUORUM: 2
      SENTINEL_DOWN_AFTER: 30000
      SENTINEL_PARALLEL_SYNCS: 1
      SENTINEL_FAILOVER_TIMEOUT: 180000
    restart: always
    command: >
      bash -c '
      echo "port $${SENTINEL_PORT}" > /data/sentinel.conf &&
      echo "sentinel monitor $${REDIS_MASTER_NAME} $${REDIS_MASTER_HOST} $${REDIS_MASTER_PORT} $${SENTINEL_QUORUM}" >> /data/sentinel.conf &&
      echo "sentinel auth-pass $${REDIS_MASTER_NAME} $${REDIS_MASTER_PASSWORD}" >> /data/sentinel.conf &&
      echo "sentinel down-after-milliseconds $${REDIS_MASTER_NAME} $${SENTINEL_DOWN_AFTER}" >> /data/sentinel.conf &&
      echo "sentinel parallel-syncs $${REDIS_MASTER_NAME} $${SENTINEL_PARALLEL_SYNCS}" >> /data/sentinel.conf &&
      echo "sentinel failover-timeout $${REDIS_MASTER_NAME} $${SENTINEL_FAILOVER_TIMEOUT}" >> /data/sentinel.conf &&
      echo "sentinel resolve-hostnames yes" >> /data/sentinel.conf &&
      echo "sentinel announce-hostnames yes" >> /data/sentinel.conf &&
      redis-sentinel /data/sentinel.conf
      '
    depends_on:
      redis:
        condition: service_healthy
      redis-slave-1:
        condition: service_started
      redis-slave-2:
        condition: service_started
    networks:
      - openim

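  # etcd: single-node cluster (member s1) exposing the client API on host port
  # 12379 and the peer API on 12380; OpenIM uses it for service registration
  # and discovery.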
  etcd:
    image: "${ETCD_IMAGE}"
    container_name: etcd
    ports:
      - "12379:2379"
      - "12380:2380"
    environment:
      - ETCD_NAME=s1
      - ETCD_DATA_DIR=/etcd-data
      - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379
      - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://0.0.0.0:2380
      - ETCD_INITIAL_CLUSTER=s1=http://0.0.0.0:2380
      - ETCD_INITIAL_CLUSTER_TOKEN=tkn
      - ETCD_INITIAL_CLUSTER_STATE=new
    volumes:
      - "${DATA_DIR}/components/etcd:/etcd-data"
    restart: always
    networks:
      - openim

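  # Kafka: single node running in KRaft mode (the same node acts as controller
  # and broker). The PLAINTEXT listener on 9092 serves containers on the openim
  # network, while the EXTERNAL listener is published on host port 19094.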
  kafka:
    image: "${KAFKA_IMAGE}"
    container_name: kafka
    user: root
    restart: always
    ports:
      - "19094:9094"
    volumes:
      - "${DATA_DIR}/components/kafka:/bitnami/kafka"
    environment:
      #KAFKA_HEAP_OPTS: "-Xms128m -Xmx256m"
      TZ: Asia/Shanghai
      KAFKA_CFG_NODE_ID: 0
      KAFKA_CFG_PROCESS_ROLES: controller,broker
      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@kafka:9093
      KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094
      KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,EXTERNAL://localhost:19094
      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT
      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_NUM_PARTITIONS: 8
      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true"
    networks:
      - openim

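  # MinIO: S3-compatible object storage for uploaded files and media;
  # the API is published on host port 10005 and the web console on 19090.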
  minio:
    image: "${MINIO_IMAGE}"
    ports:
      - "10005:9000"
      - "19090:9090"
    container_name: minio
    volumes:
      - "${DATA_DIR}/components/mnt/data:/data"
      - "${DATA_DIR}/components/mnt/config:/root/.minio"
    environment:
      TZ: Asia/Shanghai
      MINIO_ROOT_USER: root
      MINIO_ROOT_PASSWORD: openIM123
    restart: always
    command: minio server /data --console-address ':9090'
    networks:
      - openim

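  # OpenIM web client, served on host port 11001.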
  openim-web-front:
    image: ${OPENIM_WEB_FRONT_IMAGE}
    container_name: openim-web-front
    restart: always
    ports:
      - "11001:80"
    networks:
      - openim

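  # OpenIM admin console, served on host port 11002.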
  openim-admin-front:
    image: ${OPENIM_ADMIN_FRONT_IMAGE}
    container_name: openim-admin-front
    restart: always
    ports:
      - "11002:80"
    networks:
      - openim

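  # Monitoring stack (prometheus, alertmanager, grafana, node-exporter): these
  # services use host networking and start only when the "m" profile is enabled,
  # e.g. `docker compose --profile m up -d`.
  # Prometheus scrapes metrics using the mounted prometheus.yml and alert rules.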
  prometheus:
    image: ${PROMETHEUS_IMAGE}
    container_name: prometheus
    restart: always
    user: root
    profiles:
      - m
    volumes:
      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
      - ./config/instance-down-rules.yml:/etc/prometheus/instance-down-rules.yml
      - ${DATA_DIR}/components/prometheus/data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--web.listen-address=:${PROMETHEUS_PORT}"
    network_mode: host

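  # Alertmanager: handles alerts fired by Prometheus, using the mounted
  # alertmanager.yml and email template.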
  alertmanager:
    image: ${ALERTMANAGER_IMAGE}
    container_name: alertmanager
    restart: always
    profiles:
      - m
    volumes:
      - ./config/alertmanager.yml:/etc/alertmanager/alertmanager.yml
      - ./config/email.tmpl:/etc/alertmanager/email.tmpl
    command:
      - "--config.file=/etc/alertmanager/alertmanager.yml"
      - "--web.listen-address=:${ALERTMANAGER_PORT}"
    network_mode: host

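  # Grafana: dashboards for the Prometheus metrics, listening on ${GRAFANA_PORT};
  # anonymous access with the Admin role and iframe embedding are enabled.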
  grafana:
    image: ${GRAFANA_IMAGE}
    container_name: grafana
    user: root
    restart: always
    profiles:
      - m
    environment:
      - GF_SECURITY_ALLOW_EMBEDDING=true
      - GF_SESSION_COOKIE_SAMESITE=none
      - GF_SESSION_COOKIE_SECURE=true
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
      - GF_SERVER_HTTP_PORT=${GRAFANA_PORT}
    volumes:
      - ${DATA_DIR:-./}/components/grafana:/var/lib/grafana
    network_mode: host

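  # node-exporter: exposes host-level metrics (CPU, memory, disk, network) from
  # the mounted /proc, /sys, and / on port 19100 for Prometheus to scrape.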
  node-exporter:
    image: ${NODE_EXPORTER_IMAGE}
    container_name: node-exporter
    restart: always
    profiles:
      - m
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - "--path.procfs=/host/proc"
      - "--path.sysfs=/host/sys"
      - "--path.rootfs=/rootfs"
      - "--web.listen-address=:19100"
    network_mode: host