Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git, synced 2025-12-17 01:59:08 +08:00

Commit: Deployment preparation
parent e8616c87c6
commit 62513f9d95
.gitignore (vendored) | 4 changes

@@ -5,10 +5,6 @@ __pycache__
 env
 runtime
 .idea
-output
-logs
-SoVITS_weights*/
-GPT_weights*/
 TEMP
 weight.json
 ffmpeg*
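With the output, logs, SoVITS_weights*/ and GPT_weights*/ patterns dropped from .gitignore, the weight directories can be tracked again, which is what the .gitkeep placeholders added later in this commit rely on. A quick sanity check, assuming a checkout of this commit:

    # git check-ignore exits non-zero when a path is NOT ignored,
    # so after this change the command below should print "not ignored"
    git check-ignore -v GPT_weights_v2/.gitkeep || echo "not ignored"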
Dockerfile | 111 changes

@@ -1,62 +1,73 @@
-ARG CUDA_VERSION=12.6
-ARG TORCH_BASE=full
-
-FROM xxxxrt666/torch-base:cu${CUDA_VERSION}-${TORCH_BASE}
-
-LABEL maintainer="XXXXRT"
-LABEL version="V4"
-LABEL description="Docker image for GPT-SoVITS"
-
-ARG CUDA_VERSION=12.6
-
-ENV CUDA_VERSION=${CUDA_VERSION}
-
-SHELL ["/bin/bash", "-c"]
-
-WORKDIR /workspace/GPT-SoVITS
-
-COPY Docker /workspace/GPT-SoVITS/Docker/
-
-ARG LITE=false
-ENV LITE=${LITE}
-
-ARG WORKFLOW=false
-ENV WORKFLOW=${WORKFLOW}
-
-ARG TARGETPLATFORM
-ENV TARGETPLATFORM=${TARGETPLATFORM}
-
-RUN bash Docker/miniconda_install.sh
-
-COPY extra-req.txt /workspace/GPT-SoVITS/
-COPY requirements.txt /workspace/GPT-SoVITS/
-COPY install.sh /workspace/GPT-SoVITS/
-
-RUN bash Docker/install_wrapper.sh
-
-EXPOSE 9871 9872 9873 9874 9880
-
-ENV PYTHONPATH="/workspace/GPT-SoVITS"
-
-RUN conda init bash && echo "conda activate base" >> ~/.bashrc
-
-WORKDIR /workspace
-
-RUN rm -rf /workspace/GPT-SoVITS
-
-WORKDIR /workspace/GPT-SoVITS
-
-COPY . /workspace/GPT-SoVITS
-
-CMD ["/bin/bash", "-c", "\
-  rm -rf /workspace/GPT-SoVITS/GPT_SoVITS/pretrained_models && \
-  rm -rf /workspace/GPT-SoVITS/GPT_SoVITS/text/G2PWModel && \
-  rm -rf /workspace/GPT-SoVITS/tools/asr/models && \
-  rm -rf /workspace/GPT-SoVITS/tools/uvr5/uvr5_weights && \
-  ln -s /workspace/models/pretrained_models /workspace/GPT-SoVITS/GPT_SoVITS/pretrained_models && \
-  ln -s /workspace/models/G2PWModel /workspace/GPT-SoVITS/GPT_SoVITS/text/G2PWModel && \
-  ln -s /workspace/models/asr_models /workspace/GPT-SoVITS/tools/asr/models && \
-  ln -s /workspace/models/uvr5_weights /workspace/GPT-SoVITS/tools/uvr5/uvr5_weights && \
-  exec bash"]
+FROM nvidia/cuda:12.8.1-cudnn-runtime-ubuntu22.04
+
+# GPT-SoVITS Docker Image
+# This image contains the GPT-SoVITS TTS model with GPU support
+
+# Prevent interactive prompts during build
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update && apt-get install -y \
+    python3.11 \
+    python3.11-dev \
+    python3.11-distutils \
+    git \
+    wget \
+    curl \
+    ffmpeg \
+    libsndfile1 \
+    build-essential \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install pip for Python 3.11
+RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11
+
+# Set Python 3.11 as default
+RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 && \
+    update-alternatives --install /usr/bin/pip pip /usr/local/bin/pip3.11 1
+
+# Set working directory
+WORKDIR /workspace
+
+# Environment variables for GPU
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
+
+# Create GPT-SoVITS directory (will be mounted via volumes)
+RUN mkdir -p /workspace/GPT-SoVITS
+
+# Set working directory to GPT-SoVITS
+WORKDIR /workspace/GPT-SoVITS
+
+# Install PyTorch with CUDA 12.8 support first
+RUN pip install --no-cache-dir \
+    torch==2.7.1 \
+    torchaudio==2.7.1 \
+    --index-url https://download.pytorch.org/whl/cu128
+
+# Copy GPT-SoVITS requirements.txt from current directory
+COPY requirements.txt /tmp/requirements.txt
+
+# Install GPT-SoVITS dependencies from requirements.txt
+RUN pip install --no-cache-dir -r /tmp/requirements.txt
+
+# Install additional dependencies for STT (not in requirements.txt)
+RUN pip install --no-cache-dir \
+    "faster-whisper>=1.1.0" \
+    soundfile \
+    BS-RoFormer
+
+# Expose API port
+EXPOSE 9881
+
+# Default configuration
+ENV API_HOST=0.0.0.0
+ENV API_PORT=9881
+ENV CONFIG_PATH=GPT_SoVITS/configs/tts_infer.yaml
+
+# Health check - Just check if the API server is responding (any response is OK, even 4xx)
+HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
+    CMD curl -s -o /dev/null -w "%{http_code}" http://localhost:9881/tts | grep -E "^[2-4][0-9][0-9]$" > /dev/null || exit 1
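The new Dockerfile defines no default command, so the container is expected to be given the source tree and a command at run time. A minimal sketch of building and running it, assuming the repository root as build context, an illustrative tag name (gpt-sovits:dev), and a bind mount for the /workspace/GPT-SoVITS directory the image creates; the api_v2.py flags mirror the ones used in Dockerfile.prod below:

    # build from the repository root (tag name is illustrative)
    docker build -t gpt-sovits:dev .
    # run with GPU access, mounting the checked-out source into the container
    docker run --rm --gpus all -p 9881:9881 \
        -v "$(pwd)":/workspace/GPT-SoVITS \
        gpt-sovits:dev \
        python api_v2.py -a 0.0.0.0 -p 9881 -c GPT_SoVITS/configs/tts_infer.yaml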
Dockerfile.prod | new file | 124 lines

@@ -0,0 +1,124 @@
+# Production Dockerfile for GPT-SoVITS
+# Self-contained image with GPU support and all dependencies
+
+FROM nvidia/cuda:12.8.1-cudnn-runtime-ubuntu22.04
+
+# Prevent interactive prompts during build
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install system dependencies including AWS CLI
+RUN apt-get update && apt-get install -y \
+    software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update && apt-get install -y \
+    python3.11 \
+    python3.11-dev \
+    python3.11-distutils \
+    git \
+    wget \
+    curl \
+    ffmpeg \
+    libsndfile1 \
+    build-essential \
+    unzip \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install AWS CLI v2
+RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" \
+    && unzip awscliv2.zip \
+    && ./aws/install \
+    && rm -rf awscliv2.zip aws
+
+# Install pip for Python 3.11
+RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11
+
+# Set Python 3.11 as default
+RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.11 1 && \
+    update-alternatives --install /usr/bin/pip pip /usr/local/bin/pip3.11 1
+
+# Set working directory
+WORKDIR /workspace
+
+# Environment variables for GPU
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
+
+# Install PyTorch with CUDA 12.8 support first
+RUN pip install --no-cache-dir \
+    torch==2.7.1 \
+    torchaudio==2.7.1 \
+    --index-url https://download.pytorch.org/whl/cu128
+
+# Copy only requirements.txt for dependency pre-installation
+COPY requirements.txt /tmp/gpt-sovits-requirements.txt
+
+# Install GPT-SoVITS dependencies
+RUN pip install --no-cache-dir -r /tmp/gpt-sovits-requirements.txt
+
+# Install additional dependencies for STT
+RUN pip install --no-cache-dir \
+    "faster-whisper>=1.1.0" \
+    soundfile \
+    BS-RoFormer
+
+# Create cache storage directory (shared with ML Service)
+RUN mkdir -p /app/shared/cache_storage
+
+# Expose API port
+EXPOSE 9881
+
+# Environment variables for S3 model download
+ENV API_HOST=0.0.0.0
+ENV API_PORT=9881
+ENV CONFIG_PATH=GPT_SoVITS/configs/tts_infer.yaml
+ENV MODEL_DIR=/workspace/GPT-SoVITS
+ENV S3_MODEL_URI=s3://shiftup-enterprise-ai-service/tts/model_registry/GPT-SoVITS/
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=10s --start-period=90s --retries=3 \
+    CMD curl -s -o /dev/null -w "%{http_code}" http://localhost:9881/tts | grep -E "^[2-4][0-9][0-9]$" > /dev/null || exit 1
+
+# Create entrypoint script for S3 model download
+RUN echo '#!/bin/bash\n\
+set -e\n\
+\n\
+echo "=== GPT-SoVITS Startup (EC2 Production) ==="\n\
+echo "Model directory: $MODEL_DIR"\n\
+echo "S3 source: $S3_MODEL_URI"\n\
+echo ""\n\
+\n\
+# Check if model directory already exists and has content\n\
+if [ -d "$MODEL_DIR" ] && [ "$(ls -A $MODEL_DIR 2>/dev/null)" ]; then\n\
+    echo "✓ Model files already exist in $MODEL_DIR"\n\
+    echo "Skipping S3 download..."\n\
+else\n\
+    echo "Downloading model files from S3..."\n\
+    echo "This may take several minutes on first startup..."\n\
+\n\
+    # Create model directory\n\
+    mkdir -p $MODEL_DIR\n\
+\n\
+    # Download from S3 (using AWS CLI)\n\
+    if ! aws s3 sync "$S3_MODEL_URI" "$MODEL_DIR" --delete --quiet; then\n\
+        echo "ERROR: Failed to download model files from S3"\n\
+        echo "Please check:"\n\
+        echo "  1. AWS credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_DEFAULT_REGION)"\n\
+        echo "  2. S3 URI: $S3_MODEL_URI"\n\
+        echo "  3. Network connectivity to S3"\n\
+        exit 1\n\
+    fi\n\
+\n\
+    echo "✓ Model files downloaded successfully"\n\
+fi\n\
+\n\
+echo ""\n\
+echo "Changing to model directory..."\n\
+cd $MODEL_DIR\n\
+\n\
+echo "Current directory: $(pwd)"\n\
+echo ""\n\
+echo "Starting GPT-SoVITS API on $API_HOST:$API_PORT..."\n\
+exec python api_v2.py -a "$API_HOST" -p "$API_PORT" -c "$CONFIG_PATH"\n\
+' > /entrypoint.sh && chmod +x /entrypoint.sh
+
+ENTRYPOINT ["/entrypoint.sh"]
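A sketch of how this production image might be started, assuming an illustrative tag (gpt-sovits:prod) and valid AWS credentials for the configured S3 bucket; the entrypoint above then syncs the models from S3 before launching api_v2.py on port 9881:

    # tag name is illustrative; credential and region values are placeholders
    docker build -f Dockerfile.prod -t gpt-sovits:prod .
    docker run --rm --gpus all -p 9881:9881 \
        -e AWS_ACCESS_KEY_ID=... \
        -e AWS_SECRET_ACCESS_KEY=... \
        -e AWS_DEFAULT_REGION=... \
        gpt-sovits:prod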
New empty files (.gitkeep placeholders, 0 changes each):

GPT_weights/.gitkeep
GPT_weights_v2/.gitkeep
GPT_weights_v2Pro/.gitkeep
GPT_weights_v2ProPlus/.gitkeep
GPT_weights_v3/.gitkeep
GPT_weights_v4/.gitkeep
SoVITS_weights/.gitkeep
SoVITS_weights_v2/.gitkeep
SoVITS_weights_v2Pro/.gitkeep
SoVITS_weights_v2ProPlus/.gitkeep
SoVITS_weights_v3/.gitkeep
SoVITS_weights_v4/.gitkeep
buildspec.yml | new file | 25 lines

@@ -0,0 +1,25 @@
+version: 0.2
+
+env:
+  variables:
+    ECR_REGISTRY: "public.ecr.aws/r2p3x7v0/ailabs"
+    IMAGE_NAME: "sai2ply"
+    SERVICE_NAME: "gpt-sovits"
+
+phases:
+  pre_build:
+    commands:
+      - aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws
+      - export IMAGE_TAG=${CODEBUILD_RESOLVED_SOURCE_VERSION:0:7}
+      - export FULL_IMAGE_URI="${ECR_REGISTRY}/${IMAGE_NAME}:${SERVICE_NAME}-${IMAGE_TAG}"
+      - export LATEST_IMAGE_URI="${ECR_REGISTRY}/${IMAGE_NAME}:${SERVICE_NAME}-latest"
+
+  build:
+    commands:
+      - docker build -f Dockerfile.prod -t $FULL_IMAGE_URI -t $LATEST_IMAGE_URI .
+
+  post_build:
+    commands:
+      - docker push $FULL_IMAGE_URI
+      - docker push $LATEST_IMAGE_URI
+      - echo "Image pushed - $LATEST_IMAGE_URI"
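For reference, the same build can be reproduced outside CodeBuild with plain Docker and the AWS CLI; the registry, image name, and tag below simply mirror the variables defined in the buildspec, and the short git SHA stands in for CODEBUILD_RESOLVED_SOURCE_VERSION:

    # log in to the public ECR registry (same command CodeBuild runs)
    aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws
    # derive a 7-character tag from the current commit, then build and push
    IMAGE_TAG=$(git rev-parse --short=7 HEAD)
    docker build -f Dockerfile.prod -t public.ecr.aws/r2p3x7v0/ailabs/sai2ply:gpt-sovits-$IMAGE_TAG .
    docker push public.ecr.aws/r2p3x7v0/ailabs/sai2ply:gpt-sovits-$IMAGE_TAG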