From 905c602edcc2af8f768d71303f81e32ab8369563 Mon Sep 17 00:00:00 2001
From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com>
Date: Sat, 3 May 2025 05:07:36 +0100
Subject: [PATCH] make image smaller

---
 .github/workflows/docker-publish.yaml | 20 +++----
 Docker/anaconda_install.sh            |  8 +++
 Docker/install_wrapper.sh             | 33 +++++++++++
 Docker/setup.sh                       | 80 ---------------------------
 Dockerfile                            | 32 ++++++-----
 docker-compose.yaml                   |  4 ++
 6 files changed, 72 insertions(+), 105 deletions(-)
 create mode 100644 Docker/install_wrapper.sh
 delete mode 100644 Docker/setup.sh

diff --git a/.github/workflows/docker-publish.yaml b/.github/workflows/docker-publish.yaml
index 819486d6..39785290 100644
--- a/.github/workflows/docker-publish.yaml
+++ b/.github/workflows/docker-publish.yaml
@@ -26,19 +26,19 @@ jobs:
         include:
           - cuda_version: 124
             lite: true
-            cuda_base: runtime
+            torch_base: lite
             tag_prefix: cu124-lite
           - cuda_version: 124
             lite: false
-            cuda_base: devel
+            torch_base: full
             tag_prefix: cu124
           - cuda_version: 128
             lite: true
-            cuda_base: runtime
+            torch_base: lite
             tag_prefix: cu128-lite
           - cuda_version: 128
             lite: false
-            cuda_base: devel
+            torch_base: full
             tag_prefix: cu128

     steps:
@@ -98,7 +98,7 @@ jobs:
           platforms: linux/amd64
           build-args: |
             LITE=${{ matrix.lite }}
-            CUDA_BASE=${{ matrix.cuda_base }}
+            TORCH_BASE=${{ matrix.torch_base }}
             CUDA_VERSION=${{ matrix.cuda_version }}
             WORKFLOW=true
           tags: |
@@ -113,19 +113,19 @@ jobs:
         include:
           - cuda_version: 124
             lite: true
-            cuda_base: runtime
+            torch_base: lite
             tag_prefix: cu124-lite
           - cuda_version: 124
             lite: false
-            cuda_base: devel
+            torch_base: full
             tag_prefix: cu124
           - cuda_version: 128
             lite: true
-            cuda_base: runtime
+            torch_base: lite
             tag_prefix: cu128-lite
           - cuda_version: 128
             lite: false
-            cuda_base: devel
+            torch_base: full
             tag_prefix: cu128

     steps:
@@ -185,7 +185,7 @@ jobs:
           platforms: linux/arm64
           build-args: |
             LITE=${{ matrix.lite }}
-            CUDA_BASE=${{ matrix.cuda_base }}
+            TORCH_BASE=${{ matrix.torch_base }}
             CUDA_VERSION=${{ matrix.cuda_version }}
             WORKFLOW=true
           tags: |
diff --git a/Docker/anaconda_install.sh b/Docker/anaconda_install.sh
index e97974da..96a2a172 100644
--- a/Docker/anaconda_install.sh
+++ b/Docker/anaconda_install.sh
@@ -53,6 +53,14 @@ source "$HOME/anaconda3/etc/profile.d/conda.sh"

 "$HOME/anaconda3/bin/conda" install gcc=14 gxx ffmpeg cmake make unzip -q -y

+if [ "$CUDA_VERSION" = "12.8" ]; then
+    "$HOME/anaconda3/bin/pip" install torch torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/cu128
+elif [ "$CUDA_VERSION" = "12.4" ]; then
+    "$HOME/anaconda3/bin/pip" install torch==2.5.1 torchaudio==2.5.1 --no-cache-dir --index-url https://download.pytorch.org/whl/cu124
+fi
+
+"$HOME/anaconda3/bin/pip" cache purge
+
 rm $LOG_PATH

 rm -rf "$HOME/anaconda3/pkgs"
diff --git a/Docker/install_wrapper.sh b/Docker/install_wrapper.sh
new file mode 100644
index 00000000..bca97107
--- /dev/null
+++ b/Docker/install_wrapper.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
+
+cd "$SCRIPT_DIR" || exit 1
+
+cd .. || exit 1
+
+set -e
+
+source "$HOME/anaconda3/etc/profile.d/conda.sh"
+
+mkdir GPT_SoVITS
+
+mkdir GPT_SoVITS/text
+
+ln -s /workspace/models/pretrained_models /workspace/GPT-SoVITS/GPT_SoVITS/pretrained_models
+
+ln -s /workspace/models/G2PWModel /workspace/GPT-SoVITS/GPT_SoVITS/text/G2PWModel
+
+bash install.sh --device "CU${CUDA_VERSION}" --source HF
+
+pip cache purge
+
+pip show torch
+
+rm -rf /tmp/* /var/tmp/*
+
+rm -rf "$HOME/anaconda3/pkgs"
+
+mkdir "$HOME/anaconda3/pkgs"
+
+rm -rf /root/.conda /root/.cache
diff --git a/Docker/setup.sh b/Docker/setup.sh
deleted file mode 100644
index af907015..00000000
--- a/Docker/setup.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/bash
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
-
-cd "$SCRIPT_DIR" || exit 1
-
-cd .. || exit 1
-
-set -e
-
-WORKFLOW=${WORKFLOW:-"false"}
-LITE=${LITE:-"false"}
-
-if [ "$WORKFLOW" = "true" ]; then
-    WGET_CMD="wget -nv --tries=25 --wait=5 --read-timeout=40 --retry-on-http-error=404"
-else
-    WGET_CMD="wget --tries=25 --wait=5 --read-timeout=40 --retry-on-http-error=404"
-fi
-
-USE_FUNASR=false
-USE_FASTERWHISPER=false
-
-if [ "$LITE" = "true" ]; then
-    USE_FUNASR=true
-    USE_FASTERWHISPER=false
-else
-    USE_FUNASR=true
-    USE_FASTERWHISPER=true
-fi
-
-if [ "$USE_FUNASR" = "true" ]; then
-    echo "Downloading funasr..." &&
-        $WGET_CMD "https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/funasr.zip" &&
-        unzip -q funasr.zip -d tools/asr/models/ &&
-        rm -rf funasr.zip
-else
-    echo "Skipping funasr download"
-fi
-
-if [ "$USE_FASTERWHISPER" = "true" ]; then
-    echo "Downloading faster-whisper..." &&
-        $WGET_CMD "https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/faster-whisper.zip" &&
-        unzip -q faster-whisper.zip -d tools/asr/models/ &&
-        rm -rf faster-whisper.zip
-else
-    echo "Skipping faster-whisper download"
-fi
-
-source "$HOME/anaconda3/etc/profile.d/conda.sh"
-
-if [ "$CUDA_VERSION" = 128 ]; then
-    pip install torch torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/cu128
-elif [ "$CUDA_VERSION" = 124 ]; then
-    pip install torch==2.5.1 torchaudio==2.5.1 --no-cache-dir --index-url https://download.pytorch.org/whl/cu124
-fi
-
-if [ "$LITE" = "true" ]; then
-    bash install.sh --device "CU${CUDA_VERSION}" --source HF
-elif [ "$LITE" = "false" ]; then
-    bash install.sh --device "CU${CUDA_VERSION}" --source HF --download-uvr5
-else
-    exit 1
-fi
-
-mkdir -p /workspace/model
-mv /workspace/GPT-SoVITS/GPT_SoVITS/pretrained_models /workspace/model/
-mv /workspace/GPT-SoVITS/tools/asr/models /workspace/model/
-mv /workspace/GPT-SoVITS/tools/uvr5/uvr5_weights /workspace/model/
-
-pip cache purge
-
-pip show torch
-
-rm -rf /tmp/* /var/tmp/*
-
-rm -rf "$HOME/anaconda3/pkgs"
-
-mkdir "$HOME/anaconda3/pkgs"
-
-rm -rf /root/.conda /root/.cache
diff --git a/Dockerfile b/Dockerfile
index aaa92a3a..0a1b8f5c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,19 +1,21 @@
 ARG CUDA_VERSION=124
-ARG CUDA_BASE=runtime
+ARG TORCH_BASE=full

-FROM xxxxrt666/cuda-base:cu${CUDA_VERSION}-${CUDA_BASE}
+FROM xxxxrt666/torch-base:cu${CUDA_VERSION}-${TORCH_BASE}

 LABEL maintainer="XXXXRT"
 LABEL version="V4"
 LABEL description="Docker image for GPT-SoVITS"

-ARG CUDA_VERSION=12.4
+ARG CUDA_VERSION=124

 ENV CUDA_VERSION=${CUDA_VERSION}

+SHELL ["/bin/bash", "-c"]
+
 WORKDIR /workspace/GPT-SoVITS

-COPY . /workspace/GPT-SoVITS
+COPY Docker /workspace/GPT-SoVITS/

 ARG LITE=false
 ENV LITE=${LITE}
@@ -24,19 +26,15 @@ ENV WORKFLOW=${WORKFLOW}
 ARG TARGETPLATFORM
 ENV TARGETPLATFORM=${TARGETPLATFORM}

-ENV HOME="/root"
-
 RUN bash Docker/anaconda_install.sh

-ENV PATH="$HOME/anaconda3/bin:$PATH"
+COPY extra-req.txt /workspace/GPT-SoVITS/

-SHELL ["/bin/bash", "-c"]
+COPY requirements.txt /workspace/GPT-SoVITS/

-ENV PATH="/usr/local/cuda/bin:$PATH"
-ENV CUDA_HOME="/usr/local/cuda"
-ENV MAKEFLAGS="-j$(nproc)"
+COPY install.sh /workspace/GPT-SoVITS/

-RUN bash Docker/setup.sh
+RUN bash Docker/install_wrapper.sh

 EXPOSE 9871 9872 9873 9874 9880

@@ -44,11 +42,15 @@ ENV PYTHONPATH="/workspace/GPT-SoVITS"

 RUN conda init bash && echo "conda activate base" >> ~/.bashrc

+COPY . /workspace/GPT-SoVITS
+
 CMD ["/bin/bash", "-c", "\
     rm -rf /workspace/GPT-SoVITS/GPT_SoVITS/pretrained_models && \
+    rm -rf /workspace/GPT-SoVITS/GPT_SoVITS/text/G2PWModel && \
     rm -rf /workspace/GPT-SoVITS/tools/asr/models && \
     rm -rf /workspace/GPT-SoVITS/tools/uvr5/uvr5_weights && \
-    ln -s /workspace/model/pretrained_models /workspace/GPT-SoVITS/GPT_SoVITS/pretrained_models && \
-    ln -s /workspace/model/models /workspace/GPT-SoVITS/tools/asr/models && \
-    ln -s /workspace/model/uvr5_weights /workspace/GPT-SoVITS/tools/uvr5/uvr5_weights && \
+    ln -s /workspace/models/pretrained_models /workspace/GPT-SoVITS/GPT_SoVITS/pretrained_models && \
+    ln -s /workspace/models/G2PWModel /workspace/GPT-SoVITS/GPT_SoVITS/text/G2PWModel && \
+    ln -s /workspace/models/asr_models /workspace/GPT-SoVITS/tools/asr/models && \
+    ln -s /workspace/models/uvr5_weights /workspace/GPT-SoVITS/tools/uvr5/uvr5_weights && \
     exec bash"]
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index f519cef3..cea85f7e 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -13,6 +13,7 @@ services:
     volumes:
       - .:/workspace/GPT-SoVITS
       - /dev/null:/workspace/GPT-SoVITS/pretrained_models
+      - /dev/null:/workspace/GPT-SoVITS/text/G2PWModel
       - /dev/null:/workspace/tools/asr/models
       - /dev/null:/workspace/tools/uvr5/uvr5_weights
     environment:
@@ -34,6 +35,7 @@ services:
     volumes:
       - .:/workspace/GPT-SoVITS
      - /dev/null:/workspace/GPT-SoVITS/pretrained_models
+      - /dev/null:/workspace/GPT-SoVITS/text/G2PWModel
       - /dev/null:/workspace/tools/asr/models
       - /dev/null:/workspace/tools/uvr5/uvr5_weights
     environment:
@@ -55,6 +57,7 @@ services:
     volumes:
       - .:/workspace/GPT-SoVITS
       - /dev/null:/workspace/GPT-SoVITS/pretrained_models
+      - /dev/null:/workspace/GPT-SoVITS/text/G2PWModel
       - /dev/null:/workspace/tools/asr/models
       - /dev/null:/workspace/tools/uvr5/uvr5_weights
     environment:
@@ -76,6 +79,7 @@ services:
     volumes:
       - .:/workspace/GPT-SoVITS
       - /dev/null:/workspace/GPT-SoVITS/pretrained_models
+      - /dev/null:/workspace/GPT-SoVITS/text/G2PWModel
       - /dev/null:/workspace/tools/asr/models
       - /dev/null:/workspace/tools/uvr5/uvr5_weights
     environment: