Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git (synced 2025-08-24 22:05:59 +08:00)

Accelerate Docker Image Building

parent 2e03b9ad41
commit a373df3e51
.github/workflows/docker-publish.yaml (vendored, 66 lines changed)
@@ -4,8 +4,39 @@ on:
   workflow_dispatch:
 
 jobs:
-  build-and-push:
+  generate-meta:
     runs-on: ubuntu-latest
+    outputs:
+      tag: ${{ steps.meta.outputs.tag }}
+    steps:
+      - name: Checkout Code
+        uses: actions/checkout@v4
+
+      - name: Generate Tag
+        id: meta
+        run: |
+          DATE=$(date +'%Y%m%d')
+          COMMIT=$(git rev-parse --short=6 HEAD)
+          echo "tag=${DATE}-${COMMIT}" >> $GITHUB_OUTPUT
+
+  build-and-push:
+    needs: generate-meta
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - cuda_version: 12.4
+            use_fasterwhisper: false
+            tag_prefix: lite-cu124
+          - cuda_version: 12.4
+            use_fasterwhisper: true
+            tag_prefix: cu124
+          - cuda_version: 12.8
+            use_fasterwhisper: false
+            tag_prefix: lite-cu128
+          - cuda_version: 12.8
+            use_fasterwhisper: true
+            tag_prefix: cu128
 
     steps:
       - name: Checkout Code
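For reference, the tag emitted by the new generate-meta job is the build date followed by a 6-character commit hash. A minimal sketch of the same commands run locally from a checkout of the repository (the date shown is only an example):

    # Reproduce the generate-meta tag format locally
    DATE=$(date +'%Y%m%d')
    COMMIT=$(git rev-parse --short=6 HEAD)
    echo "${DATE}-${COMMIT}"    # e.g. <YYYYMMDD>-a373df for this commit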
@@ -19,6 +50,7 @@ jobs:
           sudo apt clean
           echo "After cleanup:"
           df -h
+          sudo du -hxd1 / | sort -hr | head -n 20
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -29,30 +61,6 @@ jobs:
           username: ${{ secrets.DOCKER_HUB_USERNAME }}
           password: ${{ secrets.DOCKER_HUB_PASSWORD }}
 
-      - name: Generate Tag
-        id: meta
-        run: |
-          DATE=$(date +'%Y%m%d')
-          COMMIT=$(git rev-parse --short=6 HEAD)
-          echo "tag=${DATE}-${COMMIT}" >> $GITHUB_OUTPUT
-
-      - name: Build and Push Lite Docker Image
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          file: ./Dockerfile
-          push: true
-          build-args: |
-            USE_FUNASR=true
-            USE_FASTERWHISPER=false
-            CUDA_VERSION=12.4
-            WGET_CMD=wget -nv --tries=25 --wait=5 --read-timeout=40 --retry-on-http-error=404
-          tags: |
-            xxxxrt666/gpt-sovits:lite-cu124-${{ steps.meta.outputs.tag }}
-            xxxxrt666/gpt-sovits:latest-lite
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
-
       - name: Build and Push Docker Image
         uses: docker/build-push-action@v5
         with:
@@ -61,11 +69,11 @@ jobs:
           push: true
           build-args: |
             USE_FUNASR=true
-            USE_FASTERWHISPER=true
-            CUDA_VERSION=12.4
+            USE_FASTERWHISPER=${{ matrix.use_fasterwhisper }}
+            CUDA_VERSION=${{ matrix.cuda_version }}
             WGET_CMD=wget -nv --tries=25 --wait=5 --read-timeout=40 --retry-on-http-error=404
           tags: |
-            xxxxrt666/gpt-sovits:cu124-${{ steps.meta.outputs.tag }}
-            xxxxrt666/gpt-sovits:latest
+            xxxxrt666/gpt-sovits:${{ matrix.tag_prefix }}-${{ needs.generate-meta.outputs.tag }}
+            xxxxrt666/gpt-sovits:latest-${{ matrix.tag_prefix }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
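With the matrix in place, one parameterized step replaces the two hard-coded build steps, and every combination publishes a date-commit tag plus a rolling latest-<prefix> tag. A sketch that enumerates the resulting tags (the TAG value here is hypothetical; in CI it comes from needs.generate-meta.outputs.tag):

    # Hypothetical tag value standing in for the generate-meta output
    TAG=20250101-abcdef
    for prefix in lite-cu124 cu124 lite-cu128 cu128; do
        echo "xxxxrt666/gpt-sovits:${prefix}-${TAG}"
        echo "xxxxrt666/gpt-sovits:latest-${prefix}"
    done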
Dockerfile

@@ -2,6 +2,10 @@ ARG CUDA_VERSION=12.4
 
 FROM nvidia/cuda:${CUDA_VERSION}.1-cudnn-runtime-ubuntu22.04
 
+ARG CUDA_VERSION
+
+ENV CUDA_VERSION=${CUDA_VERSION}
+
 LABEL maintainer="XXXXRT"
 LABEL version="V4-0429"
 LABEL description="Docker image for GPT-SoVITS"
@@ -68,7 +72,7 @@ ENV MAKEFLAGS="-j$(nproc)"
 
 RUN source /root/anaconda3/etc/profile.d/conda.sh && \
     conda activate GPTSoVITS && \
-    bash install.sh --source HF --download-uvr5 && \
+    bash install.sh --device CU${CUDA_VERSION//./} --source HF --download-uvr5 && \
     pip cache purge
 
 RUN rm -rf /root/anaconda3/pkgs
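Outside CI, the same build argument can be exercised with a plain docker build; the ${CUDA_VERSION//./} expansion in the RUN line strips the dot so install.sh receives a CU-style device name. A minimal sketch, using an illustrative local tag and assuming the remaining ARGs keep their defaults (the workflow additionally passes USE_FUNASR, USE_FASTERWHISPER and WGET_CMD, omitted here):

    # Local build with the new CUDA_VERSION build argument (tag name is illustrative)
    docker build --build-arg CUDA_VERSION=12.8 -t gpt-sovits:local-cu128 .

    # The parameter expansion used in the Dockerfile's install step
    CUDA_VERSION=12.8
    echo "CU${CUDA_VERSION//./}"    # prints CU128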
@@ -41,9 +41,6 @@ while [[ $# -gt 0 ]]; do
     case "$1" in
         --cuda)
             case "$2" in
-                12.1)
-                    CUDA_VERSION=12.1
-                    ;;
                 12.4)
                     CUDA_VERSION=12.4
                     ;;
@@ -52,7 +49,7 @@ while [[ $# -gt 0 ]]; do
                     ;;
                 *)
                     echo "Error: Invalid CUDA_VERSION: $2"
-                    echo "Choose From: [12.1, 12.4, 12.8]"
+                    echo "Choose From: [12.4, 12.8]"
                     exit 1
                     ;;
             esac
install.sh (78 lines changed)
@@ -14,15 +14,20 @@ fi
 
 trap 'echo "Error Occured at \"$BASH_COMMAND\" with exit code $?"; exit 1' ERR
 
-is_HF=false
-is_HF_MIRROR=false
-is_MODELSCOPE=false
+USE_CUDA=false
+USE_ROCM=false
+USE_CPU=false
+
+USE_HF=false
+USE_HF_MIRROR=false
+USE_MODELSCOPE=false
 DOWNLOAD_UVR5=false
 
 print_help() {
     echo "Usage: bash install.sh [OPTIONS]"
     echo ""
     echo "Options:"
+    echo "  --device    CU124|CU128|ROCM|MPS|CPU    Specify the Device (REQUIRED)"
     echo "  --source    HF|HF-Mirror|ModelScope     Specify the model source (REQUIRED)"
     echo "  --download-uvr5                         Enable downloading the UVR5 model"
     echo "  -h, --help                              Show this help message and exit"
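The Dockerfile above already uses the new required --device flag; a typical manual invocation matching the container build for CUDA 12.4 looks like the sketch below (run inside the project's activated environment), with CPU-only machines passing --device CPU instead:

    # Install with the new required flags, mirroring the Dockerfile's call for CUDA 12.4
    bash install.sh --device CU124 --source HF --download-uvr5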
@@ -44,13 +49,13 @@ while [[ $# -gt 0 ]]; do
         --source)
             case "$2" in
                 HF)
-                    is_HF=true
+                    USE_HF=true
                     ;;
                 HF-Mirror)
-                    is_HF_MIRROR=true
+                    USE_HF_MIRROR=true
                     ;;
                 ModelScope)
-                    is_MODELSCOPE=true
+                    USE_MODELSCOPE=true
                     ;;
                 *)
                     echo "Error: Invalid Download Source: $2"
@@ -60,6 +65,33 @@ while [[ $# -gt 0 ]]; do
             esac
             shift 2
             ;;
+        --cuda)
+            case "$2" in
+                CU124)
+                    CUDA_VERSION=124
+                    USE_CUDA=true
+                    ;;
+                CU128)
+                    CUDA_VERSION=128
+                    USE_CUDA=true
+                    ;;
+                ROCM)
+                    USE_ROCM=true
+                    ;;
+                MPS)
+                    USE_CPU=true
+                    ;;
+                CPU)
+                    USE_CPU=true
+                    ;;
+                *)
+                    echo "Error: Invalid Device: $2"
+                    echo "Choose From: [CU124, CU128, ROCM, MPS, CPU]"
+                    exit 1
+                    ;;
+            esac
+            shift 2
+            ;;
         --download-uvr5)
             DOWNLOAD_UVR5=true
             shift
@@ -76,24 +108,31 @@ while [[ $# -gt 0 ]]; do
     esac
 done
 
-if ! $is_HF && ! $is_HF_MIRROR && ! $is_MODELSCOPE; then
+if ! $USE_CUDA && ! $USE_ROCM && ! $USE_CPU; then
+    echo "Error: Device is REQUIRED"
+    echo ""
+    print_help
+    exit 1
+fi
+
+if ! $USE_HF && ! $USE_HF_MIRROR && ! $USE_MODELSCOPE; then
     echo "Error: Download Source is REQUIRED"
     echo ""
     print_help
     exit 1
 fi
 
-if [ "$is_HF" = "true" ]; then
+if [ "$USE_HF" = "true" ]; then
     echo "Download Model From HuggingFace"
     PRETRINED_URL="https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/pretrained_models.zip"
     G2PW_URL="https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/G2PWModel.zip"
     UVR5_URL="https://huggingface.co/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/uvr5_weights.zip"
-elif [ "$is_HF_MIRROR" = "true" ]; then
+elif [ "$USE_HF_MIRROR" = "true" ]; then
     echo "Download Model From HuggingFace-Mirror"
     PRETRINED_URL="https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/pretrained_models.zip"
     G2PW_URL="https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/G2PWModel.zip"
     UVR5_URL="https://hf-mirror.com/XXXXRT/GPT-SoVITS-Pretrained/resolve/main/uvr5_weights.zip"
-elif [ "$is_MODELSCOPE" = "true" ]; then
+elif [ "$USE_MODELSCOPE" = "true" ]; then
     echo "Download Model From ModelScope"
     PRETRINED_URL="https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/pretrained_models.zip"
     G2PW_URL="https://www.modelscope.cn/models/XXXXRT/GPT-SoVITS-Pretrained/resolve/master/G2PWModel.zip"
@@ -156,19 +195,19 @@ conda install zip -y
 
 git-lfs install
 
+if [ "$USE_CUDA" = true ]; then
     echo "Checking for CUDA installation..."
     if command -v nvidia-smi &>/dev/null; then
-        USE_CUDA=true
         echo "CUDA found."
     else
+        echo "CUDA not found."
         USE_CUDA=false
-        echo "CUDA not found."
     fi
+fi
 
-if [ "$USE_CUDA" = false ]; then
+if [ "$USE_ROCM" = true ]; then
     echo "Checking for ROCm installation..."
     if [ -d "/opt/rocm" ]; then
-        USE_ROCM=true
         echo "ROCm found."
         if grep -qi "microsoft" /proc/version; then
             echo "You are running WSL."
@ -178,20 +217,27 @@ if [ "$USE_CUDA" = false ]; then
|
||||
IS_WSL=false
|
||||
fi
|
||||
else
|
||||
echo "ROCm not found."
|
||||
USE_ROCM=false
|
||||
echo "ROCm not found."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$USE_CUDA" = true ]; then
|
||||
echo "Installing PyTorch with CUDA support..."
|
||||
if [ "$CUDA_VERSION" = 128 ]; then
|
||||
pip install torch torchaudio --index-url https://download.pytorch.org/whl/cu128
|
||||
elif [ "$CUDA_VERSION" = 124 ]; then
|
||||
pip install torch==2.5.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124
|
||||
fi
|
||||
elif [ "$USE_ROCM" = true ]; then
|
||||
echo "Installing PyTorch with ROCm support..."
|
||||
pip install torch==2.5.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/rocm6.2
|
||||
else
|
||||
elif [ "$USE_CPU" = true ]; then
|
||||
echo "Installing PyTorch for CPU..."
|
||||
pip install torch==2.5.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cpu
|
||||
else
|
||||
echo "Unknown Err"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Installing Python dependencies from requirements.txt..."
|
||||
|
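After install.sh picks one of the wheel indexes above, a quick generic check (not part of this commit) confirms which PyTorch build actually landed in the environment:

    # Generic sanity check of the installed PyTorch build (not from this commit)
    python -c "import torch; print(torch.__version__, torch.version.cuda, torch.cuda.is_available())"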