diff --git a/GPT_SoVITS_Inference.ipynb b/GPT_SoVITS_Inference.ipynb index 1b8ec64d..33db42a4 100644 --- a/GPT_SoVITS_Inference.ipynb +++ b/GPT_SoVITS_Inference.ipynb @@ -2,11 +2,24 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "id": "himHYZmra7ix" - }, + "metadata": {}, "source": [ - "# Credits for bubarino giving me the huggingface import code (感谢 bubarino 给了我 huggingface 导入代码)" + "# GPT-SoVITS Infer" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Env Setup (Run Once Only)\n", + "## 环境配置, 只需运行一次" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1." ] }, { @@ -17,11 +30,35 @@ }, "outputs": [], "source": [ - "!git clone https://github.com/RVC-Boss/GPT-SoVITS.git\n", - "%cd GPT-SoVITS\n", - "!apt-get update && apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && git lfs install\n", - "!pip install -r extra-req.txt --no-deps\n", - "!pip install -r requirements.txt" + "%%writefile /content/setup.sh\n", + "set -e\n", + "cd /content\n", + "rm -rf GPT-SoVITS\n", + "git clone https://github.com/RVC-Boss/GPT-SoVITS.git\n", + "cd GPT-SoVITS\n", + "\n", + "mkdir GPT_weights\n", + "mkdir SoVITS_weights\n", + "\n", + "\n", + "if conda env list | awk '{print $1}' | grep -Fxq \"GPTSoVITS\"; then\n", + " :\n", + "else\n", + " conda create -n GPTSoVITS python=3.10 -y\n", + "fi\n", + "\n", + "source activate GPTSoVITS\n", + "\n", + "pip install ipykernel\n", + "\n", + "bash install.sh --source HF" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2." 
] }, { @@ -33,45 +70,24 @@ }, "outputs": [], "source": [ - "# @title Download pretrained models 下载预训练模型\n", - "!mkdir -p /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n", - "!mkdir -p /content/GPT-SoVITS/tools/damo_asr/models\n", - "!mkdir -p /content/GPT-SoVITS/tools/uvr5\n", - "%cd /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n", - "!git clone https://huggingface.co/lj1995/GPT-SoVITS\n", - "%cd /content/GPT-SoVITS/tools/damo_asr/models\n", - "!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n", - "!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n", - "!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n", - "# @title UVR5 pretrains 安装uvr5模型\n", - "%cd /content/GPT-SoVITS/tools/uvr5\n", - "!git clone https://huggingface.co/Delik/uvr5_weights\n", - "!git config core.sparseCheckout true\n", - "!mv /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/" + "%pip install -q condacolab\n", + "import condacolab\n", + "condacolab.install_from_url(\"https://repo.anaconda.com/archive/Anaconda3-2024.10-1-Linux-x86_64.sh\")\n", + "!cd /content && bash setup.sh" ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "cPDEH-9czOJF" - }, - "outputs": [], + "cell_type": "markdown", + "metadata": {}, "source": [ - "#@title Create folder models 创建文件夹模型\n", - "import os\n", - "base_directory = \"/content/GPT-SoVITS\"\n", - "folder_names = [\"SoVITS_weights\", \"GPT_weights\"]\n", - "\n", - "for folder_name in folder_names:\n", - " if os.path.exists(os.path.join(base_directory, folder_name)):\n", - " print(f\"The folder '{folder_name}' already exists. (文件夹'{folder_name}'已经存在。)\")\n", - " else:\n", - " os.makedirs(os.path.join(base_directory, folder_name))\n", - " print(f\"The folder '{folder_name}' was created successfully! 
(文件夹'{folder_name}'已成功创建!)\")\n", - "\n", - "print(\"All folders have been created. (所有文件夹均已创建。)\")" + "# Download Model" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Download From HuggingFace" ] }, { @@ -83,42 +99,59 @@ }, "outputs": [], "source": [ - "import requests\n", - "import zipfile\n", - "import shutil\n", - "import os\n", + "# Modify These\n", + "USER_ID = \"AkitoP\"\n", + "REPO_NAME = \"GPT-SoVITS-v2-aegi\"\n", + "BRANCH = \"main\"\n", + "GPT_PATH = \"new_aegigoe-e100.ckpt\"\n", + "SOVITS_PATH = \"new_aegigoe_e60_s32220.pth\"\n", "\n", - "#@title Import model 导入模型 (HuggingFace)\n", - "hf_link = 'https://huggingface.co/modelloosrvcc/Nagisa_Shingetsu_GPT-SoVITS/resolve/main/Nagisa.zip' #@param {type: \"string\"}\n", + "# Do Not Modify\n", + "HF_BASE = \"https://huggingface.co\"\n", + "REPO_ID = f\"{USER_ID}/{REPO_NAME}\"\n", + "GPT_URL = f\"{HF_BASE}/{REPO_ID}/resolve/{BRANCH}/{GPT_PATH}\"\n", + "SOVITS_URL = f\"{HF_BASE}/{REPO_ID}/resolve/{BRANCH}/{SOVITS_PATH}\"\n", "\n", - "output_path = '/content/'\n", + "!cd \"/content/GPT-SoVITS/GPT_weights\" && wget \"{GPT_URL}\"\n", + "!cd \"/content/GPT-SoVITS/SoVITS_weights\" && wget \"{SOVITS_URL}\"\n" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Download From ModelScope" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Modify These\n", + "USER_ID = \"aihobbyist\"\n", + "REPO_NAME = \"GPT-SoVits-V2-models\"\n", + "BRANCH = \"master\"\n", + "GPT_PATH = \"Genshin_Impact/EN/GPT_GenshinImpact_EN_5.1.ckpt\"\n", + "SOVITS_PATH = \"SV_WutheringWaves_CN_1.3.pth\"\n", "\n", - "response = requests.get(hf_link)\n", - "with open(output_path + 'file.zip', 'wb') as file:\n", - " file.write(response.content)\n", + "# Do Not Modify\n", + "HF_BASE = \"https://www.modelscope.cn/models\"\n", + "REPO_ID = f\"{USER_ID}/{REPO_NAME}\"\n", + "GPT_URL = f\"{HF_BASE}/{REPO_ID}/resolve/{BRANCH}/{GPT_PATH}\"\n", 
+ "SOVITS_URL = f\"{HF_BASE}/{REPO_ID}/resolve/{BRANCH}/{SOVITS_PATH}\"\n", "\n", - "with zipfile.ZipFile(output_path + 'file.zip', 'r') as zip_ref:\n", - " zip_ref.extractall(output_path)\n", - "\n", - "os.remove(output_path + \"file.zip\")\n", - "\n", - "source_directory = output_path\n", - "SoVITS_destination_directory = '/content/GPT-SoVITS/SoVITS_weights'\n", - "GPT_destination_directory = '/content/GPT-SoVITS/GPT_weights'\n", - "\n", - "for filename in os.listdir(source_directory):\n", - " if filename.endswith(\".pth\"):\n", - " source_path = os.path.join(source_directory, filename)\n", - " destination_path = os.path.join(SoVITS_destination_directory, filename)\n", - " shutil.move(source_path, destination_path)\n", - "\n", - "for filename in os.listdir(source_directory):\n", - " if filename.endswith(\".ckpt\"):\n", - " source_path = os.path.join(source_directory, filename)\n", - " destination_path = os.path.join(GPT_destination_directory, filename)\n", - " shutil.move(source_path, destination_path)\n", - "\n", - "print(f'Model downloaded. 
(模型已下载。)')" + "!cd \"/content/GPT-SoVITS/GPT_weights\" && wget \"{GPT_URL}\"\n", + "!cd \"/content/GPT-SoVITS/SoVITS_weights\" && wget \"{SOVITS_URL}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Launch WebUI\n", + "# 启动 WebUI" ] }, { @@ -130,11 +163,7 @@ }, "outputs": [], "source": [ - "# @title launch WebUI 启动WebUI\n", - "!/usr/local/bin/pip install ipykernel\n", - "!sed -i '10s/False/True/' /content/GPT-SoVITS/config.py\n", - "%cd /content/GPT-SoVITS/\n", - "!/usr/local/bin/python webui.py" + "!cd /content/GPT-SoVITS && source activate GPTSoVITS && export is_share=True && python webui.py" ] } ], diff --git a/colab_webui.ipynb b/colab_webui.ipynb index c44ea211..851c9914 100644 --- a/colab_webui.ipynb +++ b/colab_webui.ipynb @@ -10,21 +10,28 @@ "\"Open" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# GPT-SoVITS WebUI" + ] + }, { "cell_type": "markdown", "metadata": { "id": "_o6a8GS2lWQM" }, "source": [ - "# Env Setup (Run Once Only)\n", - "# 环境配置, 只需运行一次" + "## Env Setup (Run Once Only)\n", + "## 环境配置, 只需运行一次" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 1." + "### 1." ] }, { @@ -48,6 +55,8 @@ "\n", "source activate GPTSoVITS\n", "\n", + "pip install ipykernel\n", + "\n", "bash install.sh --source HF --download-uvr5" ] }, @@ -55,7 +64,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 2." + "### 2." 
] }, { @@ -74,8 +83,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Launch WebUI\n", - "# 启动 WebUI" + "## Launch WebUI\n", + "## 启动 WebUI" ] }, { diff --git a/gpt-sovits_kaggle.ipynb b/gpt-sovits_kaggle.ipynb index 9f28f6f4..764c23c4 100644 --- a/gpt-sovits_kaggle.ipynb +++ b/gpt-sovits_kaggle.ipynb @@ -1,5 +1,13 @@ { "cells": [ + { + "cell_type": "markdown", + "id": "9fd922fb", + "metadata": {}, + "source": [ + "# Deprecated" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/install.sh b/install.sh index 31b4761b..e45b91b1 100644 --- a/install.sh +++ b/install.sh @@ -202,6 +202,8 @@ pip install -r extra-req.txt --no-deps pip install -r requirements.txt +python -c "import nltk; nltk.download(['averaged_perceptron_tagger','averaged_perceptron_tagger_eng','cmudict'])" + if [ "$USE_ROCM" = true ] && [ "$IS_WSL" = true ]; then echo "Update to WSL compatible runtime lib..." location=$(pip show torch | grep Location | awk -F ": " '{print $2}')