From 240e0e289bc26a7b1a594696e1455cd6ddcfb47c Mon Sep 17 00:00:00 2001 From: KakaruHayate <97896816+KakaruHayate@users.noreply.github.com> Date: Sat, 20 Jan 2024 12:09:21 +0800 Subject: [PATCH 001/126] =?UTF-8?q?=E4=BD=BF=E7=94=A8librosa=E5=8A=A0?= =?UTF-8?q?=E8=BD=BD=E9=9F=B3=E9=A2=91=E9=81=BF=E5=85=8Dffmpeg.probe?= =?UTF-8?q?=E8=AF=BB=E5=8F=96metadata=E7=9A=84=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 如题,有用户称在读取音频的metadata时出现问题,这可能是ffprobe造成的。部分站点在存在写入不合规metadata的情况(如一些直播站点将直播间信息写入metadata,其中包含emoji等乱七八糟信息),这在ffmpeg.probe时将会出现问题。 考虑到ffmpeg兼容性和性能比librosa更好,可能在导入前对metadata合规性处理会更好? 但是看到后面注释部分很多地方还是使用librosa实现,所以暂且认为还不用考虑兼容性问题。 --- tools/uvr5/webui.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index 11b39f5..92e5faf 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -5,7 +5,8 @@ from tools.i18n.i18n import I18nAuto i18n = I18nAuto() logger = logging.getLogger(__name__) -import ffmpeg +import librosa +import soundfile as sf import torch import sys from mdxnet import MDXNetDereverb @@ -53,16 +54,17 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format need_reformat = 1 done = 0 try: - info = ffmpeg.probe(inp_path, cmd="ffprobe") - if ( - info["streams"][0]["channels"] == 2 - and info["streams"][0]["sample_rate"] == "44100" - ): + y, sr = librosa.load(inp_path, sr=None) + info = sf.info(inp_path) + channels = info.channels + if channels == 2 and sr == 44100: need_reformat = 0 pre_fun._path_audio_( inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 ) done = 1 + else: + need_reformat = 1 except: need_reformat = 1 traceback.print_exc() @@ -71,10 +73,8 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format os.path.join(os.environ["TEMP"]), os.path.basename(inp_path), ) - os.system( - "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" - % (inp_path, tmp_path) - ) + y_resampled = librosa.resample(y, sr, 44100) + sf.write(tmp_path, y_resampled, 44100, "PCM_16") inp_path = tmp_path try: if done == 0: @@ -181,4 +181,4 @@ app.queue(concurrency_count=511, max_size=1022).launch( inbrowser=True, server_port=9873, quiet=True, -) \ No newline at end of file +) From 9161907adcc3beae0ba0650289d62d5fe220dabf Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 22 Jan 2024 11:49:03 +0800 Subject: [PATCH 002/126] Update webui.py --- webui.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index e783094..60c6e1b 100644 --- a/webui.py +++ b/webui.py @@ -1,14 +1,13 @@ +import os,shutil,sys,pdb +now_dir = os.getcwd() +sys.path.append(now_dir) import json,yaml,warnings,torch import platform import psutil -import os,shutil import signal -from tools import my_utils warnings.filterwarnings("ignore") torch.manual_seed(233333) -import os,pdb,sys -now_dir = os.getcwd() tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp @@ -33,8 +32,8 @@ for site_packages_root in site_packages_roots: "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" % (now_dir, now_dir, now_dir, now_dir, now_dir) ) +from tools import my_utils import traceback -sys.path.append(now_dir) import shutil import pdb import gradio as gr From 3f7ff570d7479777188f1b77a45e2d53a148dd72 Mon Sep 17 00:00:00 2001 From: zxgov <64576649+YIZXIY@users.noreply.github.com> Date: Mon, 22 
Jan 2024 12:59:27 +0800 Subject: [PATCH 003/126] Update inference_webui.py --- GPT_SoVITS/inference_webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 156d4d3..246748a 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -338,13 +338,13 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath") prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="") prompt_language = gr.Dropdown( - label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文")) + label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文") ) gr.Markdown(value=i18n("*请填写需要合成的目标文本")) with gr.Row(): text = gr.Textbox(label=i18n("需要合成的文本"), value="") text_language = gr.Dropdown( - label=i18n("需要合成的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文")" + label=i18n("需要合成的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文") ) inference_button = gr.Button(i18n("合成语音"), variant="primary") output = gr.Audio(label=i18n("输出的语音")) From ccbc93f4fc9f0d25c843ae93fda5cfb0f8ff151e Mon Sep 17 00:00:00 2001 From: Kenn Zhang Date: Mon, 22 Jan 2024 20:21:18 +0800 Subject: [PATCH 004/126] =?UTF-8?q?=E5=88=9D=E6=AD=A5=E7=89=88=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Docker/damo.sha256 | 3 +++ Docker/download.sh | 11 +++++++++++ Docker/links.sha256 | 12 ++++++++++++ Docker/links.txt | 34 +++++++++++++++++++++++++++++++++ Dockerfile | 46 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 106 insertions(+) create mode 100644 Docker/damo.sha256 create mode 100644 Docker/download.sh create mode 100644 Docker/links.sha256 create mode 100644 Docker/links.txt create mode 100644 Dockerfile diff --git a/Docker/damo.sha256 b/Docker/damo.sha256 new file mode 100644 index 0000000..6e9804d --- /dev/null +++ b/Docker/damo.sha256 @@ -0,0 +1,3 @@ +5bba782a5e9196166233b9ab12ba04cadff9ef9212b4ff6153ed9290ff679025 /workspace/tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/model.pb +b3be75be477f0780277f3bae0fe489f48718f585f3a6e45d7dd1fbb1a4255fc5 /workspace/tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch/model.pb +a5818bb9d933805a916eebe41eb41648f7f9caad30b4bd59d56f3ca135421916 /workspace/tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/model.pb \ No newline at end of file diff --git a/Docker/download.sh b/Docker/download.sh new file mode 100644 index 0000000..447e018 --- /dev/null +++ b/Docker/download.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +echo "Downloading models..." + +aria2c --disable-ipv6 --input-file /workspace/Docker/links.txt --dir /workspace --continue + +echo "Checking SHA256..." 
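+# Each line of links.sha256 pairs an expected SHA256 digest with the absolute path of a
+# downloaded model file; parallel pipes one line at a time into `sha256sum -c` (which reads
+# the check line from stdin), so the script exits non-zero if any file fails verification.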
+ +parallel --will-cite -a /workspace/Docker/links.sha256 "echo -n {} | sha256sum -c" diff --git a/Docker/links.sha256 b/Docker/links.sha256 new file mode 100644 index 0000000..cda6dc1 --- /dev/null +++ b/Docker/links.sha256 @@ -0,0 +1,12 @@ +b1c1e17e9c99547a89388f72048cd6e1b41b5a18b170e86a46dfde0324d63eb1 /workspace/GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt +fc579c1db3c1e21b721001cf99d7a584214280df19b002e200b630a34fa06eb8 /workspace/GPT_SoVITS/pretrained_models/s2D488k.pth +020a014e1e01e550e510f2f61fae5e5f5b6aab40f15c22f1f12f724df507e835 /workspace/GPT_SoVITS/pretrained_models/s2G488k.pth +24164f129c66499d1346e2aa55f183250c223161ec2770c0da3d3b08cf432d3c /workspace/GPT_SoVITS/pretrained_models/chinese-hubert-base/pytorch_model.bin +e53a693acc59ace251d143d068096ae0d7b79e4b1b503fa84c9dcf576448c1d8 /workspace/GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/pytorch_model.bin +39796caa5db18d7f9382d8ac997ac967bfd85f7761014bb807d2543cc844ef05 /workspace/tools/uvr5/uvr5_weights/HP2_all_vocals.pth +45e6b65199e781b4a6542002699be9f19cd3d1cb7d1558bc2bfbcd84674dfe28 /workspace/tools/uvr5/uvr5_weights/HP3_all_vocals.pth +5908891829634926119720241e8573d97cbeb8277110a7512bdb0bd7563258ee /workspace/tools/uvr5/uvr5_weights/HP5_only_main_vocal.pth +8c8fd1582f9aabc363e47af62ddb88df6cae7e064cae75bbf041a067a5e0aee2 /workspace/tools/uvr5/uvr5_weights/VR-DeEchoAggressive.pth +01376dd2a571bf3cb9cced680732726d2d732609d09216a610b0d110f133febe /workspace/tools/uvr5/uvr5_weights/VR-DeEchoDeReverb.pth +56aba59db3bcdd14a14464e62f3129698ecdea62eee0f003b9360923eb3ac79e /workspace/tools/uvr5/uvr5_weights/VR-DeEchoNormal.pth +233bb5c6aaa365e568659a0a81211746fa881f8f47f82d9e864fce1f7692db80 /workspace/tools/uvr5/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx \ No newline at end of file diff --git a/Docker/links.txt b/Docker/links.txt new file mode 100644 index 0000000..e6603db --- /dev/null +++ b/Docker/links.txt @@ -0,0 +1,34 @@ +# GPT-SoVITS models +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s1bert25hz-2kh-longer-epoch%3D68e-step%3D50232.ckpt + out=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s2D488k.pth + out=GPT_SoVITS/pretrained_models/s2D488k.pth +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s2G488k.pth + out=GPT_SoVITS/pretrained_models/s2G488k.pth +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/config.json + out=GPT_SoVITS/pretrained_models/chinese-hubert-base/config.json +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/preprocessor_config.json + out=GPT_SoVITS/pretrained_models/chinese-hubert-base/preprocessor_config.json +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/pytorch_model.bin + out=GPT_SoVITS/pretrained_models/chinese-hubert-base/pytorch_model.bin +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/config.json + out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/config.json +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/pytorch_model.bin + out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/pytorch_model.bin +https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/tokenizer.json + out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/tokenizer.json +# UVR5 
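+# (aria2c --input-file format: each URI line is followed by indented per-download options,
+#  so every "out=" entry sets the save path relative to --dir, which download.sh points at /workspace)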
+https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth + out=tools/uvr5/uvr5_weights/HP2_all_vocals.pth +https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth + out=tools/uvr5/uvr5_weights/HP3_all_vocals.pth +https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth + out=tools/uvr5/uvr5_weights/HP5_only_main_vocal.pth +https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth + out=tools/uvr5/uvr5_weights/VR-DeEchoAggressive.pth +https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth + out=tools/uvr5/uvr5_weights/VR-DeEchoDeReverb.pth +https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth + out=tools/uvr5/uvr5_weights/VR-DeEchoNormal.pth +https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx + out=tools/uvr5/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..d39bf21 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,46 @@ +# Base CUDA image +FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04 + +# Install 3rd party apps +ENV DEBIAN_FRONTEND=noninteractive +ENV TZ=Etc/UTC +RUN apt-get update && \ + apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && \ + rm -rf /var/lib/apt/lists/* && \ + git lfs install + + +# Install python packages +WORKDIR /temp +COPY ./requirements.txt /temp/requirements.txt +RUN pip install --no-cache-dir -r requirements.txt + + +# Copy application +WORKDIR /workspace +COPY . /workspace + + +# Download models +RUN chmod +x /workspace/Docker/download.sh && /workspace/Docker/download.sh + +# Clone 3rd repos +WORKDIR /workspace/tools/damo_asr/models +RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && \ + (cd speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && git lfs pull) +RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch.git speech_fsmn_vad_zh-cn-16k-common-pytorch && \ + (cd speech_fsmn_vad_zh-cn-16k-common-pytorch && git lfs pull) +RUN git clone --depth 1 https://www.modelscope.cn/iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git punc_ct-transformer_zh-cn-common-vocab272727-pytorch && \ + (cd punc_ct-transformer_zh-cn-common-vocab272727-pytorch && git lfs pull) + +RUN parallel --will-cite -a /workspace/Docker/damo.sha256 "echo -n {} | sha256sum -c" + +WORKDIR /workspace + +EXPOSE 9870 +EXPOSE 9871 +EXPOSE 9872 +EXPOSE 9873 +EXPOSE 9874 + +CMD ["python", "webui.py"] \ No newline at end of file From 5c60ad634c4db1b9bcaca72108c95bdbc5f0330c Mon Sep 17 00:00:00 2001 From: Rice Cake Date: Mon, 22 Jan 2024 20:21:29 +0800 Subject: [PATCH 005/126] Add files via upload --- webui.py | 118 ++++++++++++++++++++++++++----------------------------- 1 file changed, 55 insertions(+), 63 deletions(-) diff --git a/webui.py b/webui.py index 60c6e1b..e23353c 100644 --- a/webui.py +++ b/webui.py @@ -1,22 +1,16 @@ -import os,shutil,sys,pdb -now_dir = os.getcwd() -sys.path.append(now_dir) import json,yaml,warnings,torch import platform import psutil +import os import signal warnings.filterwarnings("ignore") 
torch.manual_seed(233333) +import os,pdb,sys +now_dir = os.getcwd() tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp -if(os.path.exists(tmp)): - for name in os.listdir(tmp): - if(name=="jieba.cache"):continue - path="%s/%s"%(tmp,name) - delete=os.remove if os.path.isfile(path) else shutil.rmtree - delete(path) import site site_packages_roots = [] for path in site.getsitepackages(): @@ -32,15 +26,15 @@ for site_packages_root in site_packages_roots: "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" % (now_dir, now_dir, now_dir, now_dir, now_dir) ) -from tools import my_utils import traceback +sys.path.append(now_dir) import shutil import pdb import gradio as gr from subprocess import Popen import signal -from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share -from tools.i18n.i18n import I18nAuto +from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix +from i18n.i18n import I18nAuto i18n = I18nAuto() from scipy.io import wavfile from tools.my_utils import load_audio @@ -125,7 +119,7 @@ def kill_process(pid): def change_label(if_label,path_list): global p_label if(if_label==True and p_label==None): - cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share) + cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s'%(python_exec,path_list,webui_port_subfix) yield i18n("打标工具WebUI已开启") print(cmd) p_label = Popen(cmd, shell=True) @@ -137,7 +131,7 @@ def change_label(if_label,path_list): def change_uvr5(if_uvr5): global p_uvr5 if(if_uvr5==True and p_uvr5==None): - cmd = '"%s" tools/uvr5/webui.py "%s" %s %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5,is_share) + cmd = '"%s" tools/uvr5/webui.py "%s" %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5) yield i18n("UVR5已开启") print(cmd) p_uvr5 = Popen(cmd, shell=True) @@ -156,7 +150,6 @@ def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_number os.environ["is_half"]=str(is_half) os.environ["infer_ttswebui"]=str(webui_port_infer_tts) - os.environ["is_share"]=str(is_share) cmd = '"%s" GPT_SoVITS/inference_webui.py'%(python_exec) yield i18n("TTS推理进程已开启") print(cmd) @@ -171,21 +164,21 @@ def open_asr(asr_inp_dir): global p_asr if(p_asr==None): cmd = '"%s" tools/damo_asr/cmd-asr.py "%s"'%(python_exec,asr_inp_dir) - yield "ASR任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("ASR任务开启:%s")%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() p_asr=None - yield "ASR任务完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("ASR任务完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的ASR任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("已有正在进行的ASR任务,需先终止才能开启下一次任务"),{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close_asr(): global p_asr if(p_asr!=None): kill_process(p_asr.pid) p_asr=None - return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return i18n("已终止ASR进程"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} 
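# Pattern used by the open_*/close_* pairs in this file: each step launches a child process
# with subprocess.Popen and keeps the handle in a module-level variable (p_asr, p_train_SoVITS,
# ps_slice, ...); the open_* generator yields a status string plus visibility updates for the
# open/close buttons, and the matching close_* helper kills the tracked process via kill_process().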
p_train_SoVITS=None def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D): @@ -212,21 +205,21 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s with open(tmp_config_path,"w")as f:f.write(json.dumps(data)) cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path) - yield "SoVITS训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("SoVITS训练开始:%s")%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_SoVITS = Popen(cmd, shell=True) p_train_SoVITS.wait() p_train_SoVITS=None - yield "SoVITS训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("SoVITS训练完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务"),{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Ba(): global p_train_SoVITS if(p_train_SoVITS!=None): kill_process(p_train_SoVITS.pid) p_train_SoVITS=None - return "已终止SoVITS训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return i18n("已终止SoVITS训练"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} p_train_GPT=None def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1): @@ -255,34 +248,32 @@ def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False)) # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir) cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path) - yield "GPT训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("GPT训练开始:%s")%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_GPT = Popen(cmd, shell=True) p_train_GPT.wait() p_train_GPT=None - yield "GPT训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("GPT训练完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("已有正在进行的GPT训练任务,需先终止才能开启下一次任务"),{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Bb(): global p_train_GPT if(p_train_GPT!=None): kill_process(p_train_GPT.pid) p_train_GPT=None - return "已终止GPT训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return i18n("已终止GPT训练"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} ps_slice=[] def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts): global ps_slice - inp = my_utils.clean_path(inp) - opt_root = my_utils.clean_path(opt_root) if(os.path.exists(inp)==False): - yield "输入路径不存在",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield 
i18n("输入路径不存在"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} return if os.path.isfile(inp):n_parts=1 elif os.path.isdir(inp):pass else: - yield "输入路径存在但既不是文件也不是文件夹",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("输入路径存在但既不是文件也不是文件夹"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} return if (ps_slice == []): for i_part in range(n_parts): @@ -290,13 +281,13 @@ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_k print(cmd) p = Popen(cmd, shell=True) ps_slice.append(p) - yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("切割执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps_slice: p.wait() ps_slice=[] - yield "切割结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("切割结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的切割任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close_slice(): global ps_slice @@ -307,7 +298,7 @@ def close_slice(): except: traceback.print_exc() ps_slice=[] - return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有切割进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1a=[] def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): @@ -337,7 +328,7 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): print(cmd) p = Popen(cmd, shell=True) ps1a.append(p) - yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("文本进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1a: p.wait() opt = [] @@ -350,9 +341,9 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): with open(path_text, "w", encoding="utf8") as f: f.write("\n".join(opt) + "\n") ps1a=[] - yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("文本进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的文本任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1a(): global ps1a @@ -363,7 +354,7 @@ def close1a(): except: traceback.print_exc() ps1a=[] - return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有1a进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1b=[] def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): @@ -392,13 +383,13 @@ def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): print(cmd) p = Popen(cmd, shell=True) ps1b.append(p) - yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("SSL提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1b: p.wait() ps1b=[] - yield 
"SSL提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("SSL提取进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的SSL提取任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1b(): global ps1b @@ -409,7 +400,7 @@ def close1b(): except: traceback.print_exc() ps1b=[] - return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有1b进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1c=[] def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): @@ -439,7 +430,7 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): print(cmd) p = Popen(cmd, shell=True) ps1c.append(p) - yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("语义token提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1c: p.wait() opt = ["item_name semantic_audio"] @@ -452,9 +443,9 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): with open(path_semantic, "w", encoding="utf8") as f: f.write("\n".join(opt) + "\n") ps1c=[] - yield "语义token提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("语义token提取进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的语义token提取任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1c(): global ps1c @@ -465,7 +456,7 @@ def close1c(): except: traceback.print_exc() ps1c=[] - return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有语义token进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} #####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G ps1abc=[] def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path): @@ -499,7 +490,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("进度:1a-ing"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() opt = [] @@ -511,7 +502,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb with open(path_text, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("进度:1a-done"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc=[] #############################1b config={ @@ -536,9 +527,9 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a-done, 1b-ing", {"__type__": 
"update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("进度:1a-done, 1b-ing"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() - yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("进度:1a1b-done"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc=[] #############################1c path_semantic = "%s/6-name2semantic.tsv" % opt_dir @@ -565,7 +556,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("进度:1a1b-done, 1cing"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() opt = ["item_name semantic_audio"] @@ -576,15 +567,15 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb os.remove(semantic_path) with open(path_semantic, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("进度:all-done"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc = [] - yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + yield i18n("一键三连进程结束"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} except: traceback.print_exc() close1abc() - yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + yield i18n("一键三连中途报错"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} else: - yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的一键三连任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1abc(): global ps1abc @@ -595,12 +586,13 @@ def close1abc(): except: traceback.print_exc() ps1abc=[] - return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有一键三连进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} with gr.Blocks(title="GPT-SoVITS WebUI") as app: gr.Markdown( - value= + value=i18n( i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") + ) ) with gr.Tabs(): with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标 @@ -650,7 +642,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button]) open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button]) close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button]) - with gr.TabItem(i18n("1-GPT-SoVITS-TTS")): + with gr.TabItem("1-GPT-SoVITS-TTS"): with gr.Row(): exp_name = gr.Textbox(label=i18n("*实验/模型名"), value="xxx", interactive=True) gpu_info = gr.Textbox(label=i18n("显卡信息"), value=gpu_info, visible=True, interactive=False) @@ -665,9 +657,9 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: label=i18n("*训练集音频文件目录"), # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", interactive=True, - placeholder=i18n("训练集音频文件目录-拼接-list文件里波形对应的文件名(不是全路径)。") + placeholder=i18n("训练集音频文件目录 拼接 list文件里波形对应的文件名。") ) - gr.Markdown(value=i18n("1Aa-文本内容")) + gr.Markdown(value="1Aa-文本内容") with gr.Row(): gpu_numbers1a = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True) bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False) @@ -705,7 +697,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: with gr.Row(): batch_size = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) total_epoch = gr.Slider(minimum=1,maximum=20,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True) - text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True) + text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label="文本模块学习率权重",value=0.4,interactive=True) save_every_epoch = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True) if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) @@ -746,7 +738,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, - share=is_share, + share=True, server_port=webui_port_main, quiet=True, ) From 191f27f699f53d6f27e1f6f74c2f36371730b773 Mon Sep 17 00:00:00 2001 From: Rice Cake Date: Mon, 22 Jan 2024 20:24:45 +0800 Subject: [PATCH 006/126] Add files via upload --- i18n/locale/en_US.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index 160f103..b7ea99a 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -3,7 +3,7 @@ "UVR5已开启": "UVR5 opened ", "UVR5已关闭": "UVR5 closed", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible.
If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory Agreement-LICENSE.txt for details.", - "0-前置数据集获取工具": "0-Fech dataset", + "0-前置数据集获取工具": "0-Fetch dataset", "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)", "是否开启UVR5-WebUI": "Open UVR5-WebUI", "UVR5进程输出信息": "UVR5 process output log", @@ -128,7 +128,7 @@ "已终止所有一键三连进程": "All one-clicking formatting tasks has been stopped", "已终止所有切割进程": "All audio slicing tasks has been stopped", "已终止所有语义token进程": "All semantics token tasks has been stopped", - "按中文句号。切": "按中文句号。切", + "按中文句号。切": "Slice by Chinese punct", "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Text slicer tool, since there will be issues when infering long texts, so it is advised to cut first. When infering, it will infer respectively then combined together.", "文本进程执行中": "Text processing", "文本进程结束": "Finished text processing", From 40603d00c2d37df2f170a1519e3e1a594a8702b4 Mon Sep 17 00:00:00 2001 From: Rice Cake Date: Mon, 22 Jan 2024 20:25:13 +0800 Subject: [PATCH 007/126] Add files via upload --- tools/uvr5/webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index 1ede1b4..71e7ebc 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -119,7 +119,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format with gr.Blocks(title="RVC WebUI") as app: gr.Markdown( value= - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." + i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") ) with gr.Tabs(): with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): @@ -144,7 +144,7 @@ with gr.Blocks(title="RVC WebUI") as app: minimum=0, maximum=20, step=1, - label="人声提取激进程度", + label=i18n("人声提取激进程度"), value=10, interactive=True, visible=False, # 先不开放调整 From 20e7be87c6d8a7a6375681f954048b80a2dcf889 Mon Sep 17 00:00:00 2001 From: Rice Cake Date: Mon, 22 Jan 2024 20:35:46 +0800 Subject: [PATCH 008/126] Add files via upload --- webui.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/webui.py b/webui.py index e23353c..4eaf901 100644 --- a/webui.py +++ b/webui.py @@ -1,3 +1,6 @@ +import os,shutil,sys,pdb +now_dir = os.getcwd() +sys.path.append(now_dir) import json,yaml,warnings,torch import platform import psutil @@ -11,6 +14,12 @@ now_dir = os.getcwd() tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp +if(os.path.exists(tmp)): + for name in os.listdir(tmp): + if(name=="jieba.cache"):continue + path="%s/%s"%(tmp,name) + delete=os.remove if os.path.isfile(path) else shutil.rmtree + delete(path) import site site_packages_roots = [] for path in site.getsitepackages(): @@ -26,14 +35,14 @@ for site_packages_root in site_packages_roots: "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" % (now_dir, now_dir, now_dir, now_dir, now_dir) ) +from tools import my_utils import traceback -sys.path.append(now_dir) import shutil import pdb import gradio as gr from subprocess import Popen import signal -from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix +from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share from i18n.i18n import I18nAuto i18n = I18nAuto() from scipy.io import wavfile @@ -119,7 +128,7 @@ def kill_process(pid): def change_label(if_label,path_list): global p_label if(if_label==True and p_label==None): - cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s'%(python_exec,path_list,webui_port_subfix) + cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share) yield i18n("打标工具WebUI已开启") print(cmd) p_label = Popen(cmd, shell=True) @@ -131,7 +140,7 @@ def change_label(if_label,path_list): def change_uvr5(if_uvr5): global p_uvr5 if(if_uvr5==True and p_uvr5==None): - cmd = '"%s" tools/uvr5/webui.py "%s" %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5) + cmd = '"%s" tools/uvr5/webui.py "%s" %s %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5,is_share) yield i18n("UVR5已开启") print(cmd) p_uvr5 = Popen(cmd, shell=True) @@ -150,6 +159,7 @@ def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_number os.environ["is_half"]=str(is_half) os.environ["infer_ttswebui"]=str(webui_port_infer_tts) + os.environ["is_share"]=str(is_share) cmd = '"%s" GPT_SoVITS/inference_webui.py'%(python_exec) yield i18n("TTS推理进程已开启") print(cmd) @@ -267,6 +277,8 @@ def close1Bb(): ps_slice=[] def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts): global ps_slice + inp = my_utils.clean_path(inp) + opt_root = my_utils.clean_path(opt_root) if(os.path.exists(inp)==False): yield i18n("输入路径不存在"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} return @@ -588,11 +600,10 @@ def close1abc(): 
ps1abc=[] return i18n("已终止所有一键三连进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -with gr.Blocks(title="GPT-SoVITS WebUI") as app: +with gr.Blocks(title=i18n("GPT-SoVITS WebUI")) as app: gr.Markdown( - value=i18n( + value= i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") - ) ) with gr.Tabs(): with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标 @@ -657,9 +668,9 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: label=i18n("*训练集音频文件目录"), # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", interactive=True, - placeholder=i18n("训练集音频文件目录 拼接 list文件里波形对应的文件名。") + placeholder=i18n("训练集音频文件目录-拼接-list文件里波形对应的文件名(不是全路径)。") ) - gr.Markdown(value="1Aa-文本内容") + gr.Markdown(value=i18n("1Aa-文本内容")) with gr.Row(): gpu_numbers1a = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True) bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False) @@ -697,7 +708,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: with gr.Row(): batch_size = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) total_epoch = gr.Slider(minimum=1,maximum=20,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True) - text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label="文本模块学习率权重",value=0.4,interactive=True) + text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True) save_every_epoch = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True) if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) From 8deb526b5b5ff2e147fdfa4b546a46f63c42df2c Mon Sep 17 00:00:00 2001 From: Rice Cake Date: Mon, 22 Jan 2024 20:40:44 +0800 Subject: [PATCH 009/126] update i18n --- webui.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/webui.py b/webui.py index 4eaf901..ebd1dc7 100644 --- a/webui.py +++ b/webui.py @@ -4,13 +4,10 @@ sys.path.append(now_dir) import json,yaml,warnings,torch import platform import psutil -import os import signal warnings.filterwarnings("ignore") torch.manual_seed(233333) -import os,pdb,sys -now_dir = os.getcwd() tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp @@ -43,7 +40,7 @@ import gradio as gr from subprocess import Popen import signal from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share -from i18n.i18n import I18nAuto +from tools.i18n.i18n import I18nAuto i18n = I18nAuto() from scipy.io import wavfile from tools.my_utils import load_audio @@ -653,7 +650,7 @@ with gr.Blocks(title=i18n("GPT-SoVITS WebUI")) as app: close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button]) open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button]) close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button]) - with gr.TabItem("1-GPT-SoVITS-TTS"): + with gr.TabItem(i18n("1-GPT-SoVITS-TTS")): with gr.Row(): exp_name = gr.Textbox(label=i18n("*实验/模型名"), value="xxx", interactive=True) gpu_info = gr.Textbox(label=i18n("显卡信息"), value=gpu_info, visible=True, interactive=False) @@ -749,7 +746,7 @@ with gr.Blocks(title=i18n("GPT-SoVITS WebUI")) as app: app.queue(concurrency_count=511, max_size=1022).launch( 
server_name="0.0.0.0", inbrowser=True, - share=True, + share=is_share, server_port=webui_port_main, quiet=True, ) From e61136f144836f53f45a571fb76cf9a490906bef Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 22 Jan 2024 20:52:04 +0800 Subject: [PATCH 010/126] Update my_utils.py --- tools/my_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/my_utils.py b/tools/my_utils.py index abb2a44..ce61984 100644 --- a/tools/my_utils.py +++ b/tools/my_utils.py @@ -1,4 +1,4 @@ -import platform +import platform,os import ffmpeg import numpy as np From 38245a2baa533d75cfaf78682e2a831f3dd14605 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Mon, 22 Jan 2024 20:57:53 +0800 Subject: [PATCH 011/126] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 127aebf..7649d7b 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ sudo apt-get install python3.9-distutils #### Pip Packages ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers +pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba ``` #### Additional Requirements From 1dba692448c55542a7e01e3c1ad615ec70f2fd60 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 22 Jan 2024 21:52:48 +0800 Subject: [PATCH 012/126] Delete GPT_SoVITS/prepare_datasets/0-pipeline.py --- GPT_SoVITS/prepare_datasets/0-pipeline.py | 81 ----------------------- 1 file changed, 81 deletions(-) delete mode 100644 GPT_SoVITS/prepare_datasets/0-pipeline.py diff --git a/GPT_SoVITS/prepare_datasets/0-pipeline.py b/GPT_SoVITS/prepare_datasets/0-pipeline.py deleted file mode 100644 index 4979ed2..0000000 --- a/GPT_SoVITS/prepare_datasets/0-pipeline.py +++ /dev/null @@ -1,81 +0,0 @@ -import os, torch, sys -from subprocess import Popen - -now_dir = os.getcwd() -sys.path.append(now_dir) -from config import ( - text_path, - wav_dir, - n_card, - exp_name, - n_parts, - exp_dir, -) - -os.makedirs("%s/logs_s1" % exp_dir, exist_ok=True) -os.makedirs("%s/logs_s2" % exp_dir, exist_ok=True) -##############step1 -ps = [] -for i_part in range(n_parts): - cmd = "python prepare/1-get-text.py %s %s %s %s %s %s" % ( - text_path, - wav_dir, - exp_name, - i_part, - n_parts, - i_part % n_card, - ) - print(cmd) - p = Popen(cmd, shell=True) - ps.append(p) -for p in ps: - p.wait() - -opt = [] -for i_part in range(n_parts): - txt_path = "%s/2-name2text-%s.txt" % (exp_dir, i_part) - with open(txt_path, "r") as f: - opt += f.read().strip("\n").split("\n") - os.remove(txt_path) -with open("%s/2-name2text.txt" % exp_dir, "w") as f: - f.write("\n".join(opt) + "\n") - -############step2 -ps = [] -for i_part in range(n_parts): - cmd = "python prepare/2-get-hubert-wav32k.py %s %s %s %s %s %s" % ( - text_path, - wav_dir, - exp_name, - i_part, - n_parts, - i_part % n_card, - ) - print(cmd) - p = Popen(cmd, shell=True) - ps.append(p) -for p in ps: - p.wait() -#############step3 -ps = [] -for i_part in range(n_parts): - cmd = "python prepare/3-get-semantic.py %s %s %s %s %s" % ( - text_path, - exp_name, - i_part, - n_parts, - i_part % n_card, - ) - print(cmd) - p = Popen(cmd, shell=True) - ps.append(p) -for 
p in ps: - p.wait() -opt = ["item_name semantic_audio"] -for i_part in range(n_parts): - semantic_path = "%s/6-name2semantic-%s.tsv" % (exp_dir, i_part) - with open(semantic_path, "r") as f: - opt += f.read().strip("\n").split("\n") - os.remove(semantic_path) -with open("%s/6-name2semantic.tsv" % exp_dir, "w") as f: - f.write("\n".join(opt) + "\n") From 65ee0385363ee19e1751b75c6e52cbd61c076836 Mon Sep 17 00:00:00 2001 From: koji Date: Mon, 22 Jan 2024 09:34:47 -0500 Subject: [PATCH 013/126] fix typo in jp doc --- docs/ja/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ja/README.md b/docs/ja/README.md index 1e7eebf..d58d090 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -24,7 +24,7 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb ## 機能: -1. **セロショット TTS:** 5秒間のボーカルサンプルを入力すると、即座にテキストから音声に変換されます。 +1. **ゼロショット TTS:** 5秒間のボーカルサンプルを入力すると、即座にテキストから音声に変換されます。 2. **数ショット TTS:** わずか1分間のトレーニングデータでモデルを微調整し、音声の類似性とリアリズムを向上。 From c1a24ad37079252da7a9ee1e87038ef285bf8fac Mon Sep 17 00:00:00 2001 From: Shouta Yoshikai Date: Tue, 23 Jan 2024 00:10:39 +0900 Subject: [PATCH 014/126] update ja_JP.json update webui.py for i18n --- i18n/locale/ja_JP.json | 159 ++++++++++++++++++++++++++++++++++++++++- tools/uvr5/webui.py | 2 +- webui.py | 82 ++++++++++----------- 3 files changed, 198 insertions(+), 45 deletions(-) diff --git a/i18n/locale/ja_JP.json b/i18n/locale/ja_JP.json index c5b33ff..03143d1 100644 --- a/i18n/locale/ja_JP.json +++ b/i18n/locale/ja_JP.json @@ -111,12 +111,12 @@ "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "処理対象音声フォルダーのパスを入力してください(エクスプローラーのアドレスバーからコピーしてください)", "输入待处理音频文件路径(默认是正确格式示例)": "処理対象音声ファイルのパスを入力してください(デフォルトは正しいフォーマットの例です)", "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "入力ソースの音量エンベロープと出力音量エンベロープの融合率 1に近づくほど、出力音量エンベロープの割合が高くなる", - "输入监听": "输入监听", + "输入监听": "入力を監視", "输入训练文件夹路径": "トレーニング用フォルダのパスを入力してください", "输入设备": "入力デバイス", "输入降噪": "入力ノイズの低減", "输出信息": "出力情報", - "输出变声": "输出变声", + "输出变声": "音声変換の出力", "输出设备": "出力デバイス", "输出降噪": "出力ノイズの低減", "输出音频(右下角三个点,点了可以下载)": "出力音声(右下の三点をクリックしてダウンロードできます)", @@ -131,5 +131,158 @@ "音调设置": "音程設定", "音频设备(请使用同种类驱动)": "オーディオデバイス(同じ種類のドライバーを使用してください)", "音高算法": "ピッチアルゴリズム", - "额外推理时长": "追加推論時間" + "额外推理时长": "追加推論時間", + "打标工具WebUI已开启": "ラベリングツールWebUIが開始されました", + "打标工具WebUI已关闭": "ラベリングツールWebUIが終了しました", + "UVR5已开启": "UVR5が開始されました", + "UVR5已关闭": "UVR5が終了しました", + "TTS推理进程已开启": "TTS推論プロセスが開始されました", + "TTS推理进程已关闭": "TTS推理プロセスが終了しました", + "ASR任务开启": "ASRタスクが開始されました", + "ASR任务完成": "ASRタスクが完了しました", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "既に進行中のASRタスクがあります。新たなタスクを開始する前に終了させてください", + "已终止ASR进程": "ASRプロセスが終了しました", + "SoVITS训练开始": "SoVITSのトレーニングが開始されました", + "SoVITS训练完成": "SoVITSのトレーニングが完了しました", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "既に進行中のSoVITSのトレーニングタスクがあります。新たなタスクを開始する前に終了させてください", + "已终止SoVITS训练": "SoVITSのトレーニングが終了しました", + "GPT训练开始": "GPTトレーニング開始", + "GPT训练完成": "GPTトレーニング完了", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "既に進行中のGPTトレーニングタスクがあります。新たなタスクを開始する前に終了させてください", + "已终止GPT训练": "GPTトレーニングが終了しました", + "切割执行中": "カット中", + "切割结束": "カット終了", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "既に進行中のカットタスクがあります。新たなタスクを開始する前に終了させてください", + "已终止所有切割进程": "すべてのカットプロセスが終了しました", + "文本进程执行中": "テキストプロセス実行中", + "文本进程结束": "テキストプロセス終了", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "既に進行中のテキストタスクがあります。新たなタスクを開始する前に終了させてください", + "已终止所有文本进程": "すべてのテキストプロセスが終了しました", + "SSL提取进程执行中": "SSL抽出プロセス実行中", + "SSL提取进程结束": "SSL抽出プロセス終了", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "既に進行中のSSL抽出タスクがあります。新たなタスクを開始する前に終了させてください", + "已终止所有1b进程": "すべての1bプロセスが終了しました", + "语义token提取进程执行中": "意味トークン抽出プロセス実行中", + 
"语义token提取进程结束": "意味トークン抽出プロセス終了", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "既に進行中の意味トークン抽出タスクがあります。新たなタスクを開始する前に終了させてください", + "已终止所有语义token进程": "すべての意味トークンプロセスが終了しました", + "语义token提取进程已开始": "意味トークン抽出プロセスが開始されました", + "语义token提取进程已结束": "意味トークン抽出プロセスが終了しました", + "语义token提取进程已终止": "意味トークン抽出プロセスが終了しました", + "语义token提取进程正在进行中": "意味トークン抽出プロセスが進行中です", + "语义token提取任务已完成": "意味トークン抽出タスクが完了しました", + "语义token提取任务正在进行中": "意味トークン抽出タスクが進行中です", + "语义token提取任务已开始": "意味トークン抽出タスクが開始されました", + "语义token提取任务已终止": "意味トークン抽出タスクが終了しました", + "ワンクリックで三つのプロセスを開始": "ワンクリックで三つのプロセスを開始します", + "ワンクリックで三つのプロセスを終了": "ワンクリックで三つのプロセスを終了します", + "ワンクリックで三つのプロセスを中止": "ワンクリックで三つのプロセスを中止します", + "ワンクリックで三つのプロセスが進行中": "ワンクリックで三つのプロセスが進行中です", + "ワンクリックで三つのタスクが完了": "ワンクリックで三つのタスクが完了しました", + "ワンクリックで三つのタスクが進行中": "ワンクリックで三つのタスクが進行中です", + "ワンクリックで三つのタスクを開始": "ワンクリックで三つのタスクを開始します", + "ワンクリックで三つのタスクを中止": "ワンクリックで三つのタスクを中止します", + "0-前置数据集获取工具": "0-前置データセット取得ツール", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5ボーカル伴奏分離&リバーブディレイ除去ツール", + "是否开启UVR5-WebUI": "UVR5-WebUIを開始しますか", + "UVR5进程输出信息": "UVR5プロセス出力情報", + "0b-语音切分工具": "0b-音声切断ツール", + "音频自动切分输入路径,可文件可文件夹": "オーディオ自動切断入力パス、ファイルまたはフォルダー可", + "切分后的子音频的输出根目录": "切断後のサブオーディオの出力ルートディレクトリ", + "threshold:音量小于这个值视作静音的备选切割点": "threshold:音量がこの値より小さい場合は、サイレントと見なされる代替切断点", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:各セクションの最小長さ、最初のセクションが短すぎる場合は、この値を超えるまで後続のセクションと連続しています", + "min_interval:最短切割间隔": "min_interval:最短切断間隔", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:音量曲線の計算方法、小さいほど精度が高く計算量が多くなります(精度が高いほど効果が良いとは限りません)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept:切断後のサイレントの最大保持長", + "开启语音切割": "音声切断を開始", + "终止语音切割": "音声切断を終了", + "max:归一化后最大值多少": "max:正規化後の最大値", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix:どの程度の割合で正規化後のオーディオを混合するか", + "切割使用的进程数": "切断に使用されるプロセス数", + "语音切割进程输出信息": "音声切断プロセス出力情報", + "0c-中文批量离线ASR工具": "0c-中国語バッチオフラインASRツール", + "开启离线批量ASR": "オフラインバッチASRを開始", + "终止ASR进程": "ASRプロセスを終了", + "批量ASR(中文only)输入文件夹路径": "バッチASR(中国語のみ)入力フォルダパス", + "ASR进程输出信息": "ASRプロセス出力情報", + "0d-语音文本校对标注工具": "0d-音声テキスト校正アノテーションツール", + "是否开启打标WebUI": "ラベリングWebUIを開始しますか", + "打标数据标注文件路径": "ラベリングデータアノテーションファイルパス", + "打标工具进程输出信息": "ラベリングツールプロセス出力情報", + "文本进程结束, SSL提取进程执行中": "テキストプロセス終了、SSL抽出プロセス実行中", + "文本进程结束, SSL提取进程结束": "テキストプロセス終了、SSL抽出プロセス終了", + "一键三连进程执行中": "ワンクリック三連プロセス実行中", + "一键三连进程结束": "ワンクリック三連プロセス終了", + "SoVITS训练进程执行中": "SoVITS訓練プロセス実行中", + "SoVITS训练进程结束": "SoVITS訓練プロセス終了", + "GPT训练进程执行中": "GPT訓練プロセス実行中", + "GPT训练进程结束": "GPT訓練プロセス終了", + "推理进程执行中": "推論プロセス実行中", + "推理进程结束": "推論プロセス終了", + "预训练的SoVITS-G模型路径": "事前学習済みのSoVITS-Gモデルのパス", + "预训练的SoVITS-D模型路径": "事前学習済みのSoVITS-Dモデルのパス", + "预训练的GPT模型路径": "事前学習済みのGPTモデルのパス", + "GPU卡号以-分割,每个卡号一个进程": "GPUカード番号は-で区切り、各カード番号に1つのプロセス", + "预训练的中文BERT模型路径": "事前学習済みの中国語BERTモデルのパス", + "开启文本获取": "テキスト取得を開始", + "终止文本获取进程": "テキスト取得プロセスを終了", + "文本进程输出信息": "テキストプロセス出力情報", + "预训练的SSL模型路径": "事前学習済みのSSLモデルのパス", + "开启SSL提取": "SSL抽出を開始", + "终止SSL提取进程": "SSL抽出プロセスを終了", + "SSL进程输出信息": "SSLプロセス出力情報", + "开启语义token提取": "セマンティックトークン抽出を開始", + "终止语义token提取进程": "セマンティックトークン抽出プロセスを終了", + "语义token提取进程输出信息": "セマンティックトークン抽出プロセス出力情報", + "开启一键三连": "ワンクリック三連を開始", + "终止一键三连": "ワンクリック三連を終了", + "一键三连进程输出信息": "ワンクリック三連プロセス出力情報", + "1A-訓練集格式化工具": "1A-トレーニングセットフォーマットツール", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/実験名ディレクトリには、23456で始まるファイルとフォルダが存在する必要があります", + "*文本标注文件": "*テキストアノテーションファイル", + "*训练集音频文件目录": "*トレーニングセットオーディオファイルディレクトリ", + "训练集音频文件目录-拼接-list文件里波形对应的文件名(不是全路径)。": "トレーニングセットオーディオファイルディレクトリ-結合-listファイル内の波形に対応するファイル名(フルパスではありません)。", + "1Aa-文本内容": "1Aa-テキスト内容", + "1Ac-语义token提取": "1Ac-セマンティックトークン抽出", + 
"1Aabc-训练集格式化一键三连": "1Aabc-トレーニングセットフォーマットワンクリック三連", + "1B-微调训练": "1B-微調整訓練", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS訓練。共有モデルファイルはSoVITS_weightsに出力されます。", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT訓練。共有モデルファイルはGPT_weightsに出力されます。", + "1C-推理": "1C-推論", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-ボイスチェンジ", + "开启SoVITS训练": "SoVITS訓練を開始", + "终止SoVITS训练": "SoVITS訓練を終了", + "SoVITS训练进程输出信息": "SoVITS訓練プロセス出力情報", + "开启GPT训练": "GPT訓練を開始", + "终止GPT训练": "GPT訓練を終了", + "GPT训练进程输出信息": "GPT訓練プロセス出力情報", + "是否开启TTS推理WebUI": "TTS推理WebUIを開始しますか", + "TTS推理WebUI进程输出信息": "TTS推理WebUIプロセス出力情報", + "施工中,请静候佳音": "工事中、お待ちください", + "*实验/模型名": "実験/モデル名", + "1A-训练集格式化工具": "1A-トレーニングセットフォーマットツール", + "1Ab-SSL自监督特征提取": "1Ab-SSL自己監督による特徴抽出", + "总训练轮数total_epoch,不建议太高": "トータルトレーニングラウンド数total_epoch、あまり高く設定しないことをお勧めします", + "文本模块学习率权重": "テキストモジュールの学習率の重み", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weightsとGPT_weightsに保存された訓練済みモデルを選択します。デフォルトの1つはベースモデルで、5秒間のZero Shot TTSを体験するために使用されます。", + "*GPT模型列表": "*GPTモデルリスト", + "*SoVITS模型列表": "*SoVITSモデルリスト", + "GPU卡号,只能填1个整数": "GPUカード番号、整数のみ入力可能", + "刷新模型路径": "モデルパスを更新", + "*请上传并填写参考信息": "*参考情報をアップロードして記入してください", + "请上传参考音频": "*参考音声をアップロードしてください", + "参考音频的文本": "*参考音声のテキスト", + "参考音频的语种": "参考音声の言語", + "*请填写需要合成的目标文本": "*合成する目標テキストを入力してください", + "需要合成的文本": "*合成するテキスト", + "需要合成的语种": "*合成する言語", + "合成语音": "音声合成", + "输出的语音": "*出力音声", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "テキスト分割ツール。長すぎるテキストを合成すると結果が必ずしも良くない可能性があるため、長すぎる場合は先に切ることをお勧めします。合成はテキストの改行に基づいて分割してから再結合します。", + "需要合成的切分前文本": "合成する前の分割テキストが必要", + "凑五句一切": "五文を一つにまとめる", + "凑50字一切": "50文字を一つにまとめる", + "按中文句号。切": "中国語の句点で切る。", + "切分后文本": "分割後のテキスト", + "后续将支持混合语种编码文本输入。": "今後、混合言語エンコードテキストの入力をサポートします。" } diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index 1ede1b4..17603e0 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -119,7 +119,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format with gr.Blocks(title="RVC WebUI") as app: gr.Markdown( value= - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." + i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") ) with gr.Tabs(): with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): diff --git a/webui.py b/webui.py index 60c6e1b..386cfa0 100644 --- a/webui.py +++ b/webui.py @@ -171,21 +171,21 @@ def open_asr(asr_inp_dir): global p_asr if(p_asr==None): cmd = '"%s" tools/damo_asr/cmd-asr.py "%s"'%(python_exec,asr_inp_dir) - yield "ASR任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield f"{i18n('ASR任务开启')}:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() p_asr=None - yield "ASR任务完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("ASR任务完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的ASR任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield f"{i18n('已有正在进行的ASR任务,需先终止才能开启下一次任务')}",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close_asr(): global p_asr if(p_asr!=None): kill_process(p_asr.pid) p_asr=None - return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return i18n("已终止ASR进程"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} p_train_SoVITS=None def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D): @@ -212,21 +212,21 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s with open(tmp_config_path,"w")as f:f.write(json.dumps(data)) cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path) - yield "SoVITS训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("SoVITS训练开始:%s"%cmd),{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_SoVITS = Popen(cmd, shell=True) p_train_SoVITS.wait() p_train_SoVITS=None - yield "SoVITS训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("SoVITS训练完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield f"{i18n('已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务')}",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Ba(): global p_train_SoVITS if(p_train_SoVITS!=None): kill_process(p_train_SoVITS.pid) p_train_SoVITS=None - return "已终止SoVITS训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return i18n("已终止SoVITS训练"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} p_train_GPT=None def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1): @@ -255,21 +255,21 @@ def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False)) # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir) cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path) - yield 
"GPT训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield f"{i18n('GPT训练开始')}:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_GPT = Popen(cmd, shell=True) p_train_GPT.wait() p_train_GPT=None - yield "GPT训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("GPT训练完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield i18n("已有正在进行的GPT训练任务,需先终止才能开启下一次任务"),{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Bb(): global p_train_GPT if(p_train_GPT!=None): kill_process(p_train_GPT.pid) p_train_GPT=None - return "已终止GPT训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return i18n("已终止GPT训练"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} ps_slice=[] def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts): @@ -277,12 +277,12 @@ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_k inp = my_utils.clean_path(inp) opt_root = my_utils.clean_path(opt_root) if(os.path.exists(inp)==False): - yield "输入路径不存在",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("输入路径不存在"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} return if os.path.isfile(inp):n_parts=1 elif os.path.isdir(inp):pass else: - yield "输入路径存在但既不是文件也不是文件夹",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("输入路径存在但既不是文件也不是文件夹"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} return if (ps_slice == []): for i_part in range(n_parts): @@ -290,13 +290,13 @@ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_k print(cmd) p = Popen(cmd, shell=True) ps_slice.append(p) - yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("切割执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps_slice: p.wait() ps_slice=[] - yield "切割结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("切割结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的切割任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close_slice(): global ps_slice @@ -307,7 +307,7 @@ def close_slice(): except: traceback.print_exc() ps_slice=[] - return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有切割进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1a=[] def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): @@ -337,7 +337,7 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): print(cmd) p = Popen(cmd, shell=True) ps1a.append(p) - yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("文本进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1a: p.wait() opt = [] @@ -350,9 +350,9 @@ def 
open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): with open(path_text, "w", encoding="utf8") as f: f.write("\n".join(opt) + "\n") ps1a=[] - yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("文本进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的文本任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1a(): global ps1a @@ -363,7 +363,7 @@ def close1a(): except: traceback.print_exc() ps1a=[] - return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有文本进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1b=[] def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): @@ -392,13 +392,13 @@ def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): print(cmd) p = Popen(cmd, shell=True) ps1b.append(p) - yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("SSL提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1b: p.wait() ps1b=[] - yield "SSL提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("SSL提取进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的SSL提取任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1b(): global ps1b @@ -409,7 +409,7 @@ def close1b(): except: traceback.print_exc() ps1b=[] - return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有1b进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1c=[] def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): @@ -439,7 +439,7 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): print(cmd) p = Popen(cmd, shell=True) ps1c.append(p) - yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("语义token提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1c: p.wait() opt = ["item_name semantic_audio"] @@ -452,9 +452,9 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): with open(path_semantic, "w", encoding="utf8") as f: f.write("\n".join(opt) + "\n") ps1c=[] - yield "语义token提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield i18n("语义token提取进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的语义token提取任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1c(): global ps1c @@ -465,7 +465,7 @@ def close1c(): except: traceback.print_exc() ps1c=[] - return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有语义token进程"), {"__type__": "update", 
"visible": True}, {"__type__": "update", "visible": False} #####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G ps1abc=[] def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path): @@ -499,11 +499,11 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("文本进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() opt = [] - for i_part in range(all_parts):#txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part) + for i_part in range(all_parts): txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) with open(txt_path, "r",encoding="utf8") as f: opt += f.read().strip("\n").split("\n") @@ -511,7 +511,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb with open(path_text, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("文本进程结束"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc=[] #############################1b config={ @@ -536,9 +536,9 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("文本进程结束, SSL提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() - yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("文本进程结束, SSL提取进程结束"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc=[] #############################1c path_semantic = "%s/6-name2semantic.tsv" % opt_dir @@ -565,7 +565,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("SSL提取进程结束, 语义token提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() opt = ["item_name semantic_audio"] @@ -576,15 +576,15 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb os.remove(semantic_path) with open(path_semantic, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("语义token提取进程结束"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc = [] - yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + yield i18n("一键三连进程结束"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} except: traceback.print_exc() close1abc() - yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + yield i18n("一键三连中途报错"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} else: - yield 
"已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield i18n("已有正在进行的一键三连任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1abc(): global ps1abc @@ -595,7 +595,7 @@ def close1abc(): except: traceback.print_exc() ps1abc=[] - return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return i18n("已终止所有一键三连进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} with gr.Blocks(title="GPT-SoVITS WebUI") as app: gr.Markdown( From 34b4e02092566ef1c7b56d88b5efb29503c16073 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Mon, 22 Jan 2024 23:19:09 +0800 Subject: [PATCH 015/126] Update README.md --- docs/cn/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cn/README.md b/docs/cn/README.md index 4c7f530..27c5668 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -51,7 +51,7 @@ bash install.sh #### Pip包 ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet +pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers ``` #### 额外要求 From 9e56f2b6b0d4c30c27416ae48ac4487b33733ef8 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Mon, 22 Jan 2024 23:22:03 +0800 Subject: [PATCH 016/126] Update README.md --- docs/ja/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ja/README.md b/docs/ja/README.md index 1e7eebf..34c673b 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -57,7 +57,7 @@ sudo apt-get install python3.9-distutils #### Pip パッケージ ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet +pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers ``` #### 追加要件 From 872134c846bcb8f1909a3f5aff68a6aa67643f68 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 22 Jan 2024 23:26:18 +0800 Subject: [PATCH 017/126] Update t2s_model.py --- GPT_SoVITS/AR/models/t2s_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/GPT_SoVITS/AR/models/t2s_model.py b/GPT_SoVITS/AR/models/t2s_model.py index 9f8330b..083dc09 100644 --- a/GPT_SoVITS/AR/models/t2s_model.py +++ b/GPT_SoVITS/AR/models/t2s_model.py @@ -302,6 +302,8 @@ class Text2SemanticDecoder(nn.Module): xy_dec[:, -1] ) ##不用改,如果用了cache的默认就是只有一帧,取最后一帧一样的 # samples = topk_sampling(logits, top_k=top_k, top_p=1.0, temperature=temperature) + if(idx==0):###第一次跑不能EOS否则没有了 + logits = logits[:, :-1] ###刨除1024终止符号的概率 samples = sample( logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35 )[0].unsqueeze(0) From 7bd33f9c8b8892497ec642f1297f9139c2bca107 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 22 Jan 2024 23:42:33 +0800 Subject: [PATCH 018/126] Add files via upload --- webui.py | 94 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff 
--git a/webui.py b/webui.py index ebd1dc7..1520226 100644 --- a/webui.py +++ b/webui.py @@ -171,21 +171,21 @@ def open_asr(asr_inp_dir): global p_asr if(p_asr==None): cmd = '"%s" tools/damo_asr/cmd-asr.py "%s"'%(python_exec,asr_inp_dir) - yield i18n("ASR任务开启:%s")%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield "ASR任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() p_asr=None - yield i18n("ASR任务完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "ASR任务完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield i18n("已有正在进行的ASR任务,需先终止才能开启下一次任务"),{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield "已有正在进行的ASR任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close_asr(): global p_asr if(p_asr!=None): kill_process(p_asr.pid) p_asr=None - return i18n("已终止ASR进程"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False} p_train_SoVITS=None def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D): @@ -212,21 +212,21 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s with open(tmp_config_path,"w")as f:f.write(json.dumps(data)) cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path) - yield i18n("SoVITS训练开始:%s")%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield "SoVITS训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_SoVITS = Popen(cmd, shell=True) p_train_SoVITS.wait() p_train_SoVITS=None - yield i18n("SoVITS训练完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "SoVITS训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield i18n("已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务"),{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Ba(): global p_train_SoVITS if(p_train_SoVITS!=None): kill_process(p_train_SoVITS.pid) p_train_SoVITS=None - return i18n("已终止SoVITS训练"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return "已终止SoVITS训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} p_train_GPT=None def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1): @@ -255,21 +255,21 @@ def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False)) # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir) cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path) - yield i18n("GPT训练开始:%s")%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield 
"GPT训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_GPT = Popen(cmd, shell=True) p_train_GPT.wait() p_train_GPT=None - yield i18n("GPT训练完成"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "GPT训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield i18n("已有正在进行的GPT训练任务,需先终止才能开启下一次任务"),{"__type__":"update","visible":False},{"__type__":"update","visible":True} + yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Bb(): global p_train_GPT if(p_train_GPT!=None): kill_process(p_train_GPT.pid) p_train_GPT=None - return i18n("已终止GPT训练"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + return "已终止GPT训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} ps_slice=[] def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts): @@ -277,12 +277,12 @@ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_k inp = my_utils.clean_path(inp) opt_root = my_utils.clean_path(opt_root) if(os.path.exists(inp)==False): - yield i18n("输入路径不存在"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "输入路径不存在",{"__type__":"update","visible":True},{"__type__":"update","visible":False} return if os.path.isfile(inp):n_parts=1 elif os.path.isdir(inp):pass else: - yield i18n("输入路径存在但既不是文件也不是文件夹"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "输入路径存在但既不是文件也不是文件夹",{"__type__":"update","visible":True},{"__type__":"update","visible":False} return if (ps_slice == []): for i_part in range(n_parts): @@ -290,13 +290,13 @@ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_k print(cmd) p = Popen(cmd, shell=True) ps_slice.append(p) - yield i18n("切割执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps_slice: p.wait() ps_slice=[] - yield i18n("切割结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "切割结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield i18n("已有正在进行的切割任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close_slice(): global ps_slice @@ -307,7 +307,7 @@ def close_slice(): except: traceback.print_exc() ps_slice=[] - return i18n("已终止所有切割进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1a=[] def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): @@ -337,7 +337,7 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): print(cmd) p = Popen(cmd, shell=True) ps1a.append(p) - yield i18n("文本进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1a: p.wait() opt = [] @@ -350,9 +350,9 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): with open(path_text, "w", encoding="utf8") as 
f: f.write("\n".join(opt) + "\n") ps1a=[] - yield i18n("文本进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield i18n("已有正在进行的文本任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1a(): global ps1a @@ -363,7 +363,7 @@ def close1a(): except: traceback.print_exc() ps1a=[] - return i18n("已终止所有1a进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1b=[] def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): @@ -392,13 +392,13 @@ def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): print(cmd) p = Popen(cmd, shell=True) ps1b.append(p) - yield i18n("SSL提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1b: p.wait() ps1b=[] - yield i18n("SSL提取进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "SSL提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield i18n("已有正在进行的SSL提取任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1b(): global ps1b @@ -409,7 +409,7 @@ def close1b(): except: traceback.print_exc() ps1b=[] - return i18n("已终止所有1b进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} ps1c=[] def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): @@ -439,10 +439,10 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): print(cmd) p = Popen(cmd, shell=True) ps1c.append(p) - yield i18n("语义token提取进程执行中"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1c: p.wait() - opt = ["item_name semantic_audio"] + opt = ["item_name\tsemantic_audio"] path_semantic = "%s/6-name2semantic.tsv" % opt_dir for i_part in range(all_parts): semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) @@ -452,9 +452,9 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): with open(path_semantic, "w", encoding="utf8") as f: f.write("\n".join(opt) + "\n") ps1c=[] - yield i18n("语义token提取进程结束"),{"__type__":"update","visible":True},{"__type__":"update","visible":False} + yield "语义token提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield i18n("已有正在进行的语义token提取任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1c(): global ps1c @@ -465,7 +465,7 @@ def close1c(): except: traceback.print_exc() ps1c=[] - return i18n("已终止所有语义token进程"), {"__type__": "update", "visible": True}, {"__type__": 
"update", "visible": False} + return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} #####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G ps1abc=[] def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path): @@ -475,7 +475,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb try: #############################1a path_text="%s/2-name2text.txt" % opt_dir - if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and os.path.getsize(path_text)<10)): + if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and len(open(path_text,"r",encoding="utf8").read().strip("\n").split("\n"))<2)): config={ "inp_text":inp_text, "inp_wav_dir":inp_wav_dir, @@ -499,7 +499,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield i18n("进度:1a-ing"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() opt = [] @@ -511,7 +511,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb with open(path_text, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield i18n("进度:1a-done"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc=[] #############################1b config={ @@ -536,9 +536,9 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield i18n("进度:1a-done, 1b-ing"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() - yield i18n("进度:1a1b-done"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc=[] #############################1c path_semantic = "%s/6-name2semantic.tsv" % opt_dir @@ -565,10 +565,10 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield i18n("进度:1a1b-done, 1cing"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1abc:p.wait() - opt = ["item_name semantic_audio"] + opt = ["item_name\tsemantic_audio"] for i_part in range(all_parts): semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) with open(semantic_path, "r",encoding="utf8") as f: @@ -576,15 +576,15 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb os.remove(semantic_path) with open(path_semantic, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield i18n("进度:all-done"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": 
True} ps1abc = [] - yield i18n("一键三连进程结束"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} except: traceback.print_exc() close1abc() - yield i18n("一键三连中途报错"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} else: - yield i18n("已有正在进行的一键三连任务,需先终止才能开启下一次任务"), {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1abc(): global ps1abc @@ -595,9 +595,9 @@ def close1abc(): except: traceback.print_exc() ps1abc=[] - return i18n("已终止所有一键三连进程"), {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} + return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -with gr.Blocks(title=i18n("GPT-SoVITS WebUI")) as app: +with gr.Blocks(title="GPT-SoVITS WebUI") as app: gr.Markdown( value= i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") @@ -704,9 +704,9 @@ with gr.Blocks(title=i18n("GPT-SoVITS WebUI")) as app: gr.Markdown(value=i18n("1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。")) with gr.Row(): batch_size = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) - total_epoch = gr.Slider(minimum=1,maximum=20,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True) + total_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True) text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True) - save_every_epoch = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True) + save_every_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True) if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True) @@ -717,7 +717,7 @@ with gr.Blocks(title=i18n("GPT-SoVITS WebUI")) as app: gr.Markdown(value=i18n("1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。")) with gr.Row(): batch_size1Bb = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) - total_epoch1Bb = gr.Slider(minimum=2,maximum=100,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True) + total_epoch1Bb = gr.Slider(minimum=2,maximum=50,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True) if_save_latest1Bb = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) if_save_every_weights1Bb = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) save_every_epoch1Bb = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=5,interactive=True) From 810a031fd01275f567c8fc74d3afd8d71ed2f8e0 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 00:11:28 +0800 Subject: [PATCH 019/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index 71d3c72..abb61f1 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -12,14 +12,13 @@ 6-在参考音频结尾留空0.3s,削弱合成音频包含参考音频结尾的问题 -待修复: +### 20240122更新 -1-过短输出文件返回重复参考音频的问题 +1-修复过短输出文件返回重复参考音频的问题。 -2-batch size超过条数导致微调有问题 +2-经测试,英文日文训练原生支持(日文训练需要根目录不含非英文等特殊字符)。 -3-hubert提取在half下出现nan概率更高的问题 +3-音频路径检查。如果尝试读取输入错的路径报错路径不存在,而非ffmpeg错误。 -高优: +待修复:-hubert提取在half下出现nan概率更高的问题 -支持英文日文训练 From 948e7fc086dc68310ff09b2a897233cffb609f1b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 10:30:49 +0800 Subject: [PATCH 020/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index abb61f1..589b0f6 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -10,7 +10,7 @@ 5-清理TEMP文件夹缓存音频等文件 -6-在参考音频结尾留空0.3s,削弱合成音频包含参考音频结尾的问题 +6-大幅削弱合成音频包含参考音频结尾的问题 ### 20240122更新 From 93c47cd9f0c53439536eada18879b4ec5a812ae1 Mon Sep 17 00:00:00 2001 From: 
RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:59:25 +0800 Subject: [PATCH 021/126] fix nan issue(causing sovits zerodivision) fix nan issue(which will cause sovits zerodivision) --- .../prepare_datasets/2-get-hubert-wav32k.py | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py index a5075ff..bf3ab49 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py @@ -49,10 +49,13 @@ maxx=0.95 alpha=0.5 device="cuda:0" model=cnhubert.get_model() +# is_half=False if(is_half==True): model=model.half().to(device) else: model = model.to(device) + +nan_fails=[] def name2go(wav_name): hubert_path="%s/%s.pt"%(hubert_dir,wav_name) if(os.path.exists(hubert_path)):return @@ -60,25 +63,27 @@ def name2go(wav_name): tmp_audio = load_audio(wav_path, 32000) tmp_max = np.abs(tmp_audio).max() if tmp_max > 2.2: - print("%s-%s-%s-filtered" % (idx0, idx1, tmp_max)) + print("%s-filtered" % (wav_name, tmp_max)) return tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha*32768)) + ((1 - alpha)*32768) * tmp_audio tmp_audio = librosa.resample( tmp_audio32, orig_sr=32000, target_sr=16000 - ) + )#不是重采样问题 tensor_wav16 = torch.from_numpy(tmp_audio) if (is_half == True): tensor_wav16=tensor_wav16.half().to(device) else: tensor_wav16 = tensor_wav16.to(device) ssl=model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"].transpose(1,2).cpu()#torch.Size([1, 768, 215]) - if np.isnan(ssl.detach().numpy()).sum()!= 0:return + if np.isnan(ssl.detach().numpy()).sum()!= 0: + nan_fails.append(wav_name) + print("nan filtered:%s"%wav_name) + return wavfile.write( "%s/%s"%(wav32dir,wav_name), 32000, tmp_audio32.astype("int16"), ) - # torch.save(ssl,hubert_path ) my_save(ssl,hubert_path ) with open(inp_text,"r",encoding="utf8")as f: @@ -92,3 +97,12 @@ for line in lines[int(i_part)::int(all_parts)]: name2go(wav_name) except: print(line,traceback.format_exc()) + +if(len(nan_fails)>0 and is_half==True): + is_half=False + model=model.float() + for wav_name in nan_fails: + try: + name2go(wav_name) + except: + print(wav_name,traceback.format_exc()) \ No newline at end of file From 252cb3799ffb69ffc2e07dda6cd1c82f0ef7c14c Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 17:00:03 +0800 Subject: [PATCH 022/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index 589b0f6..bbd51f8 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -20,5 +20,6 @@ 3-音频路径检查。如果尝试读取输入错的路径报错路径不存在,而非ffmpeg错误。 -待修复:-hubert提取在half下出现nan概率更高的问题 +### 20240123更新 +1-hubert提取在half下出现nan概率更高的问题 From d96b7d65ece9a89978dda13d6104d5623b57912e Mon Sep 17 00:00:00 2001 From: Kenn Zhang Date: Tue, 23 Jan 2024 17:00:31 +0800 Subject: [PATCH 023/126] =?UTF-8?q?Docker=E5=8C=96=E5=88=9D=E6=AD=A5?= =?UTF-8?q?=E7=89=88=E6=9C=AC=E5=AE=8C=E6=88=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Dockerfile | 43 +++++++++++++++++++++++++------------------ README.md | 25 +++++++++++++++++++++++++ config.py | 4 ++-- docker-compose.yaml | 31 +++++++++++++++++++++++++++++++ docs/cn/README.md | 26 ++++++++++++++++++++++++++ docs/ja/README.md | 24 ++++++++++++++++++++++++ 6 files changed, 133 insertions(+), 20 deletions(-) 
create mode 100644 docker-compose.yaml diff --git a/Dockerfile b/Dockerfile index d39bf21..cbf92cb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,11 @@ # Base CUDA image FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04 +LABEL maintainer="breakstring@hotmail.com" +LABEL version="dev-20240123.03" +LABEL description="Docker image for GPT-SoVITS" + + # Install 3rd party apps ENV DEBIAN_FRONTEND=noninteractive ENV TZ=Etc/UTC @@ -9,33 +14,31 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* && \ git lfs install - -# Install python packages -WORKDIR /temp -COPY ./requirements.txt /temp/requirements.txt -RUN pip install --no-cache-dir -r requirements.txt - - # Copy application WORKDIR /workspace COPY . /workspace - # Download models RUN chmod +x /workspace/Docker/download.sh && /workspace/Docker/download.sh -# Clone 3rd repos -WORKDIR /workspace/tools/damo_asr/models -RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && \ - (cd speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && git lfs pull) -RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch.git speech_fsmn_vad_zh-cn-16k-common-pytorch && \ - (cd speech_fsmn_vad_zh-cn-16k-common-pytorch && git lfs pull) -RUN git clone --depth 1 https://www.modelscope.cn/iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git punc_ct-transformer_zh-cn-common-vocab272727-pytorch && \ - (cd punc_ct-transformer_zh-cn-common-vocab272727-pytorch && git lfs pull) +# 本应该从 requirements.txt 里面安装package,但是由于funasr和modelscope的问题,暂时先在后面手工安装依赖包吧 +RUN pip install --no-cache-dir torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba psutil PyYAML +# 这里强制指定了modelscope和funasr的版本,后面damo_asr的模型让它们自己下载 +RUN pip install --no-cache-dir modelscope~=1.10.0 torchaudio sentencepiece funasr~=0.8.7 -RUN parallel --will-cite -a /workspace/Docker/damo.sha256 "echo -n {} | sha256sum -c" +# 先屏蔽掉,让容器里自己下载 +# Clone damo_asr +#WORKDIR /workspace/tools/damo_asr/models +#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && \ +# (cd speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && git lfs pull) +#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch.git speech_fsmn_vad_zh-cn-16k-common-pytorch && \ +# (cd speech_fsmn_vad_zh-cn-16k-common-pytorch && git lfs pull) +#RUN git clone --depth 1 https://www.modelscope.cn/iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git punc_ct-transformer_zh-cn-common-vocab272727-pytorch && \ +# (cd punc_ct-transformer_zh-cn-common-vocab272727-pytorch && git lfs pull) -WORKDIR /workspace +#RUN parallel --will-cite -a /workspace/Docker/damo.sha256 "echo -n {} | sha256sum -c" + +#WORKDIR /workspace EXPOSE 9870 EXPOSE 9871 @@ -43,4 +46,8 @@ EXPOSE 9872 EXPOSE 9873 EXPOSE 9874 +VOLUME /workspace/output +VOLUME /workspace/logs +VOLUME /workspace/SoVITS_weights + CMD ["python", "webui.py"] \ No newline at end of file diff --git a/README.md b/README.md index 7649d7b..59089ea 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,31 @@ For Chinese ASR (additionally), download models from [Damo ASR Model](https://mo For 
UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`. +### Using Docker + +#### docker-compose.yaml configuration + +1. Environment Variables: + - is_half: Controls half-precision/double-precision. This is typically the cause if the content under the directories 4-cnhubert/5-wav32k is not generated correctly during the "SSL extracting" step. Adjust to True or False based on your actual situation. + +2. Volumes Configuration,The application's root directory inside the container is set to /workspace. The default docker-compose.yaml lists some practical examples for uploading/downloading content. +3. shm_size: The default available memory for Docker Desktop on Windows is too small, which can cause abnormal operations. Adjust according to your own situation. +4. Under the deploy section, GPU-related settings should be adjusted cautiously according to your system and actual circumstances. + + +#### Running with docker compose +``` +docker compose -f "docker-compose.yaml" up -d +``` + +#### Running with docker command + +As above, modify the corresponding parameters based on your actual situation, then run the following command: +``` +docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03 +``` + + ## Dataset Format The TTS annotation .list file format: diff --git a/config.py b/config.py index ec846b3..75db9bc 100644 --- a/config.py +++ b/config.py @@ -1,10 +1,10 @@ -import sys +import sys,os # 推理用的指定模型 sovits_path = "" gpt_path = "" -is_half = True +is_half = eval(os.environ.get("is_half",True)) is_share=False cnhubert_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base" diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..a772c82 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,31 @@ +version: '3.8' + +services: + gpt-sovits: + image: breakstring/gpt-sovits:dev-20240123.03 + container_name: gpt-sovits-container + environment: + - is_half=False + volumes: + - G:/GPT-SoVITS-DockerTest/output:/workspace/output + - G:/GPT-SoVITS-DockerTest/logs:/workspace/logs + - G:/GPT-SoVITS-DockerTest/SoVITS_weights:/workspace/SoVITS_weights + - G:/GPT-SoVITS-DockerTest/reference:/workspace/reference + working_dir: /workspace + ports: + - "9870:9870" + - "9871:9871" + - "9872:9872" + - "9873:9873" + - "9874:9874" + shm_size: 16G + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: "all" + capabilities: [gpu] + stdin_open: true + tty: true + restart: unless-stopped diff --git a/docs/cn/README.md b/docs/cn/README.md index 27c5668..072dc0d 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -87,6 +87,32 @@ brew install ffmpeg 下载并将 [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) 和 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) 放置在 GPT-SoVITS 根目录下。 +### 在 Docker 中使用 + +#### docker-compose.yaml 设置 + +1. 环境变量: + - is_half: 半精度/双精度控制。在进行 "SSL extracting" 步骤时如果无法正确生成 4-cnhubert/5-wav32k 目录下的内容时,一般都是它引起的,可以根据实际情况来调整为True或者False。 + +2. 
Volume设置,容器内的应用根目录设置为 /workspace。 默认的 docker-compose.yaml 中列出了一些实际的例子,便于上传/下载内容。 +3. shm_size:Windows下的Docker Desktop默认可用内存过小,会导致运行异常,根据自己情况酌情设置。 +4. deploy小节下的gpu相关内容,请根据您的系统和实际情况酌情设置。 + + + +#### 通过 docker compose运行 +``` +docker compose -f "docker-compose.yaml" up -d +``` + +#### 通过 docker 命令运行 + +同上,根据您自己的实际情况修改对应的参数,然后运行如下命令: +``` +docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03 +``` + + ### 预训练模型 diff --git a/docs/ja/README.md b/docs/ja/README.md index d0987a8..cadb68b 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -93,6 +93,30 @@ brew install ffmpeg [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) と [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) をダウンロードし、GPT-SoVITS のルートディレクトリに置きます。 +### Dockerの使用 + +#### docker-compose.yamlの設定 + +1. 環境変数: + - `is_half`:半精度/倍精度の制御。"SSL抽出"ステップ中に`4-cnhubert/5-wav32k`ディレクトリ内の内容が正しく生成されない場合、通常これが原因です。実際の状況に応じてTrueまたはFalseに調整してください。 + +2. ボリューム設定:コンテナ内のアプリケーションのルートディレクトリは`/workspace`に設定されます。デフォルトの`docker-compose.yaml`には、アップロード/ダウンロードの内容の実例がいくつか記載されています。 +3. `shm_size`:WindowsのDocker Desktopのデフォルトの利用可能メモリが小さすぎるため、異常な動作を引き起こす可能性があります。状況に応じて適宜設定してください。 +4. `deploy`セクションのGPUに関連する内容は、システムと実際の状況に応じて慎重に設定してください。 + +#### docker composeで実行する +```markdown +docker compose -f "docker-compose.yaml" up -d +``` + +#### dockerコマンドで実行する + +上記と同様に、実際の状況に基づいて対応するパラメータを変更し、次のコマンドを実行します: +```markdown +docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03 +``` + + ### 事前訓練済みモデル From d1ec88193f592ecc50d6ded7508f37ba258fc5c8 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 18:41:05 +0800 Subject: [PATCH 024/126] Update config.py --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.py b/config.py index 75db9bc..8b5f378 100644 --- a/config.py +++ b/config.py @@ -4,7 +4,7 @@ import sys,os # 推理用的指定模型 sovits_path = "" gpt_path = "" -is_half = eval(os.environ.get("is_half",True)) +is_half = eval(os.environ.get("is_half","True")) is_share=False cnhubert_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base" From 396043a2ed852288fc736f9fe72c737c014e24ef Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 18:43:15 +0800 Subject: [PATCH 025/126] Add files via upload --- GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py index bf3ab49..71b48a9 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py @@ -66,8 +66,9 @@ def name2go(wav_name): print("%s-filtered" % (wav_name, tmp_max)) return tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha*32768)) + ((1 - 
alpha)*32768) * tmp_audio + tmp_audio32b = (tmp_audio / tmp_max * (maxx * alpha*1145.14)) + ((1 - alpha)*1145.14) * tmp_audio tmp_audio = librosa.resample( - tmp_audio32, orig_sr=32000, target_sr=16000 + tmp_audio32b, orig_sr=32000, target_sr=16000 )#不是重采样问题 tensor_wav16 = torch.from_numpy(tmp_audio) if (is_half == True): From da19013b06f856213e9404371a3cff0a142e9090 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 18:52:03 +0800 Subject: [PATCH 026/126] Add files via upload --- GPT_SoVITS/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/GPT_SoVITS/utils.py b/GPT_SoVITS/utils.py index e1a66ea..0ce03b3 100644 --- a/GPT_SoVITS/utils.py +++ b/GPT_SoVITS/utils.py @@ -18,7 +18,7 @@ logging.getLogger("matplotlib").setLevel(logging.ERROR) MATPLOTLIB_FLAG = False -logging.basicConfig(stream=sys.stdout, level=logging.WARNING) +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) logger = logging @@ -310,13 +310,13 @@ def check_git_hash(model_dir): def get_logger(model_dir, filename="train.log"): global logger logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.WARNING) + logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") if not os.path.exists(model_dir): os.makedirs(model_dir) h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.WARNING) + h.setLevel(logging.DEBUG) h.setFormatter(formatter) logger.addHandler(h) return logger From 2e834b305fa33c44416794b7f55e95456684ce4d Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 18:56:08 +0800 Subject: [PATCH 027/126] Add files via upload --- tools/subfix_webui.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/subfix_webui.py b/tools/subfix_webui.py index ad4907b..d6624d0 100644 --- a/tools/subfix_webui.py +++ b/tools/subfix_webui.py @@ -110,6 +110,7 @@ def b_submit_change(*text_list): def b_delete_audio(*checkbox_list): global g_data_json, g_index, g_max_json_index + b_save_file() change = False for i, checkbox in reversed(list(enumerate(checkbox_list))): if g_index + i < len(g_data_json): @@ -121,8 +122,8 @@ def b_delete_audio(*checkbox_list): if g_index > g_max_json_index: g_index = g_max_json_index g_index = g_index if g_index >= 0 else 0 - # if change: - # b_save_file() + if change: + b_save_file() # return gr.Slider(value=g_index, maximum=(g_max_json_index if g_max_json_index>=0 else 0)), *b_change_index(g_index, g_batch) return {"value":g_index,"__type__":"update","maximum":(g_max_json_index if g_max_json_index>=0 else 0)},*b_change_index(g_index, g_batch) @@ -172,6 +173,7 @@ def b_audio_split(audio_breakpoint, *checkbox_list): def b_merge_audio(interval_r, *checkbox_list): global g_data_json , g_max_json_index + b_save_file() checked_index = [] audios_path = [] audios_text = [] From 73cf11e04dcb94fba47376cd127b6bccaab28002 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 20:19:37 +0800 Subject: [PATCH 028/126] Update chinese.py --- GPT_SoVITS/text/chinese.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPT_SoVITS/text/chinese.py b/GPT_SoVITS/text/chinese.py index 64c8818..de3ef01 100644 --- a/GPT_SoVITS/text/chinese.py +++ b/GPT_SoVITS/text/chinese.py @@ -18,7 +18,7 @@ pinyin_to_symbol_map = { for line in open(os.path.join(current_file_path, 
"opencpop-strict.txt")).readlines() } -import jieba.posseg as psg +import jieba_fast.posseg as psg rep_map = { From 80fffb0ad46e4e7f27948d5a57c88cf342088d50 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 20:19:46 +0800 Subject: [PATCH 029/126] Update tone_sandhi.py --- GPT_SoVITS/text/tone_sandhi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPT_SoVITS/text/tone_sandhi.py b/GPT_SoVITS/text/tone_sandhi.py index f987a3f..eafb179 100644 --- a/GPT_SoVITS/text/tone_sandhi.py +++ b/GPT_SoVITS/text/tone_sandhi.py @@ -14,7 +14,7 @@ from typing import List from typing import Tuple -import jieba +import jieba_fast as jieba from pypinyin import lazy_pinyin from pypinyin import Style From 7a32d77782f26ab8f284643f050ce11c65344dfb Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 20:20:26 +0800 Subject: [PATCH 030/126] Update requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fedce8a..a8e72ea 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,4 +20,4 @@ transformers chardet PyYAML psutil -jieba +jieba_fast From 63cfc839834f73e89f21c2c81b7c6fd949c25f3d Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 20:20:39 +0800 Subject: [PATCH 031/126] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 59089ea..0e80304 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ sudo apt-get install python3.9-distutils #### Pip Packages ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba +pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba_fast ``` #### Additional Requirements From 63625758a99e645f3218dd167924e01a0e3cf0dc Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 20:57:47 +0800 Subject: [PATCH 032/126] Add files via upload --- webui.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/webui.py b/webui.py index 1520226..4461056 100644 --- a/webui.py +++ b/webui.py @@ -1,4 +1,4 @@ -import os,shutil,sys,pdb +import os,shutil,sys,pdb,re now_dir = os.getcwd() sys.path.append(now_dir) import json,yaml,warnings,torch @@ -85,9 +85,16 @@ os.makedirs(SoVITS_weight_root,exist_ok=True) os.makedirs(GPT_weight_root,exist_ok=True) SoVITS_names,GPT_names = get_weights_names() +def custom_sort_key(s): + # 使用正则表达式提取字符串中的数字部分和非数字部分 + parts = re.split('(\d+)', s) + # 将数字部分转换为整数,非数字部分保持不变 + parts = [int(part) if part.isdigit() else part for part in parts] + return parts + def change_choices(): SoVITS_names, GPT_names = get_weights_names() - return {"choices": sorted(SoVITS_names), "__type__": "update"}, {"choices": sorted(GPT_names), "__type__": "update"} + return {"choices": sorted(SoVITS_names,key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names,key=custom_sort_key), "__type__": "update"} p_label=None p_uvr5=None @@ -733,8 +740,8 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: with gr.TabItem(i18n("1C-推理")): 
gr.Markdown(value=i18n("选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。")) with gr.Row(): - GPT_dropdown = gr.Dropdown(label=i18n("*GPT模型列表"), choices=sorted(GPT_names),value=pretrained_gpt_name) - SoVITS_dropdown = gr.Dropdown(label=i18n("*SoVITS模型列表"), choices=sorted(SoVITS_names),value=pretrained_sovits_name) + GPT_dropdown = gr.Dropdown(label=i18n("*GPT模型列表"), choices=sorted(GPT_names,key=custom_sort_key),value=pretrained_gpt_name,interactive=True) + SoVITS_dropdown = gr.Dropdown(label=i18n("*SoVITS模型列表"), choices=sorted(SoVITS_names,key=custom_sort_key),value=pretrained_sovits_name,interactive=True) gpu_number_1C=gr.Textbox(label=i18n("GPU卡号,只能填1个整数"), value=gpus, interactive=True) refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary") refresh_button.click(fn=change_choices,inputs=[],outputs=[SoVITS_dropdown,GPT_dropdown]) From 0c691191e894c15686e88279745712b3c6dc232f Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 20:57:51 +0800 Subject: [PATCH 033/126] Add files via upload --- GPT_SoVITS/inference_webui.py | 110 ++++++++++++++++++++++------------ 1 file changed, 71 insertions(+), 39 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 246748a..3046d7a 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -1,4 +1,5 @@ -import os +import os,re +import pdb gpt_path = os.environ.get( "gpt_path", "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" @@ -42,8 +43,6 @@ if is_half == True: else: bert_model = bert_model.to(device) - -# bert_model=bert_model.to(device) def get_bert_feature(text, word2ph): with torch.no_grad(): inputs = tokenizer(text, return_tensors="pt") @@ -57,15 +56,8 @@ def get_bert_feature(text, word2ph): repeat_feature = res[i].repeat(word2ph[i], 1) phone_level_feature.append(repeat_feature) phone_level_feature = torch.cat(phone_level_feature, dim=0) - # if(is_half==True):phone_level_feature=phone_level_feature.half() return phone_level_feature.T - -n_semantic = 1024 - -dict_s2=torch.load(sovits_path,map_location="cpu") -hps=dict_s2["config"] - class DictToAttrRecursive(dict): def __init__(self, input_dict): super().__init__(input_dict) @@ -94,40 +86,48 @@ class DictToAttrRecursive(dict): raise AttributeError(f"Attribute {item} not found") -hps = DictToAttrRecursive(hps) - -hps.model.semantic_frame_rate = "25hz" -dict_s1 = torch.load(gpt_path, map_location="cpu") -config = dict_s1["config"] ssl_model = cnhubert.get_model() if is_half == True: ssl_model = ssl_model.half().to(device) else: ssl_model = ssl_model.to(device) -vq_model = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model -) -if is_half == True: - vq_model = vq_model.half().to(device) -else: - vq_model = vq_model.to(device) -vq_model.eval() -print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) -hz = 50 -max_sec = config["data"]["max_sec"] -t2s_model = Text2SemanticLightningModule(config, "ojbk", is_train=False) -t2s_model.load_state_dict(dict_s1["weight"]) -if is_half == True: - t2s_model = t2s_model.half() -t2s_model = t2s_model.to(device) -t2s_model.eval() -total = sum([param.nelement() for param in t2s_model.parameters()]) -print("Number of parameter: %.2fM" % (total / 1e6)) +def change_sovits_weights(sovits_path): + global vq_model,hps + dict_s2=torch.load(sovits_path,map_location="cpu") + hps=dict_s2["config"] + hps = 
DictToAttrRecursive(hps) + hps.model.semantic_frame_rate = "25hz" + vq_model = SynthesizerTrn( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + **hps.model + ) + del vq_model.enc_q + if is_half == True: + vq_model = vq_model.half().to(device) + else: + vq_model = vq_model.to(device) + vq_model.eval() + print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) +change_sovits_weights(sovits_path) +def change_gpt_weights(gpt_path): + global hz,max_sec,t2s_model,config + hz = 50 + dict_s1 = torch.load(gpt_path, map_location="cpu") + config = dict_s1["config"] + max_sec = config["data"]["max_sec"] + t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) + t2s_model.load_state_dict(dict_s1["weight"]) + if is_half == True: + t2s_model = t2s_model.half() + t2s_model = t2s_model.to(device) + t2s_model.eval() + total = sum([param.nelement() for param in t2s_model.parameters()]) + print("Number of parameter: %.2fM" % (total / 1e6)) +change_gpt_weights(gpt_path) def get_spepc(hps, filename): audio = load_audio(filename, int(hps.data.sampling_rate)) @@ -325,14 +325,46 @@ def cut3(inp): inp = inp.strip("\n") return "\n".join(["%s。" % item for item in inp.strip("。").split("。")]) +def custom_sort_key(s): + # 使用正则表达式提取字符串中的数字部分和非数字部分 + parts = re.split('(\d+)', s) + # 将数字部分转换为整数,非数字部分保持不变 + parts = [int(part) if part.isdigit() else part for part in parts] + return parts + +def change_choices(): + SoVITS_names, GPT_names = get_weights_names() + return {"choices": sorted(SoVITS_names,key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names,key=custom_sort_key), "__type__": "update"} + +pretrained_sovits_name="GPT_SoVITS/pretrained_models/s2G488k.pth" +pretrained_gpt_name="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" +SoVITS_weight_root="SoVITS_weights" +GPT_weight_root="GPT_weights" +os.makedirs(SoVITS_weight_root,exist_ok=True) +os.makedirs(GPT_weight_root,exist_ok=True) +def get_weights_names(): + SoVITS_names = [pretrained_sovits_name] + for name in os.listdir(SoVITS_weight_root): + if name.endswith(".pth"):SoVITS_names.append("%s/%s"%(SoVITS_weight_root,name)) + GPT_names = [pretrained_gpt_name] + for name in os.listdir(GPT_weight_root): + if name.endswith(".ckpt"): GPT_names.append("%s/%s"%(GPT_weight_root,name)) + return SoVITS_names,GPT_names +SoVITS_names,GPT_names = get_weights_names() with gr.Blocks(title="GPT-SoVITS WebUI") as app: gr.Markdown( value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") ) - # with gr.Tabs(): - # with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): with gr.Group(): + gr.Markdown(value=i18n("模型切换")) + with gr.Row(): + GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path,interactive=True) + SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path,interactive=True) + refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary") + refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown]) + SoVITS_dropdown.change(change_sovits_weights,[SoVITS_dropdown],[]) + GPT_dropdown.change(change_gpt_weights,[GPT_dropdown],[]) gr.Markdown(value=i18n("*请上传并填写参考信息")) with gr.Row(): inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath") From 69f588dfd8abc5b41df82aaef5c4a02b86b59795 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 21:00:40 +0800 Subject: [PATCH 034/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index bbd51f8..93fc8be 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -21,5 +21,11 @@ 3-音频路径检查。如果尝试读取输入错的路径报错路径不存在,而非ffmpeg错误。 ### 20240123更新 -1-hubert提取在half下出现nan概率更高的问题 +1-解决hubert提取nan导致SoVITS/GPT训练报错ZeroDivisionError的问题 + +2-支持推理界面快速切换模型 + +3-优化模型文件排序逻辑 + +4-中文分词使用jieba_fast代替jieba From 1fa37308616c610daffa8244cce829d4f999fda3 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 22:45:00 +0800 Subject: [PATCH 035/126] Update README.md --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index 0e80304..4bf6581 100644 --- a/README.md +++ b/README.md @@ -52,11 +52,6 @@ conda activate GPTSoVits bash install.sh ``` ### Install Manually -#### Make sure you have the distutils for python3.9 installed - -```bash -sudo apt-get install python3.9-distutils -``` #### Pip Packages From 7c7fed1135a9b07aba32457ed2260ffa6456d30e Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 22:45:19 +0800 Subject: [PATCH 036/126] Update README.md --- docs/ja/README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/ja/README.md b/docs/ja/README.md index cadb68b..9d2e9ad 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -48,11 +48,6 @@ conda activate GPTSoVits bash install.sh ``` ### 手動インストール -#### python3.9 用の distutils がインストールされていることを確認する - -```bash -sudo apt-get install python3.9-distutils -``` #### Pip パッケージ From e6a71877fe28fc9224449f4ccbef3f359f55f354 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Tue, 23 Jan 2024 22:52:04 +0800 Subject: [PATCH 037/126] Update zh_CN.json --- i18n/locale/zh_CN.json | 408 +++++++++++++++++++++++++++-------------- 1 file changed, 275 insertions(+), 133 deletions(-) diff --git a/i18n/locale/zh_CN.json b/i18n/locale/zh_CN.json index 32ca5ef..c04d656 100644 --- a/i18n/locale/zh_CN.json +++ b/i18n/locale/zh_CN.json @@ -1,135 +1,277 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音", - "A模型权重": "A模型权重", - "A模型路径": "A模型路径", - "B模型路径": "B模型路径", - "E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调", - "Index Rate": "Index Rate", - 
"Onnx导出": "Onnx导出", - "Onnx输出路径": "Onnx输出路径", - "RVC模型路径": "RVC模型路径", - "ckpt处理": "ckpt处理", - "harvest进程数": "harvest进程数", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "pth文件路径不可包含中文": "pth文件路径不可包含中文", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ", - "step1:正在处理数据": "step1:正在处理数据", - "step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)", - "step3: 填写训练设置, 开始训练模型和索引": "step3: 填写训练设置, 开始训练模型和索引", - "step3a:正在训练模型": "step3a:正在训练模型", - "一键训练": "一键训练", - "也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2", - "伴奏人声分离&去混响&去回声": "伴奏人声分离&去混响&去回声", - "使用模型采样率": "使用模型采样率", - "使用设备采样率": "使用设备采样率", - "保存名": "保存名", - "保存的文件名, 默认空为和源文件同名": "保存的文件名, 默认空为和源文件同名", - "保存的模型名不带后缀": "保存的模型名不带后缀", - "保存频率save_every_epoch": "保存频率save_every_epoch", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果", - "修改": "修改", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型信息(仅支持weights文件夹下提取的小模型文件)", - "停止音频转换": "停止音频转换", - "全流程结束!": "全流程结束!", - "刷新音色列表和索引路径": "刷新音色列表和索引路径", - "加载模型": "加载模型", - "加载预训练底模D路径": "加载预训练底模D路径", - "加载预训练底模G路径": "加载预训练底模G路径", - "单次推理": "单次推理", - "卸载音色省显存": "卸载音色省显存", - "变调(整数, 半音数量, 升八度12降八度-12)": "变调(整数, 半音数量, 升八度12降八度-12)", - "后处理重采样至最终采样率,0为不进行重采样": "后处理重采样至最终采样率,0为不进行重采样", - "否": "否", - "启用相位声码器": "启用相位声码器", - "响应阈值": "响应阈值", - "响度因子": "响度因子", - "处理数据": "处理数据", - "导出Onnx模型": "导出Onnx模型", - "导出文件格式": "导出文件格式", - "常见问题解答": "常见问题解答", - "常规设置": "常规设置", - "开始音频转换": "开始音频转换", - "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", - "性能设置": "性能设置", - "总训练轮数total_epoch": "总训练轮数total_epoch", - "批量推理": "批量推理", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ", - "指定输出主人声文件夹": "指定输出主人声文件夹", - "指定输出文件夹": "指定输出文件夹", - "指定输出非主人声文件夹": "指定输出非主人声文件夹", - "推理时间(ms):": "推理时间(ms):", - "推理音色": "推理音色", - "提取": "提取", - "提取音高和处理数据使用的CPU进程数": "提取音高和处理数据使用的CPU进程数", - "是": "是", - "是否仅保存最新的ckpt文件以节省硬盘空间": "是否仅保存最新的ckpt文件以节省硬盘空间", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存时间点将最终小模型保存至weights文件夹", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速", - "显卡信息": "显卡信息", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.", - "查看": "查看", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型信息(仅支持weights文件夹下提取的小模型文件)", - "检索特征占比": "检索特征占比", - "模型": "模型", - "模型推理": "模型推理", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况", - "模型是否带音高指导": "模型是否带音高指导", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否带音高指导(唱歌一定要, 语音可以不要)", - "模型是否带音高指导,1是0否": "模型是否带音高指导,1是0否", - "模型版本型号": "模型版本型号", - "模型融合, 可用于测试音色融合": "模型融合, 可用于测试音色融合", - "模型路径": "模型路径", - "每张显卡的batch_size": "每张显卡的batch_size", - "淡入淡出长度": "淡入淡出长度", - "版本": "版本", - "特征提取": "特征提取", - "特征检索库文件路径,为空则使用下拉的选择结果": "特征检索库文件路径,为空则使用下拉的选择结果", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ", - "目标采样率": "目标采样率", - "算法延迟(ms):": "算法延迟(ms):", - "自动检测index路径,下拉式选择(dropdown)": "自动检测index路径,下拉式选择(dropdown)", - "融合": "融合", - "要改的模型信息": "要改的模型信息", - "要置入的模型信息": "要置入的模型信息", - "训练": "训练", - "训练模型": "训练模型", - "训练特征索引": "训练特征索引", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", - "请指定说话人id": "请指定说话人id", - "请选择index文件": "请选择index文件", - "请选择pth文件": "请选择pth文件", - "请选择说话人id": "请选择说话人id", - "转换": "转换", - "输入实验名": "输入实验名", - "输入待处理音频文件夹路径": "输入待处理音频文件夹路径", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)", - "输入待处理音频文件路径(默认是正确格式示例)": "输入待处理音频文件路径(默认是正确格式示例)", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络", - "输入监听": "输入监听", - "输入训练文件夹路径": "输入训练文件夹路径", - "输入设备": "输入设备", - "输入降噪": "输入降噪", - "输出信息": "输出信息", - "输出变声": "输出变声", - "输出设备": "输出设备", - "输出降噪": "输出降噪", - "输出音频(右下角三个点,点了可以下载)": "输出音频(右下角三个点,点了可以下载)", - "选择.index文件": "选择.index文件", - "选择.pth文件": "选择.pth文件", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU", - "采样率:": "采样率:", - "采样长度": "采样长度", - "重载设备列表": "重载设备列表", - "音调设置": "音调设置", - "音频设备(请使用同种类驱动)": "音频设备(请使用同种类驱动)", - "音高算法": "音高算法", - "额外推理时长": "额外推理时长" + "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", + "UVR5已开启": "UVR5已开启", + "UVR5已关闭": "UVR5已关闭", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.", + "0-前置数据集获取工具": "0-前置数据集获取工具", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人声伴奏分离&去混响去延迟工具", + "是否开启UVR5-WebUI": "是否开启UVR5-WebUI", + "UVR5进程输出信息": "UVR5进程输出信息", + "0b-语音切分工具": "0b-语音切分工具", + "音频自动切分输入路径,可文件可文件夹": "音频自动切分输入路径,可文件可文件夹", + "切分后的子音频的输出根目录": "切分后的子音频的输出根目录", + "threshold:音量小于这个值视作静音的备选切割点": "threshold:音量小于这个值视作静音的备选切割点", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值", + "min_interval:最短切割间隔": "min_interval:最短切割间隔", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept:切完后静音最多留多长", + "开启语音切割": "开启语音切割", + "终止语音切割": "终止语音切割", + "max:归一化后最大值多少": "max:归一化后最大值多少", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix:混多少比例归一化后音频进来", + "切割使用的进程数": "切割使用的进程数", + "语音切割进程输出信息": "语音切割进程输出信息", + "0c-中文批量离线ASR工具": "0c-中文批量离线ASR工具", + "开启离线批量ASR": "开启离线批量ASR", + "终止ASR进程": "终止ASR进程", + "批量ASR(中文only)输入文件夹路径": "批量ASR(中文only)输入文件夹路径", + "ASR进程输出信息": "ASR进程输出信息", + "0d-语音文本校对标注工具": "0d-语音文本校对标注工具", + "是否开启打标WebUI": "是否开启打标WebUI", + "打标数据标注文件路径": "打标数据标注文件路径", + "打标工具进程输出信息": "打标工具进程输出信息", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*实验/模型名", + "显卡信息": "显卡信息", + "预训练的SoVITS-G模型路径": "预训练的SoVITS-G模型路径", + "预训练的SoVITS-D模型路径": "预训练的SoVITS-D模型路径", + "预训练的GPT模型路径": "预训练的GPT模型路径", + "1A-训练集格式化工具": "1A-训练集格式化工具", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "输出logs/实验名目录下应有23456开头的文件和文件夹", + "*文本标注文件": "*文本标注文件", + "*训练集音频文件目录": "*训练集音频文件目录", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "训练集音频文件目录 拼接 list文件里波形对应的文件名。", + "1Aa-文本内容": "1Aa-文本内容", + "GPU卡号以-分割,每个卡号一个进程": "GPU卡号以-分割,每个卡号一个进程", + "预训练的中文BERT模型路径": "预训练的中文BERT模型路径", + "开启文本获取": "开启文本获取", + "终止文本获取进程": "终止文本获取进程", + "文本进程输出信息": "文本进程输出信息", + "1Ab-SSL自监督特征提取": "1Ab-SSL自监督特征提取", + "预训练的SSL模型路径": "预训练的SSL模型路径", + "开启SSL提取": "开启SSL提取", + "终止SSL提取进程": "终止SSL提取进程", + "SSL进程输出信息": "SSL进程输出信息", + "1Ac-语义token提取": "1Ac-语义token提取", + "开启语义token提取": "开启语义token提取", + "终止语义token提取进程": "终止语义token提取进程", + "语义token提取进程输出信息": "语义token提取进程输出信息", + "1Aabc-训练集格式化一键三连": "1Aabc-训练集格式化一键三连", + "开启一键三连": "开启一键三连", + "终止一键三连": "终止一键三连", + "一键三连进程输出信息": "一键三连进程输出信息", + "1B-微调训练": "1B-微调训练", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。", + "每张显卡的batch_size": "每张显卡的batch_size", + "总训练轮数total_epoch,不建议太高": "总训练轮数total_epoch,不建议太高", + "文本模块学习率权重": "文本模块学习率权重", + "保存频率save_every_epoch": "保存频率save_every_epoch", + "是否仅保存最新的ckpt文件以节省硬盘空间": "是否仅保存最新的ckpt文件以节省硬盘空间", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存时间点将最终小模型保存至weights文件夹", + "开启SoVITS训练": "开启SoVITS训练", + "终止SoVITS训练": "终止SoVITS训练", + "SoVITS训练进程输出信息": "SoVITS训练进程输出信息", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。", + "总训练轮数total_epoch": "总训练轮数total_epoch", + "开启GPT训练": "开启GPT训练", + "终止GPT训练": "终止GPT训练", + "GPT训练进程输出信息": "GPT训练进程输出信息", + "1C-推理": "1C-推理", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。", + "*GPT模型列表": "*GPT模型列表", + "*SoVITS模型列表": "*SoVITS模型列表", + "GPU卡号,只能填1个整数": "GPU卡号,只能填1个整数", + "刷新模型路径": "刷新模型路径", + "是否开启TTS推理WebUI": "是否开启TTS推理WebUI", + "TTS推理WebUI进程输出信息": "TTS推理WebUI进程输出信息", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-变声", + "施工中,请静候佳音": "施工中,请静候佳音", + "TTS推理进程已开启": "TTS推理进程已开启", + "TTS推理进程已关闭": "TTS推理进程已关闭", + "打标工具WebUI已开启": "打标工具WebUI已开启", + "打标工具WebUI已关闭": "打标工具WebUI已关闭", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 
详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.", + "*请上传并填写参考信息": "*请上传并填写参考信息", + "*请填写需要合成的目标文本": "*请填写需要合成的目标文本", + "ASR任务开启:%s": "ASR任务开启:%s", + "GPT训练完成": "GPT训练完成", + "GPT训练开始:%s": "GPT训练开始:%s", + "SSL提取进程执行中": "SSL提取进程执行中", + "SSL提取进程结束": "SSL提取进程结束", + "SoVITS训练完成": "SoVITS训练完成", + "SoVITS训练开始:%s": "SoVITS训练开始:%s", + "一键三连中途报错": "一键三连中途报错", + "一键三连进程结束": "一键三连进程结束", + "中文": "中文", + "凑50字一切": "凑50字一切", + "凑五句一切": "凑五句一切", + "切分后文本": "切分后文本", + "切割执行中": "切割执行中", + "切割结束": "切割结束", + "参考音频的文本": "参考音频的文本", + "参考音频的语种": "参考音频的语种", + "合成语音": "合成语音", + "后续将支持混合语种编码文本输入。": "后续将支持混合语种编码文本输入。", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "已有正在进行的ASR任务,需先终止才能开启下一次任务", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "已有正在进行的GPT训练任务,需先终止才能开启下一次任务", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "已有正在进行的一键三连任务,需先终止才能开启下一次任务", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "已有正在进行的切割任务,需先终止才能开启下一次任务", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "已有正在进行的文本任务,需先终止才能开启下一次任务", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", + "已终止ASR进程": "已终止ASR进程", + "已终止GPT训练": "已终止GPT训练", + "已终止SoVITS训练": "已终止SoVITS训练", + "已终止所有1a进程": "已终止所有1a进程", + "已终止所有1b进程": "已终止所有1b进程", + "已终止所有一键三连进程": "已终止所有一键三连进程", + "已终止所有切割进程": "已终止所有切割进程", + "已终止所有语义token进程": "已终止所有语义token进程", + "按中文句号。切": "按中文句号。切", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。", + "文本进程执行中": "文本进程执行中", + "文本进程结束": "文本进程结束", + "日文": "日文", + "英文": "英文", + "语义token提取进程执行中": "语义token提取进程执行中", + "语义token提取进程结束": "语义token提取进程结束", + "请上传参考音频": "请上传参考音频", + "输入路径不存在": "输入路径不存在", + "输入路径存在但既不是文件也不是文件夹": "输入路径存在但既不是文件也不是文件夹", + "输出的语音": "输出的语音", + "进度:1a-done": "进度:1a-done", + "进度:1a-done, 1b-ing": "进度:1a-done, 1b-ing", + "进度:1a-ing": "进度:1a-ing", + "进度:1a1b-done": "进度:1a1b-done", + "进度:1a1b-done, 1cing": "进度:1a1b-done, 1cing", + "进度:all-done": "进度:all-done", + "需要合成的切分前文本": "需要合成的切分前文本", + "需要合成的文本": "需要合成的文本", + "需要合成的语种": "需要合成的语种", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音", + "A模型权重": "A模型权重", + "A模型路径": "A模型路径", + "B模型路径": "B模型路径", + "E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调", + "Index Rate": "Index Rate", + "Onnx导出": "Onnx导出", + "Onnx输出路径": "Onnx输出路径", + "RVC模型路径": "RVC模型路径", + "ckpt处理": "ckpt处理", + "harvest进程数": "harvest进程数", + "index文件路径不可包含中文": "index文件路径不可包含中文", + "pth文件路径不可包含中文": "pth文件路径不可包含中文", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ", + "step1:正在处理数据": "step1:正在处理数据", + "step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)", + "step3: 填写训练设置, 开始训练模型和索引": "step3: 填写训练设置, 开始训练模型和索引", + "step3a:正在训练模型": "step3a:正在训练模型", + "一键训练": "一键训练", + "也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹", + "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2", + "伴奏人声分离&去混响&去回声": "伴奏人声分离&去混响&去回声", + "使用模型采样率": "使用模型采样率", + "使用设备采样率": "使用设备采样率", + "保存名": "保存名", + "保存的文件名, 默认空为和源文件同名": "保存的文件名, 默认空为和源文件同名", + "保存的模型名不带后缀": "保存的模型名不带后缀", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果", + "修改": "修改", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型信息(仅支持weights文件夹下提取的小模型文件)", + "停止音频转换": "停止音频转换", + "全流程结束!": "全流程结束!", + "刷新音色列表和索引路径": "刷新音色列表和索引路径", + "加载模型": "加载模型", + "加载预训练底模D路径": "加载预训练底模D路径", + "加载预训练底模G路径": "加载预训练底模G路径", + "单次推理": "单次推理", + "卸载音色省显存": "卸载音色省显存", + "变调(整数, 半音数量, 升八度12降八度-12)": "变调(整数, 半音数量, 升八度12降八度-12)", + "后处理重采样至最终采样率,0为不进行重采样": "后处理重采样至最终采样率,0为不进行重采样", + "否": "否", + "启用相位声码器": "启用相位声码器", + "响应阈值": "响应阈值", + "响度因子": "响度因子", + "处理数据": "处理数据", + "导出Onnx模型": "导出Onnx模型", + "导出文件格式": "导出文件格式", + "常见问题解答": "常见问题解答", + "常规设置": "常规设置", + "开始音频转换": "开始音频转换", + "性能设置": "性能设置", + "批量推理": "批量推理", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ", + "指定输出主人声文件夹": "指定输出主人声文件夹", + "指定输出文件夹": "指定输出文件夹", + "指定输出非主人声文件夹": "指定输出非主人声文件夹", + "推理时间(ms):": "推理时间(ms):", + "推理音色": "推理音色", + "提取": "提取", + "提取音高和处理数据使用的CPU进程数": "提取音高和处理数据使用的CPU进程数", + "是": "是", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速", + "查看": "查看", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型信息(仅支持weights文件夹下提取的小模型文件)", + "检索特征占比": "检索特征占比", + "模型": "模型", + "模型推理": "模型推理", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况", + "模型是否带音高指导": "模型是否带音高指导", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否带音高指导(唱歌一定要, 语音可以不要)", + "模型是否带音高指导,1是0否": "模型是否带音高指导,1是0否", + "模型版本型号": "模型版本型号", + "模型融合, 可用于测试音色融合": "模型融合, 可用于测试音色融合", + "模型路径": "模型路径", + "淡入淡出长度": "淡入淡出长度", + "版本": "版本", + "特征提取": "特征提取", + "特征检索库文件路径,为空则使用下拉的选择结果": "特征检索库文件路径,为空则使用下拉的选择结果", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. 
", + "目标采样率": "目标采样率", + "算法延迟(ms):": "算法延迟(ms):", + "自动检测index路径,下拉式选择(dropdown)": "自动检测index路径,下拉式选择(dropdown)", + "融合": "融合", + "要改的模型信息": "要改的模型信息", + "要置入的模型信息": "要置入的模型信息", + "训练": "训练", + "训练模型": "训练模型", + "训练特征索引": "训练特征索引", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", + "请指定说话人id": "请指定说话人id", + "请选择index文件": "请选择index文件", + "请选择pth文件": "请选择pth文件", + "请选择说话人id": "请选择说话人id", + "转换": "转换", + "输入实验名": "输入实验名", + "输入待处理音频文件夹路径": "输入待处理音频文件夹路径", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)", + "输入待处理音频文件路径(默认是正确格式示例)": "输入待处理音频文件路径(默认是正确格式示例)", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络", + "输入监听": "输入监听", + "输入训练文件夹路径": "输入训练文件夹路径", + "输入设备": "输入设备", + "输入降噪": "输入降噪", + "输出信息": "输出信息", + "输出变声": "输出变声", + "输出设备": "输出设备", + "输出降噪": "输出降噪", + "输出音频(右下角三个点,点了可以下载)": "输出音频(右下角三个点,点了可以下载)", + "选择.index文件": "选择.index文件", + "选择.pth文件": "选择.pth文件", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU", + "采样率:": "采样率:", + "采样长度": "采样长度", + "重载设备列表": "重载设备列表", + "音调设置": "音调设置", + "音频设备(请使用同种类驱动)": "音频设备(请使用同种类驱动)", + "音高算法": "音高算法", + "额外推理时长": "额外推理时长" } From a61a0b9631f8d44121291db1548a2e046a6bdc38 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Tue, 23 Jan 2024 23:33:48 +0800 Subject: [PATCH 038/126] Update inference_webui.py --- GPT_SoVITS/inference_webui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 3046d7a..fd04ac8 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -401,6 +401,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, + share=is_share, server_port=infer_ttswebui, quiet=True, ) From 52a915b08a0417b878732bc31ee5b3f49d60ec75 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Wed, 24 Jan 2024 00:09:21 +0800 Subject: [PATCH 039/126] Update ja_JP.json --- i18n/locale/ja_JP.json | 543 ++++++++++++++++++++--------------------- 1 file changed, 265 insertions(+), 278 deletions(-) diff --git a/i18n/locale/ja_JP.json b/i18n/locale/ja_JP.json index 03143d1..bd553b6 100644 --- a/i18n/locale/ja_JP.json +++ b/i18n/locale/ja_JP.json @@ -1,288 +1,275 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3 次に、harvestピッチの認識結果に対してメディアンフィルタを使用します。値はフィルター半径で、ミュートを減衰させるために使用します。", - "A模型权重": "Aモデルの重み", - "A模型路径": "Aモデルのパス", - "B模型路径": "Bモデルのパス", - "E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0(最低共振周波数)カーブファイル(オプション、1行に1ピッチ、デフォルトのF0(最低共振周波数)とエレベーションを置き換えます。)", - "Index Rate": "Index Rate", - "Onnx导出": "Onnxエクスポート", - "Onnx输出路径": "Onnx出力パス", - "RVC模型路径": "RVCモデルパス", - "ckpt处理": "ckptファイルの処理", - "harvest进程数": "harvestプロセス数", - "index文件路径不可包含中文": "indexファイルのパスに漢字を含んではいけません", - "pth文件路径不可包含中文": "pthファイルのパスに漢字を含んではいけません", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpeカード番号設定:異なるプロセスに使用するカード番号を入力する。例えば、0-0-1でカード0に2つのプロセス、カード1に1つのプロセスを実行する。", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. 
": "ステップ1:実験設定を入力します。実験データはlogsに保存され、各実験にはフォルダーがあります。実験名のパスを手動で入力する必要があり、実験設定、ログ、トレーニングされたモデルファイルが含まれます。", - "step1:正在处理数据": "step1:処理中のデータ", - "step2:正在提取音高&正在提取特征": "step2:ピッチ抽出と特徴抽出", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "ステップ2a: 訓練フォルダー内のすべての音声ファイルを自動的に探索し、スライスと正規化を行い、2つのwavフォルダーを実験ディレクトリに生成します。現在は一人でのトレーニングのみをサポートしています。", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "ステップ2b: CPUを使用して音高を抽出する(モデルに音高がある場合)、GPUを使用して特徴を抽出する(GPUの番号を選択する)", - "step3: 填写训练设置, 开始训练模型和索引": "ステップ3: トレーニング設定を入力して、モデルとインデックスのトレーニングを開始します", - "step3a:正在训练模型": "step3a:トレーニング中のモデル", - "一键训练": "ワンクリックトレーニング", - "也可批量输入音频文件, 二选一, 优先读文件夹": "複数のオーディオファイルをインポートすることもできます。フォルダパスが存在する場合、この入力は無視されます。", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "UVR5モデルを使用したボーカル伴奏の分離バッチ処理。
有効なフォルダーパスフォーマットの例: D:\\path\\to\\input\\folder (エクスプローラーのアドレスバーからコピーします)。
モデルは三つのカテゴリに分かれています:
1. ボーカルを保持: ハーモニーのないオーディオに対してこれを選択します。HP5よりもボーカルをより良く保持します。HP2とHP3の二つの内蔵モデルが含まれています。HP3は伴奏をわずかに漏らす可能性がありますが、HP2よりもわずかにボーカルをより良く保持します。
2. 主なボーカルのみを保持: ハーモニーのあるオーディオに対してこれを選択します。主なボーカルを弱める可能性があります。HP5の一つの内蔵モデルが含まれています。
3. ディリバーブとディレイモデル (by FoxJoy):
  (1) MDX-Net: ステレオリバーブの除去に最適な選択肢ですが、モノリバーブは除去できません;
 (234) DeEcho: ディレイ効果を除去します。AggressiveモードはNormalモードよりも徹底的に除去します。DeReverbはさらにリバーブを除去し、モノリバーブを除去することができますが、高周波のリバーブが強い内容に対しては非常に効果的ではありません。
ディリバーブ/ディレイに関する注意点:
1. DeEcho-DeReverbモデルの処理時間は、他の二つのDeEchoモデルの約二倍です。
2. MDX-Net-Dereverbモデルは非常に遅いです。
3. 推奨される最もクリーンな設定は、最初にMDX-Netを適用し、その後にDeEcho-Aggressiveを適用することです。", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "ハイフンで区切って使用するGPUの番号を入力します。例えば0-1-2はGPU0、GPU1、GPU2を使用します", - "伴奏人声分离&去混响&去回声": "伴奏ボーカル分離&残響除去&エコー除去", - "使用模型采样率": "使用模型采样率", - "使用设备采样率": "使用设备采样率", - "保存名": "保存ファイル名", - "保存的文件名, 默认空为和源文件同名": "保存するファイル名、デフォルトでは空欄で元のファイル名と同じ名前になります", - "保存的模型名不带后缀": "拡張子のない保存するモデル名", - "保存频率save_every_epoch": "エポックごとの保存頻度", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "明確な子音と呼吸音を保護し、電子音の途切れやその他のアーティファクトを防止します。0.5でオフになります。下げると保護が強化されますが、indexの効果が低下する可能性があります。", - "修改": "変更", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報の修正(weightsフォルダから抽出された小さなモデルファイルのみ対応)", - "停止音频转换": "音声変換を停止", - "全流程结束!": "全工程が完了!", - "刷新音色列表和索引路径": "音源リストとインデックスパスの更新", - "加载模型": "モデルをロード", - "加载预训练底模D路径": "事前学習済みのDモデルのパス", - "加载预训练底模G路径": "事前学習済みのGモデルのパス", - "单次推理": "单次推理", - "卸载音色省显存": "音源を削除してメモリを節約", - "变调(整数, 半音数量, 升八度12降八度-12)": "ピッチ変更(整数、半音数、上下オクターブ12-12)", - "后处理重采样至最终采样率,0为不进行重采样": "最終的なサンプリングレートへのポストプロセッシングのリサンプリング リサンプリングしない場合は0", - "否": "いいえ", - "启用相位声码器": "启用相位声码器", - "响应阈值": "反応閾値", - "响度因子": "ラウドネス係数", - "处理数据": "データ処理", - "导出Onnx模型": "Onnxに変換", - "导出文件格式": "エクスポート形式", - "常见问题解答": "よくある質問", - "常规设置": "一般設定", - "开始音频转换": "音声変換を開始", - "很遗憾您这没有能用的显卡来支持您训练": "トレーニングに対応したGPUが動作しないのは残念です。", - "性能设置": "パフォーマンス設定", - "总训练轮数total_epoch": "総エポック数", - "批量推理": "批量推理", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "一括変換、変換する音声フォルダを入力、または複数の音声ファイルをアップロードし、指定したフォルダ(デフォルトのopt)に変換した音声を出力します。", - "指定输出主人声文件夹": "マスターの出力音声フォルダーを指定する", - "指定输出文件夹": "出力フォルダを指定してください", - "指定输出非主人声文件夹": "マスター以外の出力音声フォルダーを指定する", - "推理时间(ms):": "推論時間(ms):", - "推理音色": "音源推論", - "提取": "抽出", - "提取音高和处理数据使用的CPU进程数": "ピッチの抽出やデータ処理に使用するCPUスレッド数", - "是": "はい", - "是否仅保存最新的ckpt文件以节省硬盘空间": "ハードディスク容量を節約するため、最新のckptファイルのみを保存しますか?", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "各保存時点の小モデルを全部weightsフォルダに保存するかどうか", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "すべてのトレーニングデータをメモリにキャッシュするかどうか。10分以下の小さなデータはキャッシュしてトレーニングを高速化できますが、大きなデータをキャッシュするとメモリが破裂し、あまり速度が上がりません。", - "显卡信息": "GPU情報", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本ソフトウェアはMITライセンスに基づくオープンソースであり、製作者は本ソフトウェアに対していかなる責任を持ちません。本ソフトウェアの利用者および本ソフトウェアから派生した音源(成果物)を配布する者は、本ソフトウェアに対して自身で責任を負うものとします。
この条項に同意しない場合、パッケージ内のコードやファイルを使用や参照を禁じます。詳しくはLICENSEをご覧ください。", - "查看": "表示", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報を表示する(小さいモデルファイルはweightsフォルダーからのみサポートされています)", - "检索特征占比": "検索特徴率", - "模型": "モデル", - "模型推理": "モデル推論", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "モデル抽出(ログフォルダー内の大きなファイルのモデルパスを入力)、モデルを半分までトレーニングし、自動的に小さいファイルモデルを保存しなかったり、中間モデルをテストしたい場合に適用されます。", - "模型是否带音高指导": "モデルに音高ガイドを付けるかどうか", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "モデルに音高ガイドがあるかどうか(歌唱には必要ですが、音声には必要ありません)", - "模型是否带音高指导,1是0否": "モデルに音高ガイドを付けるかどうか、1は付ける、0は付けない", - "模型版本型号": "モデルのバージョン", - "模型融合, 可用于测试音色融合": "モデルのマージ、音源のマージテストに使用できます", - "模型路径": "モデルパス", - "每张显卡的batch_size": "GPUごとのバッチサイズ", - "淡入淡出长度": "フェードイン/フェードアウト長", - "版本": "バージョン", - "特征提取": "特徴抽出", - "特征检索库文件路径,为空则使用下拉的选择结果": "特徴検索ライブラリへのパス 空の場合はドロップダウンで選択", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性から女性へは+12キーをお勧めします。女性から男性へは-12キーをお勧めします。音域が広すぎて音質が劣化した場合は、適切な音域に自分で調整してください。", - "目标采样率": "目標サンプリングレート", - "算法延迟(ms):": "算法延迟(ms):", - "自动检测index路径,下拉式选择(dropdown)": "インデックスパスの自動検出 ドロップダウンで選択", - "融合": "マージ", - "要改的模型信息": "変更するモデル情報", - "要置入的模型信息": "挿入するモデル情報", - "训练": "トレーニング", - "训练模型": "モデルのトレーニング", - "训练特征索引": "特徴インデックスのトレーニング", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "トレーニング終了時に、トレーニングログやフォルダ内のtrain.logを確認することができます", - "请指定说话人id": "話者IDを指定してください", - "请选择index文件": "indexファイルを選択してください", - "请选择pth文件": "pthファイルを選択してください", - "请选择说话人id": "話者IDを選択してください", - "转换": "変換", - "输入实验名": "モデル名", - "输入待处理音频文件夹路径": "処理するオーディオファイルのフォルダパスを入力してください", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "処理対象音声フォルダーのパスを入力してください(エクスプローラーのアドレスバーからコピーしてください)", - "输入待处理音频文件路径(默认是正确格式示例)": "処理対象音声ファイルのパスを入力してください(デフォルトは正しいフォーマットの例です)", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "入力ソースの音量エンベロープと出力音量エンベロープの融合率 1に近づくほど、出力音量エンベロープの割合が高くなる", - "输入监听": "入力を監視", - "输入训练文件夹路径": "トレーニング用フォルダのパスを入力してください", - "输入设备": "入力デバイス", - "输入降噪": "入力ノイズの低減", - "输出信息": "出力情報", - "输出变声": "音声変換の出力", - "输出设备": "出力デバイス", - "输出降噪": "出力ノイズの低減", - "输出音频(右下角三个点,点了可以下载)": "出力音声(右下の三点をクリックしてダウンロードできます)", - "选择.index文件": ".indexファイルを選択", - "选择.pth文件": ".pthファイルを選択", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "ピッチ抽出アルゴリズムの選択、歌声はpmで高速化でき、harvestは低音が良いが信じられないほど遅く、crepeは良く動くがGPUを食います。", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "ピッチ抽出アルゴリズムの選択、歌声はpmで高速化でき、harvestは低音が良いが信じられないほど遅く、crepeは良く動くがGPUを喰います", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "ピッチ抽出アルゴリズムの選択:歌声はpmで高速化でき、入力した音声が高音質でCPUが貧弱な場合はdioで高速化でき、harvestの方が良いが遅く、rmvpeがベストだがCPU/GPUを若干食います。", - "采样率:": "采样率:", - "采样长度": "サンプル長", - "重载设备列表": "デバイスリストをリロードする", - "音调设置": "音程設定", - "音频设备(请使用同种类驱动)": "オーディオデバイス(同じ種類のドライバーを使用してください)", - "音高算法": "ピッチアルゴリズム", - "额外推理时长": "追加推論時間", - "打标工具WebUI已开启": "ラベリングツールWebUIが開始されました", - "打标工具WebUI已关闭": "ラベリングツールWebUIが終了しました", - "UVR5已开启": "UVR5が開始されました", - "UVR5已关闭": "UVR5が終了しました", - "TTS推理进程已开启": "TTS推論プロセスが開始されました", - "TTS推理进程已关闭": "TTS推理プロセスが終了しました", - "ASR任务开启": "ASRタスクが開始されました", - "ASR任务完成": "ASRタスクが完了しました", - "已有正在进行的ASR任务,需先终止才能开启下一次任务": "既に進行中のASRタスクがあります。新たなタスクを開始する前に終了させてください", - "已终止ASR进程": "ASRプロセスが終了しました", - "SoVITS训练开始": "SoVITSのトレーニングが開始されました", - "SoVITS训练完成": "SoVITSのトレーニングが完了しました", - "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "既に進行中のSoVITSのトレーニングタスクがあります。新たなタスクを開始する前に終了させてください", - "已终止SoVITS训练": "SoVITSのトレーニングが終了しました", - "GPT训练开始": "GPTトレーニング開始", - "GPT训练完成": "GPTトレーニング完了", - "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "既に進行中のGPTトレーニングタスクがあります。新たなタスクを開始する前に終了させてください", - "已终止GPT训练": "GPTトレーニングが終了しました", - "切割执行中": "カット中", - "切割结束": "カット終了", - 
"已有正在进行的切割任务,需先终止才能开启下一次任务": "既に進行中のカットタスクがあります。新たなタスクを開始する前に終了させてください", - "已终止所有切割进程": "すべてのカットプロセスが終了しました", - "文本进程执行中": "テキストプロセス実行中", - "文本进程结束": "テキストプロセス終了", - "已有正在进行的文本任务,需先终止才能开启下一次任务": "既に進行中のテキストタスクがあります。新たなタスクを開始する前に終了させてください", - "已终止所有文本进程": "すべてのテキストプロセスが終了しました", - "SSL提取进程执行中": "SSL抽出プロセス実行中", - "SSL提取进程结束": "SSL抽出プロセス終了", - "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "既に進行中のSSL抽出タスクがあります。新たなタスクを開始する前に終了させてください", - "已终止所有1b进程": "すべての1bプロセスが終了しました", - "语义token提取进程执行中": "意味トークン抽出プロセス実行中", - "语义token提取进程结束": "意味トークン抽出プロセス終了", - "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "既に進行中の意味トークン抽出タスクがあります。新たなタスクを開始する前に終了させてください", - "已终止所有语义token进程": "すべての意味トークンプロセスが終了しました", - "语义token提取进程已开始": "意味トークン抽出プロセスが開始されました", - "语义token提取进程已结束": "意味トークン抽出プロセスが終了しました", - "语义token提取进程已终止": "意味トークン抽出プロセスが終了しました", - "语义token提取进程正在进行中": "意味トークン抽出プロセスが進行中です", - "语义token提取任务已完成": "意味トークン抽出タスクが完了しました", - "语义token提取任务正在进行中": "意味トークン抽出タスクが進行中です", - "语义token提取任务已开始": "意味トークン抽出タスクが開始されました", - "语义token提取任务已终止": "意味トークン抽出タスクが終了しました", - "ワンクリックで三つのプロセスを開始": "ワンクリックで三つのプロセスを開始します", - "ワンクリックで三つのプロセスを終了": "ワンクリックで三つのプロセスを終了します", - "ワンクリックで三つのプロセスを中止": "ワンクリックで三つのプロセスを中止します", - "ワンクリックで三つのプロセスが進行中": "ワンクリックで三つのプロセスが進行中です", - "ワンクリックで三つのタスクが完了": "ワンクリックで三つのタスクが完了しました", - "ワンクリックで三つのタスクが進行中": "ワンクリックで三つのタスクが進行中です", - "ワンクリックで三つのタスクを開始": "ワンクリックで三つのタスクを開始します", - "ワンクリックで三つのタスクを中止": "ワンクリックで三つのタスクを中止します", - "0-前置数据集获取工具": "0-前置データセット取得ツール", - "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5ボーカル伴奏分離&リバーブディレイ除去ツール", - "是否开启UVR5-WebUI": "UVR5-WebUIを開始しますか", - "UVR5进程输出信息": "UVR5プロセス出力情報", - "0b-语音切分工具": "0b-音声切断ツール", - "音频自动切分输入路径,可文件可文件夹": "オーディオ自動切断入力パス、ファイルまたはフォルダー可", - "切分后的子音频的输出根目录": "切断後のサブオーディオの出力ルートディレクトリ", - "threshold:音量小于这个值视作静音的备选切割点": "threshold:音量がこの値より小さい場合は、サイレントと見なされる代替切断点", - "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:各セクションの最小長さ、最初のセクションが短すぎる場合は、この値を超えるまで後続のセクションと連続しています", - "min_interval:最短切割间隔": "min_interval:最短切断間隔", - "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size:音量曲線の計算方法、小さいほど精度が高く計算量が多くなります(精度が高いほど効果が良いとは限りません)", - "max_sil_kept:切完后静音最多留多长": "max_sil_kept:切断後のサイレントの最大保持長", - "开启语音切割": "音声切断を開始", - "终止语音切割": "音声切断を終了", - "max:归一化后最大值多少": "max:正規化後の最大値", - "alpha_mix:混多少比例归一化后音频进来": "alpha_mix:どの程度の割合で正規化後のオーディオを混合するか", - "切割使用的进程数": "切断に使用されるプロセス数", - "语音切割进程输出信息": "音声切断プロセス出力情報", + "很遗憾您这没有能用的显卡来支持您训练": "残念ながら、トレーニングをサポートする利用可能なグラフィックカードがありません", + "UVR5已开启": "UVR5がオンになっています", + "UVR5已关闭": "UVR5がオフになっています", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "このソフトウェアはMITライセンスでオープンソース化されており、作者はソフトウェアに対して一切の制御権を持っていません。ソフトウェアを使用する者、ソフトウェアから導出される音声を広める者は、自己責任で行ってください。
この条件を認めない場合、ソフトウェアパッケージ内の任意のコードやファイルを使用または引用することはできません。詳細はルートディレクトリのLICENSEを参照してください。", + "0-前置数据集获取工具": "0-データセット取得ツールの事前処理", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5ボーカルアカンパニメント分離&リバーブおよびディレイ除去ツール", + "是否开启UVR5-WebUI": "UVR5-WebUIをオンにしますか", + "UVR5进程输出信息": "UVR5プロセスの出力情報", + "0b-语音切分工具": "0b-音声分割ツール", + "音频自动切分输入路径,可文件可文件夹": "オーディオの自動分割入力パス、ファイルまたはフォルダを指定できます", + "切分后的子音频的输出根目录": "分割後のサブオーディオの出力ルートディレクトリ", + "threshold:音量小于这个值视作静音的备选切割点": "閾値:この値未満の音量は静音と見なされ、代替のカットポイントとして扱われます", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length:各セグメントの最小長さ。最初のセグメントが短すぎる場合、連続して後続のセグメントに接続され、この値を超えるまで続きます。", + "min_interval:最短切割间隔": "min_interval:最短カット間隔", + "hop_size:怎么算音量曲线,越小精度越大計算量越高(不是精度越大效果越好)": "hop_size:音量曲線を計算する方法。値が小さいほど精度が高くなり、計算量が増加します(精度が高いほど効果が良いわけではありません)。", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept:切り終えた後、最大でどれだけ静かにするか", + "开启语音切割": "音声の分割を開始", + "终止语音切割": "音声の分割を停止", + "max:归一化后最大值多少": "max:正規化後の最大値", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix:正規化後のオーディオが入る割合", + "切割使用的进程数": "分割に使用されるプロセス数", + "语音切割进程输出信息": "音声分割プロセスの出力情報", "0c-中文批量离线ASR工具": "0c-中国語バッチオフラインASRツール", "开启离线批量ASR": "オフラインバッチASRを開始", - "终止ASR进程": "ASRプロセスを終了", - "批量ASR(中文only)输入文件夹路径": "バッチASR(中国語のみ)入力フォルダパス", - "ASR进程输出信息": "ASRプロセス出力情報", - "0d-语音文本校对标注工具": "0d-音声テキスト校正アノテーションツール", - "是否开启打标WebUI": "ラベリングWebUIを開始しますか", - "打标数据标注文件路径": "ラベリングデータアノテーションファイルパス", - "打标工具进程输出信息": "ラベリングツールプロセス出力情報", - "文本进程结束, SSL提取进程执行中": "テキストプロセス終了、SSL抽出プロセス実行中", - "文本进程结束, SSL提取进程结束": "テキストプロセス終了、SSL抽出プロセス終了", - "一键三连进程执行中": "ワンクリック三連プロセス実行中", - "一键三连进程结束": "ワンクリック三連プロセス終了", - "SoVITS训练进程执行中": "SoVITS訓練プロセス実行中", - "SoVITS训练进程结束": "SoVITS訓練プロセス終了", - "GPT训练进程执行中": "GPT訓練プロセス実行中", - "GPT训练进程结束": "GPT訓練プロセス終了", - "推理进程执行中": "推論プロセス実行中", - "推理进程结束": "推論プロセス終了", - "预训练的SoVITS-G模型路径": "事前学習済みのSoVITS-Gモデルのパス", - "预训练的SoVITS-D模型路径": "事前学習済みのSoVITS-Dモデルのパス", - "预训练的GPT模型路径": "事前学習済みのGPTモデルのパス", - "GPU卡号以-分割,每个卡号一个进程": "GPUカード番号は-で区切り、各カード番号に1つのプロセス", - "预训练的中文BERT模型路径": "事前学習済みの中国語BERTモデルのパス", - "开启文本获取": "テキスト取得を開始", - "终止文本获取进程": "テキスト取得プロセスを終了", - "文本进程输出信息": "テキストプロセス出力情報", - "预训练的SSL模型路径": "事前学習済みのSSLモデルのパス", + "终止ASR进程": "ASRプロセスを停止", + "批量ASR(中文only)输入文件夹路径": "バッチASR(中国語のみ)の入力フォルダパス", + "ASR进程输出信息": "ASRプロセスの出力情報", + "0d-语音文本校对标注工具": "0d-音声テキストの校正アノテーションツール", + "是否开启打标WebUI": "WebUIを使用したアノテーションを開始しますか", + "打标数据标注文件路径": "アノテーションデータのファイルパス", + "打标工具进程输出信息": "アノテーションツールプロセスの出力情報", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*実験/モデル名", + "显卡信息": "グラフィックカード情報", + "预训练的SoVITS-G模型路径": "事前にトレーニングされたSoVITS-Gモデルのパス", + "预训练的SoVITS-D模型路径": "事前にトレーニングされたSoVITS-Dモデルのパス", + "预训练的GPT模型路径": "事前にトレーニングされたGPTモデルのパス", + "1A-训练集格式化工具": "1A-トレーニングデータのフォーマットツール", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/実験名ディレクトリには23456で始まるファイルとフォルダが含まれている必要があります", + "*文本标注文件": "*テキスト注釈ファイル", + "*训练集音频文件目录": "*トレーニングデータのオーディオファイルディレクトリ", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "トレーニングデータのオーディオファイルディレクトリ。リストファイル内の波形に対応するファイル名を連結します。", + "1Aa-文本内容": "1Aa-テキストの内容", + "GPU卡号以-分割,每个卡号一个进程": "GPUカード番号はハイフンで区切り、各カード番号ごとに1つのプロセスが実行されます", + "预训练的中文BERT模型路径": "事前にトレーニングされた中文BERTモデルのパス", + "开启文本获取": "テキストの取得を開始", + "终止文本获取进程": "テキスト取得プロセスを停止", + "文本进程输出信息": "テキストプロセスの出力情報", + "1Ab-SSL自监督特征提取": "1Ab-SSLセルフスーパーバイズ特徴抽出", + "预训练的SSL模型路径": "事前にトレーニングされたSSLモデルのパス", "开启SSL提取": "SSL抽出を開始", - "终止SSL提取进程": "SSL抽出プロセスを終了", - "SSL进程输出信息": "SSLプロセス出力情報", - "开启语义token提取": "セマンティックトークン抽出を開始", - "终止语义token提取进程": "セマンティックトークン抽出プロセスを終了", - "语义token提取进程输出信息": "セマンティックトークン抽出プロセス出力情報", - "开启一键三连": "ワンクリック三連を開始", - "终止一键三连": "ワンクリック三連を終了", - "一键三连进程输出信息": "ワンクリック三連プロセス出力情報", - "1A-訓練集格式化工具": 
"1A-トレーニングセットフォーマットツール", - "输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/実験名ディレクトリには、23456で始まるファイルとフォルダが存在する必要があります", - "*文本标注文件": "*テキストアノテーションファイル", - "*训练集音频文件目录": "*トレーニングセットオーディオファイルディレクトリ", - "训练集音频文件目录-拼接-list文件里波形对应的文件名(不是全路径)。": "トレーニングセットオーディオファイルディレクトリ-結合-listファイル内の波形に対応するファイル名(フルパスではありません)。", - "1Aa-文本内容": "1Aa-テキスト内容", + "终止SSL提取进程": "SSL抽出プロセスを停止", + "SSL进程输出信息": "SSLプロセスの出力情報", "1Ac-语义token提取": "1Ac-セマンティックトークン抽出", - "1Aabc-训练集格式化一键三连": "1Aabc-トレーニングセットフォーマットワンクリック三連", - "1B-微调训练": "1B-微調整訓練", - "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS訓練。共有モデルファイルはSoVITS_weightsに出力されます。", - "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT訓練。共有モデルファイルはGPT_weightsに出力されます。", - "1C-推理": "1C-推論", - "2-GPT-SoVITS-变声": "2-GPT-SoVITS-ボイスチェンジ", - "开启SoVITS训练": "SoVITS訓練を開始", - "终止SoVITS训练": "SoVITS訓練を終了", - "SoVITS训练进程输出信息": "SoVITS訓練プロセス出力情報", - "开启GPT训练": "GPT訓練を開始", - "终止GPT训练": "GPT訓練を終了", - "GPT训练进程输出信息": "GPT訓練プロセス出力情報", - "是否开启TTS推理WebUI": "TTS推理WebUIを開始しますか", - "TTS推理WebUI进程输出信息": "TTS推理WebUIプロセス出力情報", - "施工中,请静候佳音": "工事中、お待ちください", - "*实验/模型名": "実験/モデル名", - "1A-训练集格式化工具": "1A-トレーニングセットフォーマットツール", - "1Ab-SSL自监督特征提取": "1Ab-SSL自己監督による特徴抽出", - "总训练轮数total_epoch,不建议太高": "トータルトレーニングラウンド数total_epoch、あまり高く設定しないことをお勧めします", + "开启语义token提取": "セマンティックトークン抽出を開始", + "终止语义token提取进程": "セマンティックトークン抽出プロセスを停止", + "语义token提取进程输出信息": "セマンティックトークン抽出プロセスの出力情報", + "1Aabc-训练集格式化一键三连": "1Aabc-トレーニングデータのフォーマットワンクリック三連", + "开启一键三连": "ワンクリック三連を開始", + "终止一键三连": "ワンクリック三連を停止", + "一键三连进程输出信息": "ワンクリック三連プロセスの出力情報", + "1B-微调训练": "1B-ファインチューニングトレーニング", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITSトレーニング。共有用のモデルファイルはSoVITS_weightsディレクトリに出力されます。", + "每张显卡的batch_size": "各グラフィックカードのバッチサイズ", + "总训练轮数total_epoch,不建议太高": "総トレーニングエポック数total_epoch、高すぎないようにお勧めします", "文本模块学习率权重": "テキストモジュールの学習率の重み", - "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weightsとGPT_weightsに保存された訓練済みモデルを選択します。デフォルトの1つはベースモデルで、5秒間のZero Shot TTSを体験するために使用されます。", + "保存频率save_every_epoch": "保存頻度save_every_epoch", + "是否仅保存最新的ckpt文件以节省硬盘空间": "最新のckptファイルのみを保存してディスクスペースを節約するかどうか", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "各保存時間点で最終的な小さなモデルをweightsフォルダに保存するかどうか", + "开启SoVITS训练": "SoVITSトレーニングを開始", + "终止SoVITS训练": "SoVITSトレーニングを停止", + "SoVITS训练进程输出信息": "SoVITSトレーニングプロセスの出力情報", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPTトレーニング。共有用のモデルファイルはGPT_weightsディレクトリに出力されます。", + "总训练轮数total_epoch": "総トレーニングエポック数total_epoch", + "开启GPT训练": "GPTトレーニングを開始", + "终止GPT训练": "GPTトレーニングを停止", + "GPT训练进程输出信息": "GPTトレーニングプロセスの出力情報", + "1C-推理": "1C-推論", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weightsおよびGPT_weightsに保存されたモデルを選択します。デフォルトのものはプレトレインであり、ゼロショットTTSを体験できます。", "*GPT模型列表": "*GPTモデルリスト", "*SoVITS模型列表": "*SoVITSモデルリスト", - "GPU卡号,只能填1个整数": "GPUカード番号、整数のみ入力可能", - "刷新模型路径": "モデルパスを更新", - "*请上传并填写参考信息": "*参考情報をアップロードして記入してください", - "请上传参考音频": "*参考音声をアップロードしてください", - "参考音频的文本": "*参考音声のテキスト", - "参考音频的语种": "参考音声の言語", - "*请填写需要合成的目标文本": "*合成する目標テキストを入力してください", - "需要合成的文本": "*合成するテキスト", - "需要合成的语种": "*合成する言語", - "合成语音": "音声合成", - "输出的语音": "*出力音声", - "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "テキスト分割ツール。長すぎるテキストを合成すると結果が必ずしも良くない可能性があるため、長すぎる場合は先に切ることをお勧めします。合成はテキストの改行に基づいて分割してから再結合します。", - "需要合成的切分前文本": "合成する前の分割テキストが必要", - "凑五句一切": "五文を一つにまとめる", - "凑50字一切": "50文字を一つにまとめる", - "按中文句号。切": "中国語の句点で切る。", + "GPU卡号,只能填1个整数": "GPU番号、1つの整数しか入力できません", + "刷新模型路径": "モデルのパスを更新", + "是否开启TTS推理WebUI": "TTS推論WebUIを開く", + "TTS推理WebUI进程输出信息": "TTS推論WebUIプロセスの出力情報", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-ボイスチェンジャー", + 
"施工中,请静候佳音": "施工中、お待ちください", + "TTS推理进程已开启": "TTS推論プロセスが開始されました", + "TTS推理进程已关闭": "TTS推論プロセスが終了しました", + "打标工具WebUI已开启": "校正ツールWebUIが開始されました", + "打标工具WebUI已关闭": "校正ツールWebUIが終了しました", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "このソフトウェアはMITライセンスでオープンソース化されており、作者はソフトウェアに対して一切の制御権を持っていません。ソフトウェアを使用する者、ソフトウェアからエクスポートされた音声を伝播する者は、自己の責任を負います。この条件を受け入れない場合は、ソフトウェアパッケージ内の任意のコードやファイルを使用または引用することはできません。詳細はLICENSEを参照してください。", + "*请上传并填写参考信息": "*参照情報をアップロードして記入してください", + "*请填写需要合成的目标文本": "*合成が必要な対象のテキストを記入してください", + "ASR任务开启:%s": "ASRタスクが開始されました:%s", + "GPT训练完成": "GPTトレーニングが完了しました", + "GPT训练开始:%s": "GPTトレーニングが開始されました:%s", + "SSL提取进程执行中": "SSL抽出プロセス実行中", + "SSL提取进程结束": "SSL抽出プロセスが終了しました", + "SoVITS训练完成": "SoVITSトレーニングが完了しました", + "SoVITS训练开始:%s": "SoVITSトレーニングが開始されました:%s", + "一键三连中途报错": "ワンクリックフォーマット中にエラーが発生しました", + "一键三连进程结束": "ワンクリックフォーマットが終了しました", + "中文": "中国語", + "凑50字一切": "50文字ずつカット", + "凑五句一切": "5つの文ごとにカット", "切分后文本": "分割後のテキスト", - "后续将支持混合语种编码文本输入。": "今後、混合言語エンコードテキストの入力をサポートします。" + "切割执行中": "オーディオの分割中", + "切割结束": "オーディオの分割が完了しました", + "参考音频的文本": "参照オーディオのテキスト", + "参考音频的语种": "参照オーディオの言語", + "合成语音": "推論を開始", + "后续将支持混合语种编码文本输入。": "後で混合言語コードテキストの入力がサポートされるようになります。", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "すでに進行中のASRタスクがあります。次のタスクを開始する前に停止してください", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "すでに進行中のGPTトレーニングタスクがあります。次のタスクを開始する前に停止してください", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "すでに進行中のSSL抽出タスクがあります。次のタスクを開始する前に停止してください", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "すでに進行中のSoVITSトレーニングタスクがあります。次のタスクを開始する前に停止してください", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "すでに進行中のワンクリックフォーマットタスクがあります。次のタスクを開始する前に停止してください", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "すでに進行中のオーディオの分割タスクがあります。次のタスクを開始する前に停止してください", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "すでに進行中のTTS校正タスクがあります。次のタスクを開始する前に停止してください", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "すでに進行中の意味トークン抽出タスクがあります。次のタスクを開始する前に停止してください", + "已终止ASR进程": "ASRタスクが終了しました", + "已终止GPT训练": "GPTトレーニングが終了しました", + "已终止SoVITS训练": "SoVITSトレーニングが終了しました", + "已终止所有1a进程": "すべての1aタスクが終了しました", + "已终止所有1b进程": "すべての1bタスクが終了しました", + "已终止所有一键三连进程": "すべてのワンクリックフォーマットタスクが終了しました", + "已终止所有切割进程": "すべてのオーディオの分割タスクが終了しました", + "已终止所有语义token进程": "すべての意味トークンタスクが終了しました", + "按中文句号。切": "中国語の句点でカット", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "テキストスライサーツール。長文を変換すると効果が不安定になる可能性があるため、長文の場合は事前に切り分けることをお勧めします。推論時には、テキストを個別に推論し、それを組み合わせて再構築します。", + "文本进程执行中": "テキスト処理中", + "文本进程结束": "テキスト処理が終了しました", + "日文": "日本語", + "英文": "英語", + "语义token提取进程执行中": "意味トークン抽出実行中", + "语义token提取进程结束": "意味トークン抽出が終了しました", + "请上传参考音频": "参照オーディオをアップロードしてください", + "输入路径不存在": "入力パスが存在しません", + "输入路径存在但既不是文件也不是文件夹": "入力ディレクトリが存在しますが、ファイルでもフォルダでもありません", + "输出的语音": "推論結果", + "进度:1a-done": "進捗:1a完了", + "进度:1a-done, 1b-ing": "進捗:1a完了、1b進行中", + "进度:1a-ing": "進捗:1a進行中", + "进度:1a1b-done": "進捗:1a1b完了", + "进度:1a1b-done, 1cing": "進捗:1a1b完了、1c進行中", + "进度:all-done": "進捗:all-done", + "需要合成的切分前文本": "推論が必要な分割前のテキスト", + "需要合成的文本": "推論テキスト", + "需要合成的语种": "推論テキストの言語", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "3以上の場合:収穫音高の認識結果に中央値フィルタリングを適用します。値はフィルターの半径を表し、息遣いを減少させることができます。", + "A模型权重": "モデルAの重み (w):", + "A模型路径": "モデルAのパス:", + "B模型路径": "モデルBのパス:", + "E:\\语音音频+标注\\米津玄师\\src": "C:\\Users\\Desktop\\src", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線ファイル(オプション)。1行に1つの音高があります。デフォルトのF0とピッチ変調の代わりに使用します:", + "Index Rate": "インデックスレート", + "Onnx导出": "Onnxエクスポート", + "Onnx输出路径": "Onnxエクスポートパス:", + "RVC模型路径": "RVCモデルパス:", + "ckpt处理": "ckpt処理", + "harvest进程数": "harvestピッチアルゴリズムに使用するCPUプロセス数", + "index文件路径不可包含中文": "インデックスファイルパスには中文を含めないでください", + "pth文件路径不可包含中文": 
"pthファイルパスには中文を含めないでください", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "異なるプロセスカードの入力に使用するGPUインデックスを'-'で区切って入力します。例:0-0-1はGPU0で2つのプロセスを実行し、GPU1で1つのプロセスを実行します", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "ステップ1:実験構成を記入します。実験データは「logs」フォルダに保存され、各実験には別々のフォルダがあります。実験名のパスを手動で入力する必要があり、実験構成、ログ、トレーニングされたモデルファイルが含まれています。", + "step1:正在处理数据": "ステップ1:データ処理中", + "step2:正在提取音高&正在提取特征": "ステップ2:ピッチ抽出と特徴抽出中", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "ステップ2a:トレーニングフォルダ内のデコード可能なすべてのファイルを自動的にトラバースし、スライス正規化を実行します。実験ディレクトリに2つのwavフォルダが生成されます。現時点では、単一の歌手/スピーカーのトレーニングのみがサポートされています。", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "ステップ2b:CPUを使用してピッチを抽出します(モデルにピッチがある場合)、GPUを使用して特徴を抽出します(GPUインデックスを選択します):", + "step3: 填写训练设置, 开始训练模型和索引": "ステップ3:トレーニング設定を入力し、モデルとインデックスのトレーニングを開始します", + "step3a:正在训练模型": "ステップ3a:モデルのトレーニングが開始されました", + "一键训练": "ワンクリックトレーニング", + "也可批量输入音频文件, 二选一, 优先读文件夹": "複数のオーディオファイルもインポートできます。フォルダパスが存在する場合、この入力は無視されます。", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "GPUインデックスを'-'で区切って入力します。例:0-1-2はGPU 0、1、および2を使用します。", + "伴奏人声分离&去混响&去回声": "ボーカル/伴奏の分離と残響の除去", + "使用模型采样率": "使用するモデルのサンプルレート", + "使用设备采样率": "使用デバイスのサンプルレート", + "保存名": "保存名:", + "保存的文件名, 默认空为和源文件同名": "保存ファイル名(デフォルト:元のファイルと同じ):", + "保存的模型名不带后缀": "保存されるモデル名(拡張子なし):", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "清濁音と呼吸音を保護し、電子音楽の撕裂などのアーティファクトを防ぎます。0.5まで引っ張ると無効になり、保護力を高めるには値を下げますが、索引の精度が低下する可能性があります。", + "修改": "変更", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報の変更('weights'フォルダから抽出された小さなモデルファイルのみサポート)", + "停止音频转换": "オーディオ変換を停止", + "全流程结束!": "すべてのプロセスが完了しました!", + "刷新音色列表和索引路径": "ボイスリストとインデックスパスをリフレッシュ", + "加载模型": "モデルの読み込み", + "加载预训练底模D路径": "事前にトレーニングされたベースモデルDのパスをロード:", + "加载预训练底模G路径": "事前にトレーニングされたベースモデルGのパスをロード:", + "单次推理": "単一推論", + "卸载音色省显存": "GPUメモリを節約するためにボイスをアンロード:", + "变调(整数, 半音数量, 升八度12降八度-12)": "トランスポーズ(整数、半音の数、8度上げ: 12、8度下げ: -12):", + "后处理重采样至最终采样率,0为不进行重采样": "後処理でオーディオを最終のサンプルレートに再サンプリングします。リサンプリングを行わない場合は0に設定してください:", + "否": "いいえ", + "启用相位声码器": "位相音声コーダーを有効にする", + "响应阈值": "応答閾値", + "响度因子": "音量ファクター", + "处理数据": "データ処理", + "导出Onnx模型": "Onnxモデルのエクスポート", + "导出文件格式": "エクスポートファイル形式", + "常见问题解答": "よくある質問 (FAQ)", + "常规设置": "一般的な設定", + "开始音频转换": "オーディオ変換を開始", + "性能设置": "性能設定", + "批量推理": "一括推論", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "一括変換。変換するオーディオファイルが含まれるフォルダを入力するか、複数のオーディオファイルをアップロードします。変換されたオーディオは指定されたフォルダ (デフォルト: 'opt') に出力されます。", + "指定输出主人声文件夹": "ボーカルの出力フォルダを指定:", + "指定输出文件夹": "出力フォルダの指定:", + "指定输出非主人声文件夹": "伴奏の出力フォルダを指定:", + "推理时间(ms):": "推論時間 (ms):", + "推理音色": "推論ボイス:", + "提取": "抽出", + "提取音高和处理数据使用的CPU进程数": "ピッチ抽出およびデータ処理に使用されるCPUプロセスの数:", + "是": "はい", + "是否缓存所有训练集至显存. 
10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "すべてのトレーニングセットをGPUメモリにキャッシュするかどうか。小さなデータセット (10分以下) をキャッシュするとトレーニングが高速化されますが、大きなデータセットをキャッシュするとGPUメモリが消費され、あまり速度が向上しないかもしれません:", + "查看": "表示", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報を表示します ( 'weights' フォルダから抽出された小さなモデルファイルにのみ対応):", + "检索特征占比": "特徴の検索比率 (アクセントの強度を制御、高すぎるとアーティファクトが発生します):", + "模型": "モデル", + "模型推理": "モデル推論", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "モデル抽出 ( 'logs' フォルダ内の大きなファイルモデルのパスを入力)。トレーニングを途中で停止して手動で小さなモデルファイルを抽出および保存したい場合、または中間モデルをテストしたい場合に使用します:", + "模型是否带音高指导": "モデルにピッチガイダンスがあるかどうか:", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "モデルにピッチガイダンスがあるかどうか (歌唱には必須、音声にはオプション):", + "模型是否带音高指导,1是0否": "モデルにピッチガイダンスがあるかどうか (1: はい、0: いいえ):", + "模型版本型号": "モデルアーキテクチャバージョン:", + "模型融合, 可用于测试音色融合": "モデルフュージョン、音色フュージョンをテストするために使用できます", + "模型路径": "モデルへのパス:", + "淡入淡出长度": "フェードの長さ", + "版本": "バージョン", + "特徴提取": "特徴抽出", + "特徴检索库文件路径,为空则使用下拉的选择结果": "特徴インデックスファイルへのパス。空白の場合はドロップダウンから選択された結果が使用されます:", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性から女性への変換では+12キーが推奨され、女性から男性への変換では-12キーが推奨されます。音域が広すぎて音声が歪む場合は、適切な音域に手動で調整することもできます。", + "目标采样率": "目標サンプルレート:", + "算法延迟(ms):": "アルゴリズムの遅延(ms):", + "自动检测index路径,下拉式选择(dropdown)": "indexパスを自動検出し、ドロップダウンから選択します:", + "融合": "フュージョン", + "要改的模型信息": "変更するモデル情報:", + "要置入的模型信息": "挿入するモデル情報:", + "训练": "トレーニング", + "训练模型": "モデルのトレーニング", + "训练特征索引": "特徴索引のトレーニング", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "トレーニングが完了しました。トレーニングログはコンソールまたは実験フォルダの 'train.log' ファイルで確認できます。", + "请指定说话人id": "話者/歌手のIDを指定してください:", + "请选择index文件": ".index ファイルを選択してください", + "请选择pth文件": ".pth ファイルを選択してください", + "请选择说话人id": "話者/歌手のIDを選択してください:", + "转换": "変換", + "输入实验名": "実験名を入力:", + "输入待处理音频文件夹路径": "処理するオーディオフォルダのパスを入力してください:", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "処理するオーディオフォルダのパスを入力してください (ファイルマネージャのアドレスバーからコピーしてください):", + "输入待处理音频文件路径(默认是正确格式示例)": "処理するオーディオファイルのパスを入力してください (デフォルトは正しい形式の例です):", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "音量エンベロープのスケーリングを調整します。0に近いほど、元のボーカルの音量に似ます。相対的に低い値に設定すると、ノイズをマスキングし、音量がより自然に聞こえるようになります。1に近いほど、一貫して大きな音量になります:", + "输入监听": "入力ボイスモニター", + "输入训练文件夹路径": "トレーニングフォルダのパスを入力してください:", + "输入设备": "入力デバイス", + "输入降噪": "ノイズリダクションの入力", + "输出信息": "出力情報", + "输出变声": "変換されたボイスの出力", + "输出设备": "出力デバイス", + "输出降噪": "ノイズリダクションの出力", + "输出音频(右下角三个点,点了可以下载)": "オーディオの出力 (右下隅の三点をクリックしてダウンロード)", + "选择.index文件": ".index ファイルを選択してください", + "选择.pth文件": ".pth ファイルを選択してください", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "音高抽出アルゴリズムを選択します。歌声を抽出する場合は 'pm' を使用して高速化できます。高品質な音声でパフォーマンスが向上するが、CPUの使用が悪化する場合は 'dio' を使用できます。 'harvest' は品質が向上しますが、遅いです。 'rmvpe' は最高の品質で、少ないGPUが必要です", + "选择音高提取算法,输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "音高抽出アルゴリズムを選択します。歌声を抽出する場合は 'pm' を使用して高速化できます。高品質な音声でパフォーマンスが向上するが、CPUの使用が悪化する場合は 'dio' を使用できます。 'harvest' は品質が向上しますが、遅いです。 'rmvpe' は最高の品質で、CPU/GPUの使用が少ないです", + "采样率:": "サンプルレート:", + "采样长度": "サンプル長", + "重载设备列表": "デバイスリストを再読み込み", + "音调设置": "ピッチ設定", + "音频设备(请使用同种类驱动)": "オーディオデバイス (同じタイプのドライバを使用してください)", + "音高算法": "音程検出アルゴリズム", + "额外推理时长": "追加推論時間" } From 5688a264960b545f66129b70a9774a59069d7ab1 Mon Sep 17 00:00:00 2001 From: KakaruHayate <97896816+KakaruHayate@users.noreply.github.com> Date: Wed, 24 Jan 2024 11:10:40 +0800 Subject: [PATCH 040/126] Create go-webui.ps1 --- go-webui.ps1 | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 go-webui.ps1 diff --git a/go-webui.ps1 b/go-webui.ps1 new file mode 100644 index 0000000..5d225a4 --- /dev/null +++ b/go-webui.ps1 @@ -0,0 +1,2 @@ +runtime\python.exe webui.py +pause From 945723b3012ee234e669c7b958566f41d109320d Mon Sep 17 00:00:00 
2001 From: Han Fangyuan Date: Wed, 24 Jan 2024 11:24:49 +0800 Subject: [PATCH 041/126] fix(docker-compose): relative path volumes --- docker-compose.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index a772c82..ed6f82a 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -7,10 +7,10 @@ services: environment: - is_half=False volumes: - - G:/GPT-SoVITS-DockerTest/output:/workspace/output - - G:/GPT-SoVITS-DockerTest/logs:/workspace/logs - - G:/GPT-SoVITS-DockerTest/SoVITS_weights:/workspace/SoVITS_weights - - G:/GPT-SoVITS-DockerTest/reference:/workspace/reference + - ./output:/workspace/output + - ./logs:/workspace/logs + - ./SoVITS_weights:/workspace/SoVITS_weights + - ./reference:/workspace/reference working_dir: /workspace ports: - "9870:9870" From aed4935fcea3b36da6d2080246cba71524b6bf6b Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 16:41:23 +0800 Subject: [PATCH 042/126] mps support --- webui.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 4461056..59eb0ff 100644 --- a/webui.py +++ b/webui.py @@ -45,14 +45,17 @@ i18n = I18nAuto() from scipy.io import wavfile from tools.my_utils import load_audio from multiprocessing import cpu_count + +os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu + n_cpu=cpu_count() -# 判断是否有能用来训练和加速推理的N卡 ngpu = torch.cuda.device_count() gpu_infos = [] mem = [] if_gpu_ok = False +# 判断是否有能用来训练和加速推理的N卡 if torch.cuda.is_available() or ngpu != 0: for i in range(ngpu): gpu_name = torch.cuda.get_device_name(i) @@ -61,6 +64,12 @@ if torch.cuda.is_available() or ngpu != 0: if_gpu_ok = True # 至少有一张能用的N卡 gpu_infos.append("%s\t%s" % (i, gpu_name)) mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4)) +# 判断是否支持mps加速 +if torch.backends.mps.is_available(): + if_gpu_ok = True + gpu_infos.append("%s\t%s" % ("0", "Apple GPU")) + mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # 实测使用系统内存作为显存不会爆显存 + if if_gpu_ok and len(gpu_infos) > 0: gpu_info = "\n".join(gpu_infos) default_batch_size = min(mem) // 2 From 382102c9d03807062d2a85601ca8ea65cd13db1c Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 16:45:13 +0800 Subject: [PATCH 043/126] mps support, optimized device selection --- config.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/config.py b/config.py index 8b5f378..c9124bf 100644 --- a/config.py +++ b/config.py @@ -1,5 +1,6 @@ import sys,os +import torch # 推理用的指定模型 sovits_path = "" @@ -14,7 +15,12 @@ pretrained_gpt_path = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch= exp_root = "logs" python_exec = sys.executable or "python" -infer_device = "cuda" +if torch.cuda.is_available(): + infer_device = "cuda" +elif torch.mps.is_available(): + infer_device = "mps" +else: + infer_device = "cpu" webui_port_main = 9874 webui_port_uvr5 = 9873 From cb9d8fe8a5dfa4bc4c3aa144f1fb8772e3e0c065 Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 16:47:52 +0800 Subject: [PATCH 044/126] mps support --- api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api.py b/api.py index 376b0bc..0070a16 100644 --- a/api.py +++ b/api.py @@ -35,7 +35,7 @@ parser.add_argument("-dr", "--default_refer_path", type=str, default="", parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") parser.add_argument("-dl", "--default_refer_language", type=str, default="", 
help="默认参考音频语种") -parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu") +parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu / mps") parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1") parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") @@ -290,6 +290,7 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan wav.seek(0) torch.cuda.empty_cache() + torch.mps.empty_cache() return StreamingResponse(wav, media_type="audio/wav") From 93dd8334f4ce7fb5ccdeabebe05deb26a3cf30fb Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Wed, 24 Jan 2024 17:02:18 +0800 Subject: [PATCH 045/126] Update api.py --- api.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/api.py b/api.py index 376b0bc..725b12d 100644 --- a/api.py +++ b/api.py @@ -192,13 +192,18 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) t0 = ttime() prompt_text = prompt_text.strip("\n") prompt_language, text = prompt_language, text.strip("\n") + zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32) with torch.no_grad(): - wav16k, sr = librosa.load(ref_wav_path, sr=16000) # 派蒙 + wav16k, sr = librosa.load(ref_wav_path, sr=16000) wav16k = torch.from_numpy(wav16k) + zero_wav_torch = torch.from_numpy(zero_wav) if (is_half == True): wav16k = wav16k.half().to(device) + zero_wav_torch = zero_wav_torch.half().to(device) else: wav16k = wav16k.to(device) + zero_wav_torch = zero_wav_torch.to(device) + wav16k=torch.cat([wav16k,zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() codes = vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] @@ -209,7 +214,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) phones1 = cleaned_text_to_sequence(phones1) texts = text.split("\n") audio_opt = [] - zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32) + for text in texts: phones2, word2ph2, norm_text2 = clean_text(text, text_language) phones2 = cleaned_text_to_sequence(phones2) From a8e603445fdefbbce53c833987b8df32dfa0749e Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 17:27:58 +0800 Subject: [PATCH 046/126] support mps, optimized device selection --- GPT_SoVITS/inference_webui.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index fd04ac8..1d417b1 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -35,7 +35,13 @@ from my_utils import load_audio from tools.i18n.i18n import I18nAuto i18n = I18nAuto() -device = "cuda" +if torch.cuda.is_available(): + device = "cuda" +elif torch.mps.is_available(): + device = "mps" +else: + device = "cpu" + tokenizer = AutoTokenizer.from_pretrained(bert_path) bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) if is_half == True: From 8069264e642f6e9a9a37f88a78ae1dd788e7865c Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 17:30:49 +0800 Subject: [PATCH 047/126] mps support --- GPT_SoVITS/s1_train.py | 4 ++-- 1 file changed, 2 insertions(+), 
2 deletions(-) diff --git a/GPT_SoVITS/s1_train.py b/GPT_SoVITS/s1_train.py index 4a77006..db7b9a3 100644 --- a/GPT_SoVITS/s1_train.py +++ b/GPT_SoVITS/s1_train.py @@ -116,9 +116,9 @@ def main(args): devices=-1, benchmark=False, fast_dev_run=False, - strategy=DDPStrategy( + strategy = "auto" if torch.mps.is_available() else DDPStrategy( process_group_backend="nccl" if platform.system() != "Windows" else "gloo" - ), + ), # mps 不支持多节点训练 precision=config["train"]["precision"], logger=logger, num_sanity_val_steps=0, From 07a5339691e786299d5a96364297be3ddedd3148 Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Wed, 24 Jan 2024 19:37:47 +0800 Subject: [PATCH 048/126] mps support --- GPT_SoVITS/AR/data/bucket_sampler.py | 7 +- GPT_SoVITS/inference_webui.py | 4 +- GPT_SoVITS/prepare_datasets/1-get-text.py | 2 +- .../prepare_datasets/2-get-hubert-wav32k.py | 2 +- GPT_SoVITS/prepare_datasets/3-get-semantic.py | 2 +- GPT_SoVITS/s1_train.py | 2 +- GPT_SoVITS/s2_train.py | 82 +++++++++++++------ config.py | 2 +- 8 files changed, 70 insertions(+), 33 deletions(-) diff --git a/GPT_SoVITS/AR/data/bucket_sampler.py b/GPT_SoVITS/AR/data/bucket_sampler.py index 7d752db..647491f 100644 --- a/GPT_SoVITS/AR/data/bucket_sampler.py +++ b/GPT_SoVITS/AR/data/bucket_sampler.py @@ -41,12 +41,13 @@ class DistributedBucketSampler(Sampler[T_co]): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") - num_replicas = dist.get_world_size() + num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1 if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") - rank = dist.get_rank() - torch.cuda.set_device(rank) + rank = dist.get_rank() if torch.cuda.is_available() else 0 + if torch.cuda.is_available(): + torch.cuda.set_device(rank) if rank >= num_replicas or rank < 0: raise ValueError( "Invalid rank {}, rank should be in the interval" diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 1d417b1..79e4a82 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -35,9 +35,11 @@ from my_utils import load_audio from tools.i18n.i18n import I18nAuto i18n = I18nAuto() +os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。 + if torch.cuda.is_available(): device = "cuda" -elif torch.mps.is_available(): +elif torch.backends.mps.is_available(): device = "mps" else: device = "cpu" diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py index 8579693..b4a145c 100644 --- a/GPT_SoVITS/prepare_datasets/1-get-text.py +++ b/GPT_SoVITS/prepare_datasets/1-get-text.py @@ -46,7 +46,7 @@ if os.path.exists(txt_path) == False: bert_dir = "%s/3-bert" % (opt_dir) os.makedirs(opt_dir, exist_ok=True) os.makedirs(bert_dir, exist_ok=True) - device = "cuda:0" + device = "cuda:0" if torch.cuda.is_available() else "mps" tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir) bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir) if is_half == True: diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py index 71b48a9..31e8068 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py @@ -47,7 +47,7 @@ os.makedirs(wav32dir,exist_ok=True) maxx=0.95 alpha=0.5 -device="cuda:0" +device="cuda:0" if torch.cuda.is_available() else "mps" model=cnhubert.get_model() # is_half=False 
if(is_half==True): diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py index 7cee6e4..69eea07 100644 --- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py +++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py @@ -38,7 +38,7 @@ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) if os.path.exists(semantic_path) == False: os.makedirs(opt_dir, exist_ok=True) - device = "cuda:0" + device = "cuda:0" if torch.cuda.is_available() else "mps" hps = utils.get_hparams_from_file(s2config_path) vq_model = SynthesizerTrn( hps.data.filter_length // 2 + 1, diff --git a/GPT_SoVITS/s1_train.py b/GPT_SoVITS/s1_train.py index db7b9a3..30c167e 100644 --- a/GPT_SoVITS/s1_train.py +++ b/GPT_SoVITS/s1_train.py @@ -116,7 +116,7 @@ def main(args): devices=-1, benchmark=False, fast_dev_run=False, - strategy = "auto" if torch.mps.is_available() else DDPStrategy( + strategy = "auto" if torch.backends.mps.is_available() else DDPStrategy( process_group_backend="nccl" if platform.system() != "Windows" else "gloo" ), # mps 不支持多节点训练 precision=config["train"]["precision"], diff --git a/GPT_SoVITS/s2_train.py b/GPT_SoVITS/s2_train.py index d2ec262..e6b64f6 100644 --- a/GPT_SoVITS/s2_train.py +++ b/GPT_SoVITS/s2_train.py @@ -44,9 +44,12 @@ global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." + assert torch.cuda.is_available() or torch.backends.mps.is_available(), "Only GPU training is allowed." - n_gpus = torch.cuda.device_count() + if torch.backends.mps.is_available(): + n_gpus = 1 + else: + n_gpus = torch.cuda.device_count() os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = str(randint(20000, 55555)) @@ -70,13 +73,14 @@ def run(rank, n_gpus, hps): writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval")) dist.init_process_group( - backend="gloo" if os.name == "nt" else "nccl", + backend = "gloo" if os.name == "nt" or torch.backends.mps.is_available() else "nccl", init_method="env://", world_size=n_gpus, rank=rank, ) torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) + if torch.cuda.is_available(): + torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data) ######## train_sampler = DistributedBucketSampler( @@ -128,9 +132,14 @@ def run(rank, n_gpus, hps): hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, **hps.model, - ).cuda(rank) + ).cuda(rank) if torch.cuda.is_available() else SynthesizerTrn( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + **hps.model, + ).to("mps") - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) + net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to("mps") for name, param in net_g.named_parameters(): if not param.requires_grad: print(name, "not requires_grad") @@ -174,8 +183,12 @@ def run(rank, n_gpus, hps): betas=hps.train.betas, eps=hps.train.eps, ) - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) + if torch.cuda.is_available(): + net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) + net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) + else: + net_g = net_g.to("mps") + net_d = net_d.to("mps") try: # 如果能加载自动resume _, _, _, epoch_str 
= utils.load_checkpoint( @@ -205,6 +218,9 @@ def run(rank, n_gpus, hps): net_g.module.load_state_dict( torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"], strict=False, + ) if torch.cuda.is_available() else net_g.load_state_dict( + torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"], + strict=False, ) ) ##测试不加载优化器 if hps.train.pretrained_s2D != "": @@ -213,6 +229,8 @@ def run(rank, n_gpus, hps): print( net_d.module.load_state_dict( torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] + ) if torch.cuda.is_available() else net_d.load_state_dict( + torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"] ) ) @@ -288,18 +306,26 @@ def train_and_evaluate( text, text_lengths, ) in tqdm(enumerate(train_loader)): - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( - rank, non_blocking=True - ) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( - rank, non_blocking=True - ) - ssl = ssl.cuda(rank, non_blocking=True) - ssl.requires_grad = False - # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) - text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda( - rank, non_blocking=True - ) + if torch.cuda.is_available(): + spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( + rank, non_blocking=True + ) + y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( + rank, non_blocking=True + ) + ssl = ssl.cuda(rank, non_blocking=True) + ssl.requires_grad = False + # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) + text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda( + rank, non_blocking=True + ) + else: + spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps") + y, y_lengths = y.to("mps"), y_lengths.to("mps") + ssl = ssl.to("mps") + ssl.requires_grad = False + # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True) + text, text_lengths = text.to("mps"), text_lengths.to("mps") with autocast(enabled=hps.train.fp16_run): ( @@ -500,13 +526,21 @@ def evaluate(hps, generator, eval_loader, writer_eval): text_lengths, ) in enumerate(eval_loader): print(111) - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - ssl = ssl.cuda() - text, text_lengths = text.cuda(), text_lengths.cuda() + if torch.cuda.is_available(): + spec, spec_lengths = spec.cuda(), spec_lengths.cuda() + y, y_lengths = y.cuda(), y_lengths.cuda() + ssl = ssl.cuda() + text, text_lengths = text.cuda(), text_lengths.cuda() + else: + spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps") + y, y_lengths = y.to("mps"), y_lengths.to("mps") + ssl = ssl.to("mps") + text, text_lengths = text.to("mps"), text_lengths.to("mps") for test in [0, 1]: y_hat, mask, *_ = generator.module.infer( ssl, spec, spec_lengths, text, text_lengths, test=test + ) if torch.cuda.is_available() else generator.infer( + ssl, spec, spec_lengths, text, text_lengths, test=test ) y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length diff --git a/config.py b/config.py index c9124bf..897f53c 100644 --- a/config.py +++ b/config.py @@ -17,7 +17,7 @@ exp_root = "logs" python_exec = sys.executable or "python" if torch.cuda.is_available(): infer_device = "cuda" -elif torch.mps.is_available(): +elif torch.backends.mps.is_available(): infer_device = "mps" else: infer_device = "cpu" From 5111713ed7e82f6f32d65c3c2a5be9962211c85c Mon Sep 17 00:00:00 2001 From: Miuzarte <982809597@qq.com> Date: Wed, 24 Jan 2024 20:16:39 +0800 Subject: [PATCH 049/126] feat: 
api.py change refer --- api.py | 92 +++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 75 insertions(+), 17 deletions(-) diff --git a/api.py b/api.py index 725b12d..60d5919 100644 --- a/api.py +++ b/api.py @@ -7,7 +7,7 @@ import torch import librosa import soundfile as sf from fastapi import FastAPI, Request, HTTPException -from fastapi.responses import StreamingResponse +from fastapi.responses import StreamingResponse, JSONResponse import uvicorn from transformers import AutoModelForMaskedLM, AutoTokenizer import numpy as np @@ -51,10 +51,18 @@ args = parser.parse_args() sovits_path = args.sovits_path gpt_path = args.gpt_path -default_refer_path = args.default_refer_path -default_refer_text = args.default_refer_text -default_refer_language = args.default_refer_language -has_preset = False + +class DefaultRefer: + def __init__(self, path, text, language): + self.path = args.default_refer_path + self.text = args.default_refer_text + self.language = args.default_refer_language + + def is_ready(self) -> bool: + return is_full(self.path, self.text, self.language) + + +default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language) device = args.device port = args.port @@ -68,15 +76,13 @@ if gpt_path == "": print(f"[WARN] 未指定GPT模型路径, fallback后当前值: {gpt_path}") # 指定默认参考音频, 调用方 未提供/未给全 参考音频参数时使用 -if default_refer_path == "" or default_refer_text == "" or default_refer_language == "": - default_refer_path, default_refer_text, default_refer_language = "", "", "" +if default_refer.path == "" or default_refer.text == "" or default_refer.language == "": + default_refer.path, default_refer.text, default_refer.language = "", "", "" print("[INFO] 未指定默认参考音频") - has_preset = False else: - print(f"[INFO] 默认参考音频路径: {default_refer_path}") - print(f"[INFO] 默认参考音频文本: {default_refer_text}") - print(f"[INFO] 默认参考音频语种: {default_refer_language}") - has_preset = True + print(f"[INFO] 默认参考音频路径: {default_refer.path}") + print(f"[INFO] 默认参考音频文本: {default_refer.text}") + print(f"[INFO] 默认参考音频语种: {default_refer.language}") is_half = g_config.is_half if args.full_precision: @@ -100,6 +106,20 @@ else: bert_model = bert_model.to(device) +def is_empty(*items): # 任意一项不为空返回False + for item in items: + if item is not None and item != "": + return False + return True + + +def is_full(*items): # 任意一项为空返回False + for item in items: + if item is None or item == "": + return False + return True + + def get_bert_feature(text, word2ph): with torch.no_grad(): inputs = tokenizer(text, return_tensors="pt") @@ -203,7 +223,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) else: wav16k = wav16k.to(device) zero_wav_torch = zero_wav_torch.to(device) - wav16k=torch.cat([wav16k,zero_wav_torch]) + wav16k = torch.cat([wav16k, zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() codes = vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] @@ -264,6 +284,25 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16) +def handle_change(path, text, language): + if is_empty(path, text, language): + raise HTTPException(status_code=400, detail='缺少任意一项以下参数: "path", "text", "language"') + + if path != "" or path is not None: + default_refer.path = path + if text != "" or text is not None: + default_refer.text = text + if language != "" or language is not None: + 
default_refer.language = language + + print(f"[INFO] 当前默认参考音频路径: {default_refer.path}") + print(f"[INFO] 当前默认参考音频文本: {default_refer.text}") + print(f"[INFO] 当前默认参考音频语种: {default_refer.language}") + print(f"[INFO] is_ready: {default_refer.is_ready()}") + + return JSONResponse({"code": 0, "message": "Success"}, status_code=200) + + def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language): if command == "/restart": os.execl(g_config.python_exec, g_config.python_exec, *sys.argv) @@ -277,11 +316,11 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan or prompt_language == "" or prompt_language is None ): refer_wav_path, prompt_text, prompt_language = ( - default_refer_path, - default_refer_text, - default_refer_language, + default_refer.path, + default_refer.text, + default_refer.language, ) - if not has_preset: + if not default_refer.is_ready(): raise HTTPException(status_code=400, detail="未指定参考音频且接口无预设") with torch.no_grad(): @@ -301,6 +340,25 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan app = FastAPI() +@app.post("/change_refer") +async def change_refer(request: Request): + json_post_raw = await request.json() + return handle_change( + json_post_raw.get("path"), + json_post_raw.get("text"), + json_post_raw.get("language") + ) + + +@app.get("/change_refer") +async def change_refer( + path: str = None, + text: str = None, + language: str = None +): + return handle_change(path, text, language) + + @app.post("/") async def tts_endpoint(request: Request): json_post_raw = await request.json() From 9092ac6d77aff76db72ca6bc09f3426e8adf61c6 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Wed, 24 Jan 2024 21:40:36 +0800 Subject: [PATCH 050/126] Update ko_KR.json --- i18n/locale/ko_KR.json | 408 +++++++++++++++++++++++++++-------------- 1 file changed, 274 insertions(+), 134 deletions(-) diff --git a/i18n/locale/ko_KR.json b/i18n/locale/ko_KR.json index 816ed3f..fa53060 100644 --- a/i18n/locale/ko_KR.json +++ b/i18n/locale/ko_KR.json @@ -1,135 +1,275 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3이면 harvest 음높이 인식 결과에 중간값 필터를 사용합니다. 이 수치는 필터 반경이며, 사용하면 불명확한 음성을 어느정도 배제할 수 있습니다.", - "A模型权重": "A 모델 가중치", - "A模型路径": "A 모델 경로", - "B模型路径": "B 모델 경로", - "E:\\语音音频+标注\\米津玄师\\src": "E:\\음성 오디오+주석\\요네즈 켄시\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 곡선 파일, 선택 사항, 한 줄에 하나의 음높이, 기본 F0 및 음높이 변화를 대체함", - "Index Rate": "인덱스 비율", - "Onnx导出": "Onnx 내보내기", - "Onnx输出路径": "Onnx 출력 경로", - "RVC模型路径": "RVC 모델 경로", - "ckpt处理": "ckpt 처리", - "harvest进程数": "harvest 프로세스 수", - "index文件路径不可包含中文": "인덱스 파일 경로에는 중국어를 포함할 수 없습니다.", - "pth文件路径不可包含中文": "pth 파일 경로에는 중국어를 포함할 수 없습니다.", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe 카드 번호 구성: '-'로 구분하여 입력된 다른 프로세스 카드 번호, 예를 들어 0-0-1은 카드 0에서 2개의 프로세스를 실행하고 카드 1에서 1개의 프로세스를 실행", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 실험 설정을 작성합니다. 실험 데이터는 logs 아래에 있으며, 각 실험마다 하나의 폴더가 있습니다. 실험 이름 경로를 수동으로 입력해야 하며, 이 안에는 실험 설정, 로그, 훈련으로 얻은 모델 파일이 포함되어 있습니다.", - "step1:正在处理数据": "step1: 데이터 처리 중", - "step2:正在提取音高&正在提取特征": "step2: 음높이 추출 및 특성 추출 중", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. 
": "step2a: 훈련 폴더 아래 모든 오디오로 디코딩 가능한 파일을 자동으로 순회하고 슬라이스 정규화를 진행하여, 실험 디렉토리 아래에 2개의 wav 폴더를 생성합니다; 현재는 단일 사용자 훈련만 지원합니다.", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: CPU를 사용해 음높이를 추출합니다(모델이 음높이를 포함하는 경우), GPU를 사용해 특성을 추출합니다(카드 번호 선택)", - "step3: 填写训练设置, 开始训练模型和索引": "step3: 훈련 설정을 작성하고, 모델 및 인덱스 훈련을 시작합니다", - "step3a:正在训练模型": "step3a: 모델 훈련 중", - "一键训练": "원키 트레이닝", - "也可批量输入音频文件, 二选一, 优先读文件夹": "대량으로 오디오 파일 입력도 가능, 둘 중 하나 선택, 폴더 우선 읽기", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "인간 목소리와 반주 분리 대량 처리, UVR5 모델 사용.
올바른 폴더 경로 예: E:\\codes\\py39\\vits_vc_gpu\\백로서화 테스트 케이스(파일 탐색기 주소창에서 복사하면 됨).
모델은 세 가지 유형으로 나뉩니다:
1. 인간 목소리 보존: 하모니가 없는 오디오를 선택, 주요 인간 목소리를 HP5보다 더 잘 보존. 내장된 HP2와 HP3 모델, HP3는 약간의 반주를 놓칠 수 있지만 HP2보다는 인간 목소리를 조금 더 잘 보존합니다.
2. 오직 주요 인간 목소리 보존: 하모니가 있는 오디오를 선택, 주요 인간 목소리가 약간 약해질 수 있음. 내장된 HP5 모델 하나;
3. 울림 제거, 지연 제거 모델(by FoxJoy):
  (1)MDX-Net(onnx_dereverb): 양채널 울림에 대해서는 최선의 선택, 단채널 울림 제거 불가능;
 (234)DeEcho: 지연 효과 제거. Aggressive가 Normal보다 더 철저하게 제거하며, DeReverb는 추가로 울림 제거, 단일 채널 울림 제거 가능하지만 고주파 중심의 판형 울림은 완전히 제거하지 못함.
울림/지연 제거 시 참고:
1. DeEcho-DeReverb 모델의 처리 시간은 다른 두 DeEcho 모델의 거의 2배임;
2. MDX-Net-Dereverb 모델은 상당히 느림;
3. 개인적으로 추천하는 가장 깨끗한 구성은 MDX-Net 다음에 DeEcho-Aggressive 사용.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "-로 구분하여 입력한 카드 번호, 예를 들어 0-1-2는 카드0, 카드1, 카드2 사용", - "伴奏人声分离&去混响&去回声": "반주 및 인간 목소리 분리 & 울림 제거 & 에코 제거", - "使用模型采样率": "모델 샘플링 레이트 사용", - "使用设备采样率": "장치 샘플링 레이트 사용", - "保存名": "저장 이름", - "保存的文件名, 默认空为和源文件同名": "저장된 파일 이름, 기본값은 원본 파일과 동일", - "保存的模型名不带后缀": "저장된 모델 이름은 접미사 없음", - "保存频率save_every_epoch": "저장 빈도 save_every_epoch", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "청결한 자음과 숨소리를 보호하고, 전자음의 찢어짐과 같은 아티팩트를 방지하며, 0.5까지 끌어올리면 보호가 활성화되지 않으며, 낮추면 보호 강도는 증가하지만 인덱싱 효과는 감소할 수 있음", - "修改": "수정", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 수정(오직 weights 폴더에서 추출된 소형 모델 파일만 지원)", - "停止音频转换": "오디오 변환 중지", - "全流程结束!": "전체 과정 완료!", - "刷新音色列表和索引路径": "음색 목록 및 인덱스 경로 새로고침", - "加载模型": "모델 로드", - "加载预训练底模D路径": "사전 훈련된 베이스 모델 D 경로 로드", - "加载预训练底模G路径": "사전 훈련된 베이스 모델 G 경로 로드", - "单次推理": "단일 추론", - "卸载音色省显存": "음색 언로드로 메모리 절약", - "变调(整数, 半音数量, 升八度12降八度-12)": "변조(정수, 반음 수, 옥타브 상승 12, 옥타브 하강 -12)", - "后处理重采样至最终采样率,0为不进行重采样": "후처리로 최종 샘플링 레이트까지 리샘플링, 0은 리샘플링하지 않음", - "否": "아니오", - "启用相位声码器": "위상 보코더 활성화", - "响应阈值": "응답 임계값", - "响度因子": "소리 크기 인자", - "处理数据": "데이터 처리", - "导出Onnx模型": "Onnx 모델 내보내기", - "导出文件格式": "파일 형식 내보내기", - "常见问题解答": "자주 묻는 질문 답변", - "常规设置": "일반 설정", - "开始音频转换": "오디오 변환 시작", - "很遗憾您这没有能用的显卡来支持您训练": "유감스럽게도 훈련을 지원할 수 있는 그래픽 카드가 없습니다", - "性能设置": "성능 설정", - "总训练轮数total_epoch": "총 훈련 회차 total_epoch", - "批量推理": "대량 추론", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "대량 변환, 변환할 오디오 폴더 입력, 또는 여러 오디오 파일 업로드, 지정된 폴더(기본값 opt)에 변환된 오디오 출력.", - "指定输出主人声文件夹": "주인공 목소리 출력 폴더 지정", - "指定输出文件夹": "출력 파일 폴더 지정", - "指定输出非主人声文件夹": "비주인공 목소리 출력 폴더 지정", - "推理时间(ms):": "추론 시간(ms):", - "推理音色": "추론 음색", - "提取": "추출", - "提取音高和处理数据使用的CPU进程数": "음높이 추출 및 데이터 처리에 사용되는 CPU 프로세스 수", - "是": "예", - "是否仅保存最新的ckpt文件以节省硬盘空间": "디스크 공간을 절약하기 위해 가장 최신의 ckpt 파일만 저장할지 여부", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "매 저장 시점마다 최종 작은 모델을 weights 폴더에 저장할지 여부", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "모든 훈련 세트를 VRAM에 캐시할지 여부. 10분 미만의 작은 데이터는 훈련 속도를 높이기 위해 캐시할 수 있으나, 큰 데이터는 VRAM을 초과하여 큰 속도 향상을 기대할 수 없음.", - "显卡信息": "그래픽 카드 정보", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "이 소프트웨어는 MIT 라이선스로 오픈 소스이며, 작성자는 소프트웨어에 대한 어떠한 제어도 가지지 않으며, 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 사용자는 모든 책임을 져야 함.
이 조항을 인정하지 않는 경우, 소프트웨어 패키지 내의 어떠한 코드나 파일도 사용하거나 인용할 수 없음. 자세한 내용은 루트 디렉토리의 LICENSE를 참조.", - "查看": "보기", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 보기(오직 weights 폴더에서 추출된 작은 모델 파일만 지원)", - "检索特征占比": "특징 검색 비율", - "模型": "모델", - "模型推理": "모델 추론", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "모델 추출(로그 폴더 아래 대용량 모델 경로 입력), 중간에 훈련을 중단하고 싶은 경우나 작은 파일 모델을 자동으로 저장하지 않은 경우, 또는 중간 모델을 테스트하고 싶은 경우에 적합", - "模型是否带音高指导": "모델이 음높이 지도를 포함하는지 여부", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "모델이 음높이 지도를 포함하는지 여부(노래에는 필수, 말하기에는 선택적)", - "模型是否带音高指导,1是0否": "모델이 음높이 지도를 포함하는지 여부, 1은 '예', 0은 '아니오'", - "模型版本型号": "모델 버전 및 모델", - "模型融合, 可用于测试音色融合": "모델 통합, 음색 통합 테스트에 사용 가능", - "模型路径": "모델 경로", - "每张显卡的batch_size": "각 GPU의 batch_size", - "淡入淡出长度": "페이드 인/아웃 길이", - "版本": "버전", - "特征提取": "특징 추출", - "特征检索库文件路径,为空则使用下拉的选择结果": "특징 검색 라이브러리 파일 경로, 비어 있으면 드롭다운 선택 결과 사용", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "남성에서 여성으로 전환 시 +12키 추천, 여성에서 남성으로 전환 시 -12키 추천, 음역대 폭발로 음색 왜곡이 발생할 경우 적절한 음역대로 조정 가능.", - "目标采样率": "목표 샘플링 비율", - "算法延迟(ms):": "알고리즘 지연(ms):", - "自动检测index路径,下拉式选择(dropdown)": "index 경로 자동 감지, 드롭다운 선택", - "融合": "통합", - "要改的模型信息": "수정할 모델 정보", - "要置入的模型信息": "삽입할 모델 정보", - "训练": "훈련", - "训练模型": "모델 훈련", - "训练特征索引": "특징 인덱스 훈련", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "훈련이 완료되었습니다. 콘솔 훈련 로그나 실험 폴더 내의 train.log를 확인하세요.", - "请指定说话人id": "화자 id를 지정해주세요.", - "请选择index文件": "index 파일을 선택해주세요.", - "请选择pth文件": "pth 파일을 선택해주세요.", - "请选择说话人id": "화자 id를 선택해주세요.", - "转换": "변환", - "输入实验名": "실험명을 입력하세요.", - "输入待处理音频文件夹路径": "처리할 오디오 파일 폴더 경로를 입력하세요.", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "처리할 오디오 파일 폴더 경로를 입력하세요(파일 관리자의 주소 표시줄에서 복사하세요).", - "输入待处理音频文件路径(默认是正确格式示例)": "처리할 오디오 파일 경로를 입력하세요(기본값은 올바른 형식의 예시입니다).", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "원본 볼륨 엔벨로프와 출력 볼륨 엔벨로프의 혼합 비율을 입력하세요. 1에 가까울수록 출력 엔벨로프를 더 많이 사용합니다.", - "输入监听": "모니터링 입력", - "输入训练文件夹路径": "학습시킬 파일 폴더의 경로를 입력하세요.", - "输入设备": "입력 장치", - "输入降噪": "입력 노이즈 감소", - "输出信息": "출력 정보", - "输出变声": "음성 변환 출력", - "输出设备": "출력 장치", - "输出降噪": "출력 노이즈 감소", - "输出音频(右下角三个点,点了可以下载)": "오디오 출력(오른쪽 하단 세 개의 점, 클릭하면 다운로드 가능)", - "选择.index文件": ".index 파일 선택", - "选择.pth文件": ".pth 파일 선택", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "음고 추출 알고리즘을 선택하세요. 노래 입력 시 pm으로 속도를 높일 수 있으며, harvest는 저음이 좋지만 매우 느리고, crepe는 효과가 좋지만 GPU를 많이 사용합니다.", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "음고 추출 알고리즘을 선택하세요. 노래 입력 시 pm으로 속도를 높일 수 있고, harvest는 저음이 좋지만 매우 느리며, crepe는 효과가 좋지만 GPU를 많이 사용하고, rmvpe는 가장 좋은 효과를 내면서 GPU를 적게 사용합니다.", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "음고 추출 알고리즘 선택: 노래 입력 시 pm으로 속도를 높일 수 있으며, 고품질 음성이지만 CPU가 낮을 때는 dio로 속도를 높일 수 있고, harvest는 품질이 더 좋지만 느리며, rmvpe는 최고의 효과를 내면서 CPU/GPU를 적게 사용합니다.", - "采样率:": "샘플링 레이트:", - "采样长度": "샘플링 길이", - "重载设备列表": "장치 목록 리로드", - "音调设置": "음조 설정", - "音频设备(请使用同种类驱动)": "오디오 장치(동일한 유형의 드라이버를 사용해주세요)", - "音高算法": "음고 알고리즘", - "额外推理时长": "추가적인 추론 시간" -} + "很遗憾您这没有能用的显卡来支持您训练": "아쉽게도 훈련을 지원할 수 있는 사용 가능한 그래픽 카드가 없습니다", + "UVR5已开启": "UVR5가 활성화되었습니다", + "UVR5已关闭": "UVR5가 비활성화되었습니다", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "본 소프트웨어는 MIT 라이선스로 오픈 소스로 제공되며, 제작자는 소프트웨어에 대해 어떠한 제어력도 가지지 않습니다. 소프트웨어 사용자 및 소프트웨어에서 내보낸 소리를 전파하는 자는 전적으로 책임져야 합니다.
이 조항을 인정하지 않으면 소프트웨어의 코드 및 파일을 사용하거나 인용할 수 없습니다. 루트 디렉터리의 LICENSE를 참조하십시오.", + "0-前置数据集获取工具": "0-전방 데이터 세트 수집 도구", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 보컬 및 반주 분리 및 에코 및 지연 제거 도구", + "是否开启UVR5-WebUI": "UVR5-WebUI를 열까요?", + "UVR5进程输出信息": "UVR5 프로세스 출력 정보", + "0b-语音切分工具": "0b-음성 분리 도구", + "音频自动切分输入路径,可文件可文件夹": "오디오 자동 분리 입력 경로, 파일 또는 폴더 가능", + "切分后的子音频的输出根目录": "분리된 하위 오디오의 출력 기본 디렉터리", + "threshold:音量小于这个值视作静音的备选切割点": "임계 값: 이 값보다 작은 볼륨은 대체 분리 지점으로 간주됩니다.", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "최소 길이: 각 세그먼트의 최소 길이. 첫 번째 세그먼트가 너무 짧으면 계속해서 뒷부분과 연결하여 이 값 이상이 될 때까지", + "min_interval:最短切割间隔": "최소 분리 간격", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop 크기: 볼륨 곡선을 계산하는 방법. 작을수록 정확도가 높아지지만 계산량이 높아집니다 (정확도가 높다고 효과가 좋아지지 않음)", + "max_sil_kept:切完后静音最多留多长": "최대 유지되는 정적 길이 (분리 후)", + "开启语音切割": "음성 분리 활성화", + "终止语音切割": "음성 분리 종료", + "max:归一化后最大值多少": "최대 값 (정규화 후)", + "alpha_mix:混多少比例归一化后音频进来": "알파 믹스: 정규화된 오디오가 들어오는 비율", + "切割使用的进程数": "사용되는 프로세스 수로 자르기", + "语音切割进程输出信息": "음성 분리 프로세스 출력 정보", + "0c-中文批量离线ASR工具": "0c-중국어 대량 오프라인 ASR 도구", + "开启离线批量ASR": "오프라인 대량 ASR 활성화", + "终止ASR进程": "ASR 프로세스 종료", + "批量ASR(中文only)输入文件夹路径": "대량 ASR (중국어 전용) 입력 폴더 경로", + "ASR进程输出信息": "ASR 프로세스 출력 정보", + "0d-语音文本校对标注工具": "0d-음성 텍스트 교정 주석 도구", + "是否开启打标WebUI": "웹 기반 주석 활성화 여부", + "打标数据标注文件路径": "주석 데이터 주석 파일 경로", + "打标工具进程输出信息": "주석 도구 프로세스 출력 정보", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*실험/모델 이름", + "显卡信息": "그래픽 카드 정보", + "预训练的SoVITS-G模型路径": "사전 훈련된 SoVITS-G 모델 경로", + "预训练的SoVITS-D模型路径": "사전 훈련된 SoVITS-D 모델 경로", + "预训练的GPT模型路径": "사전 훈련된 GPT 모델 경로", + "1A-训练集格式化工具": "1A-훈련 세트 형식 지정 도구", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "logs/실험 이름 디렉터리에는 23456으로 시작하는 파일과 폴더가 있어야 함", + "*文本标注文件": "*텍스트 주석 파일", + "*训练集音频文件目录": "*훈련 세트 오디오 파일 디렉터리", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "훈련 세트 오디오 파일 디렉터리 - 목록 파일에 해당하는 원형 이름 연결", + "1Aa-文本内容": "1Aa-텍스트 내용", + "GPU卡号以-分割,每个卡号一个进程": "GPU 카드 번호는 -로 구분되며 각 카드 번호에 하나의 프로세스가 있어야 함", + "预训练的中文BERT模型路径": "사전 훈련된 중국어 BERT 모델 경로", + "开启文本获取": "텍스트 추출 활성화", + "终止文本获取进程": "텍스트 추출 프로세스 종료", + "文本进程输出信息": "텍스트 프로세스 출력 정보", + "1Ab-SSL自监督特征提取": "1Ab-SSL 자기 지도 특징 추출", + "预训练的SSL模型路径": "사전 훈련된 SSL 모델 경로", + "开启SSL提取": "SSL 추출 활성화", + "终止SSL提取进程": "SSL 추출 프로세스 종료", + "SSL进程输出信息": "SSL 프로세스 출력 정보", + "1Ac-语义token提取": "1Ac-의미 토큰 추출", + "开启语义token提取": "의미 토큰 추출 활성화", + "终止语义token提取进程": "의미 토큰 추출 프로세스 종료", + "语义token提取进程输出信息": "의미 토큰 추출 프로세스 출력 정보", + "1Aabc-训练集格式化一键三连": "1Aabc-훈련 세트 형식 지정 일괄 처리", + "开启一键三连": "일괄 처리 활성화", + "终止一键三连": "일괄 처리 종료", + "一键三连进程输出信息": "일괄 처리 프로세스 출력 정보", + "1B-微调训练": "1B-미세 조정 훈련", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS 훈련. 공유 용 모델 파일은 SoVITS_weights 하위에 출력됩니다.", + "每张显卡的batch_size": "각 그래픽 카드의 배치 크기", + "总训练轮数total_epoch,不建议太高": "총 훈련 라운드 수 (total_epoch), 너무 높지 않게 권장됨", + "文本模块学习率权重": "텍스트 모듈 학습률 가중치", + "保存频率save_every_epoch": "저장 빈도 (각 라운드마다)", + "是否仅保存最新的ckpt文件以节省硬盘空间": "디스크 공간을 절약하기 위해 최신 ckpt 파일만 저장할지 여부", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "각 저장 시간에 최종 작은 모델을 weights 폴더에 저장할지 여부", + "开启SoVITS训练": "SoVITS 훈련 활성화", + "终止SoVITS训练": "SoVITS 훈련 종료", + "SoVITS训练进程输出信息": "SoVITS 훈련 프로세스 출력 정보", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT 훈련. 공유 용 모델 파일은 GPT_weights 하위에 출력됩니다.", + "总训练轮数total_epoch": "총 훈련 라운드 수 (total_epoch)", + "开启GPT训练": "GPT 훈련 활성화", + "终止GPT训练": "GPT 훈련 종료", + "GPT训练进程输出信息": "GPT 훈련 프로세스 출력 정보", + "1C-推理": "1C-추론", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "SoVITS_weights 및 GPT_weights에 저장된 훈련 완료된 모델 중 선택. 
기본적으로 하나는 기본 모델이며 5초 Zero Shot TTS를 체험할 수 있습니다.", + "*GPT模型列表": "*GPT 모델 목록", + "*SoVITS模型列表": "*SoVITS 모델 목록", + "GPU卡号,只能填1个整数": "GPU 카드 번호, 1개의 정수만 입력 가능", + "刷新模型路径": "모델 경로 새로 고침", + "是否开启TTS推理WebUI": "TTS 추론 WebUI 활성화 여부", + "TTS推理WebUI进程输出信息": "TTS 추론 WebUI 프로세스 출력 정보", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-음성 변환", + "施工中,请静候佳音": "공사 중입니다. 기다려주십시오.", + "TTS推理进程已开启": "TTS 추론 프로세스가 열렸습니다", + "TTS推理进程已关闭": "TTS 추론 프로세스가 닫혔습니다", + "打标工具WebUI已开启": "주석 도구 WebUI가 열렸습니다", + "打标工具WebUI已关闭": "주석 도구 WebUI가 닫혔습니다", + "*请上传并填写参考信息": "*참고 정보를 업로드하고 입력하십시오", + "*请填写需要合成的目标文本": "*합성할 대상 텍스트를 입력하십시오", + "ASR任务开启:%s": "ASR 작업 시작: %s", + "GPT训练完成": "GPT 훈련 완료", + "GPT训练开始:%s": "GPT 훈련 시작: %s", + "SSL提取进程执行中": "SSL 추출 프로세스 실행 중", + "SSL提取进程结束": "SSL 추출 프로세스 종료", + "SoVITS训练完成": "SoVITS 훈련 완료", + "SoVITS训练开始:%s": "SoVITS 훈련 시작: %s", + "一键三连中途报错": "일괄 처리 중 오류 발생", + "一键三连进程结束": "일괄 처리 프로세스 종료", + "中文": "중국어", + "凑50字一切": "50자를 채우십시오", + "凑五句一切": "다섯 문장을 채우십시오", + "切分后文本": "분리된 텍스트", + "切割执行中": "분리 진행 중", + "切割结束": "분리 종료", + "参考音频的文本": "참고 오디오의 텍스트", + "参考音频的语种": "참고 오디오의 언어", + "合成语音": "합성 음성", + "后续将支持混合语种编码文本输入。": "향후 혼합 언어 코딩 텍스트 입력을 지원할 예정입니다.", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "이미 진행 중인 ASR 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "이미 진행 중인 GPT 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "이미 진행 중인 SSL 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "이미 진행 중인 SoVITS 훈련 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "이미 진행 중인 일괄 처리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "이미 진행 중인 분리 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "이미 진행 중인 텍스트 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "이미 진행 중인 의미 토큰 추출 작업이 있습니다. 다음 작업을 시작하려면 먼저 종료하십시오.", + "已终止ASR进程": "ASR 프로세스 종료됨", + "已终止GPT训练": "GPT 훈련 종료됨", + "已终止SoVITS训练": "SoVITS 훈련 종료됨", + "已终止所有1a进程": "모든 1a 프로세스 종료됨", + "已终止所有1b进程": "모든 1b 프로세스 종료됨", + "已终止所有一键三连进程": "모든 일괄 처리 프로세스 종료됨", + "已终止所有切割进程": "모든 분리 프로세스 종료됨", + "已终止所有语义token进程": "모든 의미 토큰 프로세스 종료됨", + "按中文句号。切": "중국어 문장으로 분리하십시오.", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "텍스트 분리 도구. 너무 긴 텍스트는 합성 결과가 항상 좋지 않을 수 있으므로 너무 길면 먼저 분리하는 것이 좋습니다. 
합성은 텍스트 줄 바꿈을 기준으로 분리되어 다시 조합됩니다.", + "文本进程执行中": "텍스트 프로세스 실행 중", + "文本进程结束": "텍스트 프로세스 종료", + "日文": "일본어", + "英文": "영어", + "语义token提取进程执行中": "의미 토큰 추출 프로세스 실행 중", + "语义token提取进程结束": "의미 토큰 추출 프로세스 종료", + "请上传参考音频": "참고 오디오를 업로드하십시오", + "输入路径不存在": "입력 경로가 존재하지 않습니다", + "输入路径存在但既不是文件也不是文件夹": "입력 경로가 파일이나 폴더가 아닙니다", + "输出的语音": "출력 음성", + "进度:1a-done": "진행: 1a-done", + "进度:1a-done, 1b-ing": "진행: 1a-done, 1b-ing", + "进度:1a-ing": "진행: 1a-ing", + "进度:1a1b-done": "진행: 1a1b-done", + "进度:1a1b-done, 1cing": "진행: 1a1b-done, 1cing", + "进度:all-done": "진행: all-done", + "需要合成的切分前文本": "합성해야 할 분할 전 텍스트", + "需要合成的文本": "합성해야 할 텍스트", + "需要合成的语种": "합성해야 할 언어", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3이면 harvest 음고 인식 결과에 중앙값 필터를 사용하며, 값은 필터 반경이며 사용하면 소리를 약하게 할 수 있습니다", + "A模型权重": "A 모델 가중치", + "A模型路径": "A 모델 경로", + "B模型路径": "B 모델 경로", + "E:\\语音音频+标注\\米津玄师\\src": "E:\\음성 오디오 + 주석\\Miyuki Kenshi\\src", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 곡선 파일, 선택 사항, 한 줄에 하나의 음고, 기본 F0 및 음조 대신 사용", + "Index Rate": "인덱스 비율", + "Onnx导出": "Onnx 내보내기", + "Onnx输出路径": "Onnx 출력 경로", + "RVC模型路径": "RVC 모델 경로", + "ckpt处理": "ckpt 처리", + "harvest进程数": "harvest 프로세스 수", + "index文件路径不可包含中文": "인덱스 파일 경로에는 중국어를 포함할 수 없습니다", + "pth文件路径不可包含中文": "pth 파일 경로에는 중국어를 포함할 수 없습니다", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe 카드 번호 구성: 각 입력에 사용되는 다른 프로세스 카드를 -로 구분하여 입력하십시오. 예: 0-0-1은 카드 0에서 2개의 프로세스를 실행하고 카드 1에서 1개의 프로세스를 실행합니다", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 실험 구성 입력. 실험 데이터는 logs 하위에 있으며 각 실험에 대한 폴더가 있어야합니다. 실험 이름 경로를 수동으로 입력해야하며 실험 구성, 로그, 훈련된 모델 파일이 포함되어 있습니다.", + "step1:正在处理数据": "step1: 데이터 처리 중", + "step2:正在提取音高&正在提取特征": "step2: 음고 추출 및 특징 추출 중", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 자동으로 훈련 폴더에서 오디오로 디코딩할 수 있는 모든 파일을 반복하고 슬라이스 정규화를 수행하여 실험 디렉토리에 2 개의 wav 폴더를 생성합니다. 현재 단일 훈련만 지원됩니다.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: CPU로 음고 추출(모델이 음고를 지원하는 경우), GPU로 특징 추출(카드 번호 선택)", + "step3: 填写训练设置, 开始训练模型和索引": "step3: 훈련 설정 입력, 모델 및 인덱스 훈련 시작", + "step3a:正在训练模型": "step3a: 모델 훈련 중", + "一键训练": "일괄 훈련", + "也可批量输入音频文件, 二选一, 优先读文件夹": "오디오 파일을 일괄로 입력할 수도 있습니다. 둘 중 하나를 선택하고 폴더를 읽기를 우선합니다.", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "-로 구분하여 입력에 사용되는 카드 번호를 지정하십시오. 예 : 0-1-2는 카드 0, 1 및 2를 사용합니다", + "伴奏人声分离&去混响&去回声": "반주 및 보컬 분리 & 리버브 제거 & 에코 제거", + "使用模型采样率": "모델 샘플링 속도 사용", + "使用设备采样率": "기기 샘플링 속도 사용", + "保存名": "저장 이름", + "保存的文件名, 默认空为和源文件同名": "저장할 파일 이름, 기본적으로 공백은 원본 파일과 동일한 이름입니다", + "保存的模型名不带后缀": "저장할 모델 이름에는 확장자가 없습니다", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "클리어 자음 및 숨소를 보호하여 전자 음향 찢김과 같은 아티팩트를 방지하려면 0.5로 설정하되, 보호 강도를 높이려면 0.5로 당기지 않고 낮추면 인덱스 효과가 감소할 수 있습니다", + "修改": "수정", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보 수정 (weights 폴더에서 추출된 작은 모델 파일만 지원됨)", + "停止音频转换": "오디오 변환 중지", + "全流程结束!": "전체 프로세스 완료!", + "刷新音色列表和索引路径": "음색 목록 및 인덱스 경로 새로 고침", + "加载模型": "모델 로드", + "加载预训练底模D路径": "사전 훈련된 기본 모델 D 경로 로드", + "加载预训练底模G路径": "사전 훈련된 기본 모델 G 경로 로드", + "单次推理": "단일 추론", + "卸载音色省显存": "음색 언로드 및 GPU 메모리 절약", + "变调(整数, 半音数量, 升八度12降八度-12)": "음높이 변경(정수, 반음 수, 올림 높이 12 내림 높이 -12)", + "后处理重采样至最终采样率,0为不进行重采样": "후 처리를 통한 최종 샘플링률 재샘플링, 0은 재샘플링 미실행", + "否": "아니오", + "启用相位声码器": "페이즈 보코더 사용", + "响应阈值": "응답 임계값", + "响度因子": "음량 요소", + "处理数据": "데이터 처리", + "导出Onnx模型": "Onnx 모델 내보내기", + "导出文件格式": "내보내기 파일 형식", + "常见问题解答": "자주 묻는 질문 해결", + "常规设置": "일반 설정", + "开始音频转换": "오디오 변환 시작", + "性能设置": "성능 설정", + "批量推理": "일괄 추론", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. 
": "일괄 변환, 변환 대기 중인 오디오 폴더를 입력하거나 여러 오디오 파일을 업로드하고 지정된 폴더(opt 기본값)에 변환된 오디오를 출력합니다.", + "指定输出主人声文件夹": "지정된 주인 목소리 출력 폴더", + "指定输出文件夹": "지정된 출력 폴더", + "指定输出非主人声文件夹": "지정된 비주인 목소리 출력 폴더", + "推理时间(ms):": "추론 시간(ms):", + "推理音色": "추론 음색", + "提取": "추출", + "提取音高和处理数据使用的CPU进程数": "음높이 추출 및 데이터 처리에 사용되는 CPU 프로세스 수 추출", + "是": "예", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "모든 훈련 세트를 GPU 메모리에 캐시할지 여부. 10분 미만의 소량 데이터는 훈련 속도를 높이기 위해 캐시할 수 있지만, 대량 데이터를 캐시하면 메모리가 터지고 속도가 크게 향상되지 않을 수 있습니다.", + "查看": "보기", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "모델 정보보기(작은 모델 파일로 추출된 weights 폴더에서만 지원)", + "检索特征占比": "특징 비율 검색", + "模型": "모델", + "模型推理": "모델 추론", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "모델 추출(로그 폴더에 대형 파일 모델 경로 입력), 반 훈련하고 싶지 않거나 모델이 자동으로 작은 파일 모델로 추출되지 않았거나 중간 모델을 테스트하려는 경우에 사용", + "模型是否带音高指导": "모델에 음높이 안내가 있는지 여부", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "모델에 음높이 안내가 있는지 여부(노래에는 필수, 음성은 선택 사항)", + "模型是否带音高指导,1是0否": "모델에 음높이 안내가 있는지 여부, 1이면 있음 0이면 없음", + "模型版本型号": "모델 버전 및 모델 번호", + "模型融合, 可用于测试音色融合": "모델 통합, 음색 통합 테스트에 사용 가능", + "模型路径": "모델 경로", + "淡入淡出长度": "페이드 인/아웃 길이", + "版本": "버전", + "特征提取": "특성 추출", + "特征检索库文件路径,为空则使用下拉的选择结果": "특성 검색 라이브러리 파일 경로, 비어 있으면 드롭다운 선택 결과 사용", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "남성을 여성으로 추천 +12키, 여성을 남성으로 추천 -12키, 음역 폭발로 음색이 왜곡되면 적절한 음역으로 직접 조절 가능", + "目标采样率": "목표 샘플링률", + "算法延迟(ms):": "알고리즘 지연 시간(ms):", + "自动检测index路径,下拉式选择(dropdown)": "자동으로 index 경로 감지, 드롭다운 선택", + "融合": "융합", + "要改的模型信息": "수정할 모델 정보", + "要置入的模型信息": "삽입할 모델 정보", + "训练": "훈련", + "训练模型": "모델 훈련", + "训练特征索引": "특성 인덱스 훈련", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "훈련 종료, 콘솔 훈련 로그 또는 실험 폴더의 train.log를 확인할 수 있습니다", + "请指定说话人id": "화자 ID 지정", + "请选择index文件": "index 파일 선택", + "请选择pth文件": "pth 파일 선택", + "请选择说话人id": "화자 ID 선택", + "转换": "변환", + "输入实验名": "실험명 입력", + "输入待处理音频文件夹路径": "처리 대기 중인 오디오 폴더 경로 입력", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "처리 대기 중인 오디오 폴더 경로 입력(파일 관리자 주소 표시 줄에서 복사하면 됨)", + "输入待处理音频文件路径(默认是正确格式示例)": "처리 대기 중인 오디오 파일 경로 입력(기본적으로 올바른 형식의 예제)", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "소스 음량 에너벌롭을 입력하여 출력 음량 에너벌롭 합성 비율을 대체하면 1에 가까울수록 출력 에너벌롭 사용", + "输入监听": "입력 모니터링", + "输入训练文件夹路径": "훈련 폴더 경로 입력", + "输入设备": "입력 장치", + "输入降噪": "노이즈 감소 입력", + "输出信息": "출력 정보", + "输出变声": "음성 출력", + "输出设备": "출력 장치", + "输出降噪": "노이즈 감소 출력", + "输出音频(右下角三个点,点了可以下载)": "출력 오디오(우하단 세 점, 클릭하면 다운로드 가능)", + "选择.index文件": "index 파일 선택", + "选择.pth文件": "pth 파일 선택", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "음높이 추출 알고리즘 선택, 노래 입력에 pm 사용 가능, harvest는 저음이 좋지만 매우 느림, crepe 효과는 좋지만 GPU 사용, rmvpe 효과가 가장 좋으며 약간의 GPU 사용", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "음높이 추출 알고리즘 선택: 노래 입력에 pm 사용 가능, 고품질 음성이지만 CPU가 낮음, dio 사용 가능, harvest 품질이 더 좋지만 느림, rmvpe 효과가 최고이며 CPU/GPU 약간 사용", + "采样率:": "샘플링률:", + "采样长度": "샘플링 길이", + "重载设备列表": "장치 목록 다시로드", + "音调设置": "음조 설정", + "音频设备(请使用同种类驱动)": "오디오 장치(동일한 유형의 드라이버 사용 권장)", + "音高算法": "음높이 알고리즘", + "额外推理时长": "추가 추론 시간" + } From c24687f620b959972fdd47580b9c0fb14a04cb9e Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:16:20 +0800 Subject: [PATCH 051/126] Update requirements.txt --- requirements.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index a8e72ea..1bafeef 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,20 +4,21 @@ tensorboard 
librosa==0.9.2 numba==0.56.4 pytorch-lightning -gradio==3.14.0 +gradio==3.38.0 ffmpeg-python onnxruntime tqdm -funasr +funasr==0.8.7 cn2an pypinyin pyopenjtalk g2p_en torchaudio -modelscope +modelscope==1.10.0 sentencepiece transformers chardet PyYAML psutil jieba_fast +jieba From 249561e5a18576010df6587c274d38cbd9e18b4b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:30:39 +0800 Subject: [PATCH 052/126] Add files via upload --- GPT_SoVITS/inference_webui.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index fd04ac8..98dab28 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -1,4 +1,9 @@ -import os,re +import os,re,logging +logging.getLogger("markdown_it").setLevel(logging.ERROR) +logging.getLogger("urllib3").setLevel(logging.ERROR) +logging.getLogger("httpcore").setLevel(logging.ERROR) +logging.getLogger("httpx").setLevel(logging.ERROR) +logging.getLogger("asyncio").setLevel(logging.ERROR) import pdb gpt_path = os.environ.get( From 80c9acc43b9f287c363c9992e7fc5262cd3994c6 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:51:36 +0800 Subject: [PATCH 053/126] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 4bf6581..4a89637 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,8 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb +AutoDL Cloud Docker Training (for users in China region): https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official + ## Features: 1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion. From 2bdbfbccec3a807b6d08c922a8fb11a92f07fc95 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:52:20 +0800 Subject: [PATCH 054/126] Update README.md --- docs/cn/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/cn/README.md b/docs/cn/README.md index 072dc0d..2c63814 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -23,6 +23,8 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb +中国地区用户可使用AutoDL云端镜像进行体验:https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official + ## 功能: 1. **零样本文本到语音(TTS):** 输入5秒的声音样本,即刻体验文本到语音转换。 From 658d0dad6376973b5176e89a2fc2016802ff4fd5 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 00:53:28 +0800 Subject: [PATCH 055/126] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4a89637..83538a9 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb -AutoDL Cloud Docker Training (for users in China region): https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official +For users in China region, you can use AutoDL Cloud Docker to experience the full functionality online: https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official ## Features: 1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion. From 02da15c996dca916c3ff29327ef5ac9a466b92dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?= =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?= =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?= <40709280+NaruseMioShirakana@users.noreply.github.com> Date: Thu, 25 Jan 2024 02:30:08 +0800 Subject: [PATCH 056/126] Add Onnx Export --- GPT_SoVITS/onnx_export.py | 314 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 314 insertions(+) create mode 100644 GPT_SoVITS/onnx_export.py diff --git a/GPT_SoVITS/onnx_export.py b/GPT_SoVITS/onnx_export.py new file mode 100644 index 0000000..f08679f --- /dev/null +++ b/GPT_SoVITS/onnx_export.py @@ -0,0 +1,314 @@ +from module.models_onnx import SynthesizerTrn, symbols +from AR.models.t2s_lightning_module_onnx import Text2SemanticLightningModule +import torch +import torchaudio +from torch import nn +from feature_extractor import cnhubert +cnhubert_base_path = "pretrained_models/chinese-hubert-base" +cnhubert.cnhubert_base_path=cnhubert_base_path +ssl_model = cnhubert.get_model() +from text import cleaned_text_to_sequence +import soundfile +from my_utils import load_audio +import os +import json + +def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): + hann_window = torch.hann_window(win_size).to( + dtype=y.dtype, device=y.device + ) + y = torch.nn.functional.pad( + y.unsqueeze(1), + (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), + mode="reflect", + ) + y = y.squeeze(1) + spec = torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window, + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=False, + ) + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + return spec + + +class DictToAttrRecursive(dict): + def __init__(self, input_dict): + super().__init__(input_dict) + for key, value in input_dict.items(): + if isinstance(value, dict): + value = DictToAttrRecursive(value) + self[key] = value + setattr(self, key, value) + + def __getattr__(self, item): + try: + return self[item] + except KeyError: + raise AttributeError(f"Attribute {item} not found") + + def __setattr__(self, key, value): + if isinstance(value, dict): + value = DictToAttrRecursive(value) + super(DictToAttrRecursive, self).__setitem__(key, value) + super().__setattr__(key, value) + + def __delattr__(self, item): + try: + del self[item] + except KeyError: + raise AttributeError(f"Attribute {item} not found") + + +class T2SEncoder(nn.Module): + def __init__(self, t2s, vits): + super().__init__() + self.encoder = t2s.onnx_encoder + self.vits = vits + + def forward(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content): + codes = self.vits.extract_latent(ssl_content) + prompt_semantic = codes[0, 0] + bert = torch.cat([ref_bert.transpose(0, 1), text_bert.transpose(0, 1)], 1) + all_phoneme_ids = torch.cat([ref_seq, text_seq], 1) + bert = bert.unsqueeze(0) + prompt = 
prompt_semantic.unsqueeze(0) + return self.encoder(all_phoneme_ids, bert), prompt + + +class T2SModel(nn.Module): + def __init__(self, t2s_path, vits_model): + super().__init__() + dict_s1 = torch.load(t2s_path, map_location="cpu") + self.config = dict_s1["config"] + self.t2s_model = Text2SemanticLightningModule(self.config, "ojbk", is_train=False) + self.t2s_model.load_state_dict(dict_s1["weight"]) + self.t2s_model.eval() + self.vits_model = vits_model.vq_model + self.hz = 50 + self.max_sec = self.config["data"]["max_sec"] + self.t2s_model.model.top_k = torch.LongTensor([self.config["inference"]["top_k"]]) + self.t2s_model.model.early_stop_num = torch.LongTensor([self.hz * self.max_sec]) + self.t2s_model = self.t2s_model.model + self.t2s_model.init_onnx() + self.onnx_encoder = T2SEncoder(self.t2s_model, self.vits_model) + self.first_stage_decoder = self.t2s_model.first_stage_decoder + self.stage_decoder = self.t2s_model.stage_decoder + #self.t2s_model = torch.jit.script(self.t2s_model) + + def forward(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content): + early_stop_num = self.t2s_model.early_stop_num + + #[1,N] [1,N] [N, 1024] [N, 1024] [1, 768, N] + x, prompts = self.onnx_encoder(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + + prefix_len = prompts.shape[1] + + #[1,N,512] [1,N] + y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) + + stop = False + for idx in range(1, 1500): + #[1, N] [N_layer, N, 1, 512] [N_layer, N, 1, 512] [1, N, 512] [1] [1, N, 512] [1, N] + enco = self.stage_decoder(y, k, v, y_emb, x_example) + y, k, v, y_emb, logits, samples = enco + if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: + stop = True + if torch.argmax(logits, dim=-1)[0] == self.t2s_model.EOS or samples[0, 0] == self.t2s_model.EOS: + stop = True + if stop: + break + y[0, -1] = 0 + + return y[:, -idx:].unsqueeze(0) + + def export(self, ref_seq, text_seq, ref_bert, text_bert, ssl_content, project_name, dynamo=False): + #self.onnx_encoder = torch.jit.script(self.onnx_encoder) + if dynamo: + export_options = torch.onnx.ExportOptions(dynamic_shapes=True) + onnx_encoder_export_output = torch.onnx.dynamo_export( + self.onnx_encoder, + (ref_seq, text_seq, ref_bert, text_bert, ssl_content), + export_options=export_options + ) + onnx_encoder_export_output.save(f"onnx/{project_name}/{project_name}_t2s_encoder.onnx") + return + torch.onnx.export( + self.onnx_encoder, + (ref_seq, text_seq, ref_bert, text_bert, ssl_content), + f"onnx/{project_name}/{project_name}_t2s_encoder.onnx", + input_names=["ref_seq", "text_seq", "ref_bert", "text_bert", "ssl_content"], + output_names=["x", "prompts"], + dynamic_axes={ + "ref_seq": [1], + "text_seq": [1], + "ref_bert": [0], + "text_bert": [0], + "ssl_content": [2], + }, + opset_version=16 + ) + x, prompts = self.onnx_encoder(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + torch.exp + torch.onnx.export( + self.first_stage_decoder, + (x, prompts), + f"onnx/{project_name}/{project_name}_t2s_fsdec.onnx", + input_names=["x", "prompts"], + output_names=["y", "k", "v", "y_emb", "x_example"], + dynamic_axes={ + "x": [1], + "prompts": [1], + }, + verbose=True, + opset_version=16 + ) + y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts) + + torch.onnx.export( + self.stage_decoder, + (y, k, v, y_emb, x_example), + f"onnx/{project_name}/{project_name}_t2s_sdec.onnx", + input_names=["iy", "ik", "iv", "iy_emb", "ix_example"], + output_names=["y", "k", "v", "y_emb", "logits", "samples"], + dynamic_axes={ + "iy": [1], + 
"ik": [1], + "iv": [1], + "iy_emb": [1], + "ix_example": [1], + }, + verbose=True, + opset_version=16 + ) + + +class VitsModel(nn.Module): + def __init__(self, vits_path): + super().__init__() + dict_s2 = torch.load(vits_path,map_location="cpu") + self.hps = dict_s2["config"] + self.hps = DictToAttrRecursive(self.hps) + self.hps.model.semantic_frame_rate = "25hz" + self.vq_model = SynthesizerTrn( + self.hps.data.filter_length // 2 + 1, + self.hps.train.segment_size // self.hps.data.hop_length, + n_speakers=self.hps.data.n_speakers, + **self.hps.model + ) + self.vq_model.eval() + self.vq_model.load_state_dict(dict_s2["weight"], strict=False) + + def forward(self, text_seq, pred_semantic, ref_audio): + refer = spectrogram_torch( + ref_audio, + self.hps.data.filter_length, + self.hps.data.sampling_rate, + self.hps.data.hop_length, + self.hps.data.win_length, + center=False + ) + return self.vq_model(pred_semantic, text_seq, refer)[0, 0] + + +class GptSoVits(nn.Module): + def __init__(self, vits, t2s): + super().__init__() + self.vits = vits + self.t2s = t2s + + def forward(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content): + pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + return self.vits(text_seq, pred_semantic, ref_audio) + + def export(self, ref_seq, text_seq, ref_bert, text_bert, ref_audio, ssl_content, project_name): + self.t2s.export(ref_seq, text_seq, ref_bert, text_bert, ssl_content, project_name) + pred_semantic = self.t2s(ref_seq, text_seq, ref_bert, text_bert, ssl_content) + torch.onnx.export( + self.vits, + (text_seq, pred_semantic, ref_audio), + f"onnx/{project_name}/{project_name}_vits.onnx", + input_names=["text_seq", "pred_semantic", "ref_audio"], + output_names=["audio"], + dynamic_axes={ + "text_seq": [1], + "pred_semantic": [2], + "ref_audio": [1], + }, + opset_version=17 + ) + + +class SSLModel(nn.Module): + def __init__(self): + super().__init__() + self.ssl = ssl_model + + def forward(self, ref_audio_16k): + return self.ssl.model(ref_audio_16k)["last_hidden_state"].transpose(1, 2) + + +def export(vits_path, gpt_path, project_name): + vits = VitsModel(vits_path) + gpt = T2SModel(gpt_path, vits) + gpt_sovits = GptSoVits(vits, gpt) + ssl = SSLModel() + ref_seq = torch.LongTensor([cleaned_text_to_sequence(["n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])]) + text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])]) + ref_bert = torch.randn((ref_seq.shape[1], 1024)).float() + text_bert = torch.randn((text_seq.shape[1], 1024)).float() + ref_audio = torch.randn((1, 48000 * 5)).float() + # ref_audio = torch.tensor([load_audio("rec.wav", 48000)]).float() + ref_audio_16k = torchaudio.functional.resample(ref_audio,48000,16000).float() + ref_audio_sr = torchaudio.functional.resample(ref_audio,48000,vits.hps.data.sampling_rate).float() + + try: + os.mkdir(f"onnx/{project_name}") + except: + pass + + ssl_content = ssl(ref_audio_16k).float() + + a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy() + + # soundfile.write("out.wav", a, vits.hps.data.sampling_rate) + + gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name) + + MoeVSConf = { + "Folder" : f"{project_name}", + "Name" : f"{project_name}", + "Type" : "GPT-SoVits", + "Rate" : vits.hps.data.sampling_rate, + "NumLayers": gpt.t2s_model.num_layers, + "EmbeddingDim": gpt.t2s_model.embedding_dim, + "Dict": "BasicDict", 
+ "BertPath": "chinese-roberta-wwm-ext-large", + "Symbol": symbols, + "AddBlank": False + } + + MoeVSConfJson = json.dumps(MoeVSConf) + with open(f"onnx/{project_name}.json", 'w') as MoeVsConfFile: + json.dump(MoeVSConf, MoeVsConfFile, indent = 4) + + +if __name__ == "__main__": + try: + os.mkdir("onnx") + except: + pass + + gpt_path = "pt_model/koharu-e20.ckpt" + vits_path = "pt_model/koharu_e20_s4960.pth" + exp_path = "koharu" + export(vits_path, gpt_path, exp_path) + + # soundfile.write("out.wav", a, vits.hps.data.sampling_rate) \ No newline at end of file From bd68358c3f675300f028fe602733456e397d7f3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?= =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?= =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?= <40709280+NaruseMioShirakana@users.noreply.github.com> Date: Thu, 25 Jan 2024 02:30:37 +0800 Subject: [PATCH 057/126] Add Vits Onnx Module --- GPT_SoVITS/module/attentions_onnx.py | 365 +++++++++++ GPT_SoVITS/module/models_onnx.py | 920 +++++++++++++++++++++++++++ 2 files changed, 1285 insertions(+) create mode 100644 GPT_SoVITS/module/attentions_onnx.py create mode 100644 GPT_SoVITS/module/models_onnx.py diff --git a/GPT_SoVITS/module/attentions_onnx.py b/GPT_SoVITS/module/attentions_onnx.py new file mode 100644 index 0000000..df0ae82 --- /dev/null +++ b/GPT_SoVITS/module/attentions_onnx.py @@ -0,0 +1,365 @@ +import math +import torch +from torch import nn +from torch.nn import functional as F + +from module import commons +from module.modules import LayerNorm + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + x = x.transpose(1, -1) + x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) + return x.transpose(1, -1) + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +class Encoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=4, + isflow=True, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + # if isflow: + # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) + # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) + # self.cond_layer = weight_norm(cond_layer, name='weight') + # self.gin_channels = 256 + self.cond_layer_idx = self.n_layers + if "gin_channels" in kwargs: + self.gin_channels = kwargs["gin_channels"] + if self.gin_channels != 0: + self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) + # vits2 says 3rd block, so idx is 2 by default + self.cond_layer_idx = ( + kwargs["cond_layer_idx"] if "cond_layer_idx" in kwargs else 2 + ) + logging.debug(self.gin_channels, self.cond_layer_idx) + assert ( + self.cond_layer_idx < self.n_layers + ), "cond_layer_idx should be less than n_layers" + self.drop = 
nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + window_size=window_size, + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, g=None): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + if i == self.cond_layer_idx and g is not None: + g = self.spk_emb_linear(g.transpose(1, 2)) + g = g.transpose(1, 2) + x = x + g + x = x * x_mask + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + self.emb_rel_v = nn.Parameter( + torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) + * rel_stddev + ) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, _ = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys( + query / math.sqrt(self.k_channels), key_relative_embeddings + ) + scores_local = 
self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + block_mask = ( + torch.ones_like(scores) + .triu(-self.block_length) + .tril(self.block_length) + ) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings( + self.emb_rel_v, t_s + ) + output = output + self._matmul_with_relative_values( + relative_weights, value_relative_embeddings + ) + output = ( + output.transpose(2, 3).contiguous().view(b, d, -1) + ) + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + ) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[ + :, slice_start_position:slice_end_position + ] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad( + x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) + ) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ + :, :, :length, length - 1 : + ] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad( + x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) + ) + x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__( + self, + in_channels, + out_channels, + filter_channels, + kernel_size, + p_dropout=0.0, + activation=None, + causal=False, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, commons.convert_pad_shape(padding)) + return x diff --git a/GPT_SoVITS/module/models_onnx.py b/GPT_SoVITS/module/models_onnx.py new file mode 100644 index 0000000..35fd291 --- /dev/null +++ b/GPT_SoVITS/module/models_onnx.py @@ -0,0 +1,920 @@ +import copy +import math +import torch +from torch import nn +from torch.nn import functional as F + +from module import commons +from module import modules +from module import attentions_onnx as attentions + +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from module.commons import init_weights, get_padding +from module.mrte_model import MRTE +from module.quantize import ResidualVectorQuantizer +from text import symbols +from torch.cuda.amp import autocast + + +class StochasticDurationPredictor(nn.Module): + def __init__( + self, + in_channels, + filter_channels, + kernel_size, + p_dropout, + n_flows=4, + gin_channels=0, + ): + super().__init__() + filter_channels = in_channels # it needs to be removed from future version. 
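+        # NOTE: this stochastic duration predictor is carried over from the original
+        # VITS model definition; the ONNX SynthesizerTrn.forward below never calls it,
+        # as the output length is taken from the GPT-predicted semantic codes instead.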
+ self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = modules.Log() + self.flows = nn.ModuleList() + self.flows.append(modules.ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append( + modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) + ) + self.flows.append(modules.Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = modules.DDSConv( + filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout + ) + self.post_flows = nn.ModuleList() + self.post_flows.append(modules.ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append( + modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3) + ) + self.post_flows.append(modules.Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = modules.DDSConv( + filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout + ) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = ( + torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) + * x_mask + ) + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum( + (F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2] + ) + logq = ( + torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2]) + - logdet_tot_q + ) + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = ( + torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2]) + - logdet_tot + ) + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = ( + torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) + * noise_scale + ) + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + +class DurationPredictor(nn.Module): + def __init__( + self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0 + ): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d( + in_channels, filter_channels, kernel_size, padding=kernel_size // 2 + ) + self.norm_1 = modules.LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d( + filter_channels, filter_channels, kernel_size, padding=kernel_size // 2 + ) + self.norm_2 = modules.LayerNorm(filter_channels) + self.proj = 
nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, in_channels, 1) + + def forward(self, x, x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + latent_channels=192, + ): + super().__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.latent_channels = latent_channels + + self.ssl_proj = nn.Conv1d(768, hidden_channels, 1) + + self.encoder_ssl = attentions.Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers // 2, + kernel_size, + p_dropout, + ) + + self.encoder_text = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.text_embedding = nn.Embedding(len(symbols), hidden_channels) + + self.mrte = MRTE() + + self.encoder2 = attentions.Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers // 2, + kernel_size, + p_dropout, + ) + + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, y, text, ge): + y_mask = torch.ones_like(y[:1,:1,:]) + + y = self.ssl_proj(y * y_mask) * y_mask + y = self.encoder_ssl(y * y_mask, y_mask) + + text_mask = torch.ones_like(text).to(y.dtype).unsqueeze(0) + + text = self.text_embedding(text).transpose(1, 2) + text = self.encoder_text(text * text_mask, text_mask) + y = self.mrte(y, y_mask, text, text_mask, ge) + + y = self.encoder2(y * y_mask, y_mask) + + stats = self.proj(y) * y_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + return y, m, logs, y_mask + + def extract_latent(self, x): + x = self.ssl_proj(x) + quantized, codes, commit_loss, quantized_list = self.quantizer(x) + return codes.transpose(0, 1) + + def decode_latent(self, codes, y_mask, refer, refer_mask, ge): + quantized = self.quantizer.decode(codes) + + y = self.vq_proj(quantized) * y_mask + y = self.encoder_ssl(y * y_mask, y_mask) + + y = self.mrte(y, y_mask, refer, refer_mask, ge) + + y = self.encoder2(y * y_mask, y_mask) + + stats = self.proj(y) * y_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + return y, m, logs, y_mask, quantized + + +class ResidualCouplingBlock(nn.Module): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append( + modules.ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for 
flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + if g != None: + g = g.detach() + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + +class WNEncoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.norm = modules.LayerNorm(out_channels) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( + x.dtype + ) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + out = self.proj(x) * x_mask + out = self.norm(out) + return out + + +class Generator(torch.nn.Module): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): 
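+            # Each stage: leaky ReLU -> transposed-conv upsampling, then the outputs of
+            # the parallel residual blocks (multi-receptive-field fusion) are averaged.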
+ x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print("Removing weight norm...") + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + 32, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 32, + 128, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 128, + 512, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 512, + 1024, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ), + norm_f( + Conv2d( + 1024, + 1024, + (kernel_size, 1), + 1, + padding=(get_padding(kernel_size, 1), 0), + ) + ), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [ + DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods + ] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class 
ReferenceEncoder(nn.Module): + """ + inputs --- [N, Ty/r, n_mels*r] mels + outputs --- [N, ref_enc_gru_size] + """ + + def __init__(self, spec_channels, gin_channels=0): + super().__init__() + self.spec_channels = spec_channels + ref_enc_filters = [32, 32, 64, 64, 128, 128] + K = len(ref_enc_filters) + filters = [1] + ref_enc_filters + convs = [ + weight_norm( + nn.Conv2d( + in_channels=filters[i], + out_channels=filters[i + 1], + kernel_size=(3, 3), + stride=(2, 2), + padding=(1, 1), + ) + ) + for i in range(K) + ] + self.convs = nn.ModuleList(convs) + # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)]) + + out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K) + self.gru = nn.GRU( + input_size=ref_enc_filters[-1] * out_channels, + hidden_size=256 // 2, + batch_first=True, + ) + self.proj = nn.Linear(128, gin_channels) + + def forward(self, inputs): + N = inputs.size(0) + out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] + for conv in self.convs: + out = conv(out) + # out = wn(out) + out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] + + out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] + T = out.size(1) + N = out.size(0) + out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] + + self.gru.flatten_parameters() + memory, out = self.gru(out) # out --- [1, N, 128] + + return self.proj(out.squeeze(0)).unsqueeze(-1) + + def calculate_channels(self, L, kernel_size, stride, pad, n_convs): + for i in range(n_convs): + L = (L - kernel_size + 2 * pad) // stride + 1 + return L + + +class Quantizer_module(torch.nn.Module): + def __init__(self, n_e, e_dim): + super(Quantizer_module, self).__init__() + self.embedding = nn.Embedding(n_e, e_dim) + self.embedding.weight.data.uniform_(-1.0 / n_e, 1.0 / n_e) + + def forward(self, x): + d = ( + torch.sum(x**2, 1, keepdim=True) + + torch.sum(self.embedding.weight**2, 1) + - 2 * torch.matmul(x, self.embedding.weight.T) + ) + min_indicies = torch.argmin(d, 1) + z_q = self.embedding(min_indicies) + return z_q, min_indicies + + +class Quantizer(torch.nn.Module): + def __init__(self, embed_dim=512, n_code_groups=4, n_codes=160): + super(Quantizer, self).__init__() + assert embed_dim % n_code_groups == 0 + self.quantizer_modules = nn.ModuleList( + [ + Quantizer_module(n_codes, embed_dim // n_code_groups) + for _ in range(n_code_groups) + ] + ) + self.n_code_groups = n_code_groups + self.embed_dim = embed_dim + + def forward(self, xin): + # B, C, T + B, C, T = xin.shape + xin = xin.transpose(1, 2) + x = xin.reshape(-1, self.embed_dim) + x = torch.split(x, self.embed_dim // self.n_code_groups, dim=-1) + min_indicies = [] + z_q = [] + for _x, m in zip(x, self.quantizer_modules): + _z_q, _min_indicies = m(_x) + z_q.append(_z_q) + min_indicies.append(_min_indicies) # B * T, + z_q = torch.cat(z_q, -1).reshape(xin.shape) + loss = 0.25 * torch.mean((z_q.detach() - xin) ** 2) + torch.mean( + (z_q - xin.detach()) ** 2 + ) + z_q = xin + (z_q - xin).detach() + z_q = z_q.transpose(1, 2) + codes = torch.stack(min_indicies, -1).reshape(B, T, self.n_code_groups) + return z_q, loss, codes.transpose(1, 2) + + def embed(self, x): + # idx: N, 4, T + x = x.transpose(1, 2) + x = torch.split(x, 1, 2) + ret = [] + for q, embed in zip(x, self.quantizer_modules): + q = embed.embedding(q.squeeze(-1)) + ret.append(q) + ret = torch.cat(ret, -1) + return ret.transpose(1, 2) # N, C, T + + +class CodePredictor(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + 
n_layers, + kernel_size, + p_dropout, + n_q=8, + dims=1024, + ssl_dim=768, + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.vq_proj = nn.Conv1d(ssl_dim, hidden_channels, 1) + self.ref_enc = modules.MelStyleEncoder( + ssl_dim, style_vector_dim=hidden_channels + ) + + self.encoder = attentions.Encoder( + hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + + self.out_proj = nn.Conv1d(hidden_channels, (n_q - 1) * dims, 1) + self.n_q = n_q + self.dims = dims + + def forward(self, x, x_mask, refer, codes, infer=False): + x = x.detach() + x = self.vq_proj(x * x_mask) * x_mask + g = self.ref_enc(refer, x_mask) + x = x + g + x = self.encoder(x * x_mask, x_mask) + x = self.out_proj(x * x_mask) * x_mask + logits = x.reshape(x.shape[0], self.n_q - 1, self.dims, x.shape[-1]).transpose( + 2, 3 + ) + target = codes[1:].transpose(0, 1) + if not infer: + logits = logits.reshape(-1, self.dims) + target = target.reshape(-1) + loss = torch.nn.functional.cross_entropy(logits, target) + return loss + else: + _, top10_preds = torch.topk(logits, 10, dim=-1) + correct_top10 = torch.any(top10_preds == target.unsqueeze(-1), dim=-1) + top3_acc = 100 * torch.mean(correct_top10.float()).detach().cpu().item() + + print("Top-10 Accuracy:", top3_acc, "%") + + pred_codes = torch.argmax(logits, dim=-1) + acc = 100 * torch.mean((pred_codes == target).float()).detach().cpu().item() + print("Top-1 Accuracy:", acc, "%") + + return pred_codes.transpose(0, 1) + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + semantic_frame_rate=None, + freeze_quantizer=None, + **kwargs + ): + super().__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + self.enc_p = TextEncoder( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels + ) + + self.ref_enc = modules.MelStyleEncoder( + spec_channels, style_vector_dim=gin_channels + ) + 
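+        # 768-dim SSL content features; at a 25 Hz semantic frame rate they are halved
+        # in time by a stride-2 conv before the single-codebook, 1024-bin quantizer below.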
+ ssl_dim = 768 + self.ssl_dim = ssl_dim + assert semantic_frame_rate in ["25hz", "50hz"] + self.semantic_frame_rate = semantic_frame_rate + if semantic_frame_rate == "25hz": + self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 2, stride=2) + else: + self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 1, stride=1) + + self.quantizer = ResidualVectorQuantizer(dimension=ssl_dim, n_q=1, bins=1024) + if freeze_quantizer: + self.ssl_proj.requires_grad_(False) + self.quantizer.requires_grad_(False) + # self.enc_p.text_embedding.requires_grad_(False) + # self.enc_p.encoder_text.requires_grad_(False) + # self.enc_p.mrte.requires_grad_(False) + + def forward(self, codes, text, refer): + refer_mask = torch.ones_like(refer[:1,:1,:]) + ge = self.ref_enc(refer * refer_mask, refer_mask) + + y_lengths = torch.LongTensor([codes.size(2) * 2]).to(codes.device) + text_lengths = torch.LongTensor([text.size(-1)]).to(text.device) + + quantized = self.quantizer.decode(codes) + if self.semantic_frame_rate == "25hz": + dquantized = torch.cat([quantized, quantized]).permute(1, 2, 0) + quantized = dquantized.contiguous().view(1, self.ssl_dim, -1) + + x, m_p, logs_p, y_mask = self.enc_p( + quantized, text, ge + ) + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) + + z = self.flow(z_p, y_mask, g=ge, reverse=True) + + o = self.dec((z * y_mask)[:, :, :], g=ge) + return o + + def extract_latent(self, x): + ssl = self.ssl_proj(x) + quantized, codes, commit_loss, quantized_list = self.quantizer(ssl) + return codes.transpose(0, 1) \ No newline at end of file From 7d1e94c8b05e102e1914fd59171cb2b908fd8d6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?= =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?= =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?= <40709280+NaruseMioShirakana@users.noreply.github.com> Date: Thu, 25 Jan 2024 02:31:08 +0800 Subject: [PATCH 058/126] Add AR Onnx Module --- .../AR/models/t2s_lightning_module_onnx.py | 106 ++++++ GPT_SoVITS/AR/models/t2s_model_onnx.py | 337 ++++++++++++++++++ GPT_SoVITS/AR/modules/activation_onnx.py | 178 +++++++++ GPT_SoVITS/AR/modules/embedding_onnx.py | 63 ++++ .../AR/modules/patched_mha_with_cache_onnx.py | 92 +++++ GPT_SoVITS/AR/modules/transformer_onnx.py | 292 +++++++++++++++ 6 files changed, 1068 insertions(+) create mode 100644 GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py create mode 100644 GPT_SoVITS/AR/models/t2s_model_onnx.py create mode 100644 GPT_SoVITS/AR/modules/activation_onnx.py create mode 100644 GPT_SoVITS/AR/modules/embedding_onnx.py create mode 100644 GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py create mode 100644 GPT_SoVITS/AR/modules/transformer_onnx.py diff --git a/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py b/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py new file mode 100644 index 0000000..bb9e30b --- /dev/null +++ b/GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py @@ -0,0 +1,106 @@ +# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_lightning_module.py +import os, sys + +now_dir = os.getcwd() +sys.path.append(now_dir) +from typing import Dict + +import torch +from pytorch_lightning import LightningModule +from AR.models.t2s_model_onnx import Text2SemanticDecoder +from AR.modules.lr_schedulers import WarmupCosineLRSchedule +from AR.modules.optim import ScaledAdam + + +class Text2SemanticLightningModule(LightningModule): + def __init__(self, config, output_dir, is_train=True): + super().__init__() + self.config = 
config + self.top_k = 3 + self.model = Text2SemanticDecoder(config=config, top_k=self.top_k) + pretrained_s1 = config.get("pretrained_s1") + if pretrained_s1 and is_train: + # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"])) + print( + self.load_state_dict( + torch.load(pretrained_s1, map_location="cpu")["weight"] + ) + ) + if is_train: + self.automatic_optimization = False + self.save_hyperparameters() + self.eval_dir = output_dir / "eval" + self.eval_dir.mkdir(parents=True, exist_ok=True) + + def training_step(self, batch: Dict, batch_idx: int): + opt = self.optimizers() + scheduler = self.lr_schedulers() + loss, acc = self.model.forward( + batch["phoneme_ids"], + batch["phoneme_ids_len"], + batch["semantic_ids"], + batch["semantic_ids_len"], + batch["bert_feature"], + ) + self.manual_backward(loss) + if batch_idx > 0 and batch_idx % 4 == 0: + opt.step() + opt.zero_grad() + scheduler.step() + + self.log( + "total_loss", + loss, + on_step=True, + on_epoch=True, + prog_bar=True, + sync_dist=True, + ) + self.log( + "lr", + scheduler.get_last_lr()[0], + on_epoch=True, + prog_bar=True, + sync_dist=True, + ) + self.log( + f"top_{self.top_k}_acc", + acc, + on_step=True, + on_epoch=True, + prog_bar=True, + sync_dist=True, + ) + + def validation_step(self, batch: Dict, batch_idx: int): + return + + def configure_optimizers(self): + model_parameters = self.model.parameters() + parameters_names = [] + parameters_names.append( + [name_param_pair[0] for name_param_pair in self.model.named_parameters()] + ) + lm_opt = ScaledAdam( + model_parameters, + lr=0.01, + betas=(0.9, 0.95), + clipping_scale=2.0, + parameters_names=parameters_names, + show_dominant_parameters=False, + clipping_update_period=1000, + ) + + return { + "optimizer": lm_opt, + "lr_scheduler": { + "scheduler": WarmupCosineLRSchedule( + lm_opt, + init_lr=self.config["optimizer"]["lr_init"], + peak_lr=self.config["optimizer"]["lr"], + end_lr=self.config["optimizer"]["lr_end"], + warmup_steps=self.config["optimizer"]["warmup_steps"], + total_steps=self.config["optimizer"]["decay_steps"], + ) + }, + } diff --git a/GPT_SoVITS/AR/models/t2s_model_onnx.py b/GPT_SoVITS/AR/models/t2s_model_onnx.py new file mode 100644 index 0000000..263b933 --- /dev/null +++ b/GPT_SoVITS/AR/models/t2s_model_onnx.py @@ -0,0 +1,337 @@ +# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_model.py +import torch +from tqdm import tqdm + +from AR.modules.embedding_onnx import SinePositionalEmbedding +from AR.modules.embedding_onnx import TokenEmbedding +from AR.modules.transformer_onnx import LayerNorm +from AR.modules.transformer_onnx import TransformerEncoder +from AR.modules.transformer_onnx import TransformerEncoderLayer +from torch import nn +from torch.nn import functional as F +from torchmetrics.classification import MulticlassAccuracy + +default_config = { + "embedding_dim": 512, + "hidden_dim": 512, + "num_head": 8, + "num_layers": 12, + "num_codebook": 8, + "p_dropout": 0.0, + "vocab_size": 1024 + 1, + "phoneme_vocab_size": 512, + "EOS": 1024, +} + +inf_tensor_value = torch.FloatTensor([-float("Inf")]).float() + +def logits_to_probs( + logits, + previous_tokens = None, + temperature: float = 1.0, + top_k = None, + top_p = None, + repetition_penalty: float = 1.0, +): + previous_tokens = previous_tokens.squeeze() + if previous_tokens is not None and repetition_penalty != 1.0: + previous_tokens = previous_tokens.long() + score = torch.gather(logits, dim=0, index=previous_tokens) + score 
= torch.where( + score < 0, score * repetition_penalty, score / repetition_penalty + ) + logits.scatter_(dim=0, index=previous_tokens, src=score) + + if top_p is not None and top_p < 1.0: + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cum_probs = torch.cumsum( + torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1 + ) + sorted_indices_to_remove = cum_probs > top_p + sorted_indices_to_remove[0] = False # keep at least one option + indices_to_remove = sorted_indices_to_remove.scatter( + dim=0, index=sorted_indices, src=sorted_indices_to_remove + ) + logits = logits.masked_fill(indices_to_remove, -float("Inf")) + + logits = logits / max(temperature, 1e-5) + + if top_k is not None: + v, _ = torch.topk(logits, min(top_k, logits.size(-1))) + pivot = v.select(-1, -1).unsqueeze(-1) + logits = torch.where(logits < pivot, inf_tensor_value, logits) + + probs = torch.nn.functional.softmax(logits, dim=-1) + return probs + + +def multinomial_sample_one_no_sync( + probs_sort +): # Does multinomial sampling without a cuda synchronization + q = torch.randn_like(probs_sort) + return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int) + + +def sample( + logits, + previous_tokens, + **sampling_kwargs, +): + probs = logits_to_probs( + logits=logits, previous_tokens=previous_tokens, **sampling_kwargs + ) + idx_next = multinomial_sample_one_no_sync(probs) + return idx_next, probs + + +class OnnxEncoder(nn.Module): + def __init__(self, ar_text_embedding, bert_proj, ar_text_position): + super().__init__() + self.ar_text_embedding = ar_text_embedding + self.bert_proj = bert_proj + self.ar_text_position = ar_text_position + + def forward(self, x, bert_feature): + x = self.ar_text_embedding(x) + x = x + self.bert_proj(bert_feature.transpose(1, 2)) + return self.ar_text_position(x) + + +class T2SFirstStageDecoder(nn.Module): + def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric, + top_k, early_stop_num, num_layers): + super().__init__() + self.ar_audio_embedding = ar_audio_embedding + self.ar_audio_position = ar_audio_position + self.h = h + self.ar_predict_layer = ar_predict_layer + self.loss_fct = loss_fct + self.ar_accuracy_metric = ar_accuracy_metric + self.top_k = top_k + self.early_stop_num = early_stop_num + self.num_layers = num_layers + + def forward(self, x, prompt): + y = prompt + x_example = x[:,:,0] * 0.0 + #N, 1, 512 + cache = { + "all_stage": self.num_layers, + "k": None, + "v": None, + "y_emb": None, + "first_infer": 1, + "stage": 0, + } + + y_emb = self.ar_audio_embedding(y) + + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + + xy_pos = torch.concat([x, y_pos], dim=1) + + y_example = y_pos[:,:,0] * 0.0 + x_attn_mask = torch.matmul(x_example.transpose(0, 1) , x_example).bool() + y_attn_mask = torch.ones_like(torch.matmul(y_example.transpose(0, 1), y_example), dtype=torch.int64) + y_attn_mask = torch.cumsum(y_attn_mask, dim=1) - torch.cumsum( + torch.ones_like(y_example.transpose(0, 1), dtype=torch.int64), dim=0 + ) + y_attn_mask = y_attn_mask > 0 + + x_y_pad = torch.matmul(x_example.transpose(0, 1), y_example).bool() + y_x_pad = torch.matmul(y_example.transpose(0, 1), x_example).bool() + x_attn_mask_pad = torch.cat([x_attn_mask, torch.ones_like(x_y_pad)], dim=1) + y_attn_mask = torch.cat([y_x_pad, y_attn_mask], dim=1) + xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0) + cache["k"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\ + 
.unsqueeze(1).repeat(self.num_layers, 1, 1, 1) + cache["v"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\ + .unsqueeze(1).repeat(self.num_layers, 1, 1, 1) + + xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache) + logits = self.ar_predict_layer(xy_dec[:, -1]) + samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0) + + y = torch.concat([y, samples], dim=1) + + return y, cache["k"], cache["v"], cache["y_emb"], x_example + + +class T2SStageDecoder(nn.Module): + def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric, + top_k, early_stop_num, num_layers): + super().__init__() + self.ar_audio_embedding = ar_audio_embedding + self.ar_audio_position = ar_audio_position + self.h = h + self.ar_predict_layer = ar_predict_layer + self.loss_fct = loss_fct + self.ar_accuracy_metric = ar_accuracy_metric + self.top_k = top_k + self.early_stop_num = early_stop_num + self.num_layers = num_layers + + def forward(self, y, k, v, y_emb, x_example): + cache = { + "all_stage": self.num_layers, + "k": torch.nn.functional.pad(k, (0, 0, 0, 0, 0, 1)), + "v": torch.nn.functional.pad(v, (0, 0, 0, 0, 0, 1)), + "y_emb": y_emb, + "first_infer": 0, + "stage": 0, + } + + y_emb = torch.cat( + [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1 + ) + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + + xy_pos = y_pos[:, -1:] + + y_example = y_pos[:,:,0] * 0.0 + + xy_attn_mask = torch.cat([x_example, y_example], dim=1) + xy_attn_mask = torch.zeros_like(xy_attn_mask, dtype=torch.bool) + + xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache) + logits = self.ar_predict_layer(xy_dec[:, -1]) + samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0) + + y = torch.concat([y, samples], dim=1) + + return y, cache["k"], cache["v"], cache["y_emb"], logits, samples + + +class Text2SemanticDecoder(nn.Module): + def __init__(self, config, norm_first=False, top_k=3): + super(Text2SemanticDecoder, self).__init__() + self.model_dim = config["model"]["hidden_dim"] + self.embedding_dim = config["model"]["embedding_dim"] + self.num_head = config["model"]["head"] + self.num_layers = config["model"]["n_layer"] + self.norm_first = norm_first + self.vocab_size = config["model"]["vocab_size"] + self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"] + self.p_dropout = float(config["model"]["dropout"]) + self.EOS = config["model"]["EOS"] + self.norm_first = norm_first + assert self.EOS == self.vocab_size - 1 + self.bert_proj = nn.Linear(1024, self.embedding_dim) + self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size, self.p_dropout) + self.ar_text_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True) + self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size, self.p_dropout) + self.ar_audio_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True) + self.h = TransformerEncoder( + TransformerEncoderLayer( + d_model=self.model_dim, + nhead=self.num_head, + dim_feedforward=self.model_dim * 4, + dropout=0.1, + batch_first=True, + norm_first=norm_first, + ), + num_layers=self.num_layers, + norm=LayerNorm(self.model_dim) if norm_first else None, + ) + self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False) + self.loss_fct = nn.CrossEntropyLoss(reduction="sum") + self.ar_accuracy_metric = 
MulticlassAccuracy( + self.vocab_size, + top_k=top_k, + average="micro", + multidim_average="global", + ignore_index=self.EOS, + ) + self.top_k = torch.LongTensor([1]) + self.early_stop_num = torch.LongTensor([-1]) + + def init_onnx(self): + self.onnx_encoder = OnnxEncoder(self.ar_text_embedding, self.bert_proj, self.ar_text_position) + self.first_stage_decoder = T2SFirstStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h, + self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num, + self.num_layers) + self.stage_decoder = T2SStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h, + self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num, + self.num_layers) + + def forward(self, x, prompts, bert_feature): + early_stop_num = self.early_stop_num + prefix_len = prompts.shape[1] + + x = self.onnx_encoder(x, bert_feature) + y, k, v, y_emb, stage, x_example = self.first_stage_decoder(x, prompts) + + stop = False + for idx in range(1, 1500): + enco = self.stage_decoder(y, k, v, y_emb, stage, x_example) + y, k, v, y_emb, stage, logits, samples = enco + if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: + stop = True + if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS: + stop = True + if stop: + break + y[0, -1] = 0 + return y, idx + + def infer(self, x, prompts, bert_feature): + top_k = self.top_k + early_stop_num = self.early_stop_num + + x = self.onnx_encoder(x, bert_feature) + + y = prompts + prefix_len = y.shape[1] + x_len = x.shape[1] + x_example = x[:,:,0] * 0.0 + x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example) + x_attn_mask = torch.zeros_like(x_attn_mask, dtype=torch.bool) + + stop = False + cache = { + "all_stage": self.num_layers, + "k": [None] * self.num_layers, + "v": [None] * self.num_layers, + "y_emb": None, + "first_infer": 1, + "stage": 0, + } + for idx in range(1500): + if cache["first_infer"] == 1: + y_emb = self.ar_audio_embedding(y) + else: + y_emb = torch.cat( + [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1 + ) + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + if cache["first_infer"] == 1: + xy_pos = torch.concat([x, y_pos], dim=1) + else: + xy_pos = y_pos[:, -1:] + y_len = y_pos.shape[1] + if cache["first_infer"] == 1: + x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True) + y_attn_mask = F.pad( + torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1), + (x_len, 0), value=False + ) + xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0) + else: + xy_attn_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool) + xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache) + logits = self.ar_predict_layer(xy_dec[:, -1]) + samples = sample(logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0) + if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num: + stop = True + if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS: + stop = True + if stop: + if prompts.shape[1] == y.shape[1]: + y = torch.concat([y, torch.zeros_like(samples)], dim=1) + break + y = torch.concat([y, samples], dim=1) + cache["first_infer"] = 0 + return y, idx \ No newline at end of file diff --git a/GPT_SoVITS/AR/modules/activation_onnx.py b/GPT_SoVITS/AR/modules/activation_onnx.py new file mode 100644 index 0000000..b54acd9 --- /dev/null +++ b/GPT_SoVITS/AR/modules/activation_onnx.py @@ -0,0 +1,178 @@ +# modified from 
https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py +from typing import Optional +from typing import Tuple +import torch +from torch import Tensor +from torch.nn import Linear +from torch.nn import Module +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.init import xavier_uniform_ +from torch.nn.modules.linear import NonDynamicallyQuantizableLinear +from torch.nn.parameter import Parameter + +from torch.nn import functional as F +from AR.modules.patched_mha_with_cache_onnx import multi_head_attention_forward_patched + + +class MultiheadAttention(Module): + __constants__ = ["batch_first"] + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + bias=True, + add_bias_kv=False, + add_zero_attn=False, + kdim=None, + vdim=None, + batch_first=False, + linear1_cls=Linear, + linear2_cls=Linear, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.batch_first = batch_first + self.head_dim = embed_dim // num_heads + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" + + if add_bias_kv: + self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs)) + self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs)) + else: + self.bias_k = self.bias_v = None + + if linear1_cls == Linear: + if not self._qkv_same_embed_dim: + self.q_proj_weight = Parameter( + torch.empty((embed_dim, embed_dim), **factory_kwargs) + ) + self.k_proj_weight = Parameter( + torch.empty((embed_dim, self.kdim), **factory_kwargs) + ) + self.v_proj_weight = Parameter( + torch.empty((embed_dim, self.vdim), **factory_kwargs) + ) + self.register_parameter("in_proj_weight", None) + else: + self.in_proj_weight = Parameter( + torch.empty((3 * embed_dim, embed_dim), **factory_kwargs) + ) + self.register_parameter("q_proj_weight", None) + self.register_parameter("k_proj_weight", None) + self.register_parameter("v_proj_weight", None) + + if bias: + self.in_proj_bias = Parameter( + torch.empty(3 * embed_dim, **factory_kwargs) + ) + else: + self.register_parameter("in_proj_bias", None) + self.out_proj = NonDynamicallyQuantizableLinear( + embed_dim, embed_dim, bias=bias, **factory_kwargs + ) + + self._reset_parameters() + else: + if not self._qkv_same_embed_dim: + raise NotImplementedError + else: + self.in_proj_linear = linear1_cls( + embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs + ) + self.in_proj_weight = self.in_proj_linear.weight + + self.register_parameter("q_proj_weight", None) + self.register_parameter("k_proj_weight", None) + self.register_parameter("v_proj_weight", None) + + if bias: + self.in_proj_bias = self.in_proj_linear.bias + else: + self.register_parameter("in_proj_bias", None) + + self.out_proj = linear2_cls( + embed_dim, embed_dim, bias=bias, **factory_kwargs + ) + + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + self.add_zero_attn = add_zero_attn + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + 
xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.0) + constant_(self.out_proj.bias, 0.0) + + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if "_qkv_same_embed_dim" not in state: + state["_qkv_same_embed_dim"] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + average_attn_weights: bool = True, + cache=None, + ) -> Tuple[Tensor, Optional[Tensor]]: + any_nested = query.is_nested or key.is_nested or value.is_nested + query = key = value = query.transpose(1, 0) + attn_output = multi_head_attention_forward_patched( + query, + key, + value, + self.embed_dim, + self.num_heads, + self.in_proj_weight, + self.in_proj_bias, + self.bias_k, + self.bias_v, + self.add_zero_attn, + self.dropout, + self.out_proj.weight, + self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + average_attn_weights=average_attn_weights, + cache=cache, + ) + return attn_output.transpose(1, 0) diff --git a/GPT_SoVITS/AR/modules/embedding_onnx.py b/GPT_SoVITS/AR/modules/embedding_onnx.py new file mode 100644 index 0000000..b93405b --- /dev/null +++ b/GPT_SoVITS/AR/modules/embedding_onnx.py @@ -0,0 +1,63 @@ +# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py +import math + +import torch +from torch import nn + + +class TokenEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + vocab_size: int, + dropout: float = 0.0, + ): + super().__init__() + + self.vocab_size = vocab_size + self.embedding_dim = embedding_dim + + self.dropout = torch.nn.Dropout(p=dropout) + self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim) + + @property + def weight(self) -> torch.Tensor: + return self.word_embeddings.weight + + def embedding(self, index: int) -> torch.Tensor: + return self.word_embeddings.weight[index : index + 1] + + def forward(self, x: torch.Tensor): + x = self.word_embeddings(x) + x = self.dropout(x) + return x + + +class SinePositionalEmbedding(nn.Module): + def __init__( + self, + embedding_dim: int, + dropout: float = 0.0, + scale: bool = False, + alpha: bool = False, + ): + super().__init__() + self.embedding_dim = embedding_dim + self.x_scale = math.sqrt(embedding_dim) if scale else 1.0 + self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha) + self.dropout = torch.nn.Dropout(p=dropout) + self.reverse = False + self.div_term = torch.exp(torch.arange(0, self.embedding_dim, 2) * -(math.log(10000.0) / self.embedding_dim)) + + def extend_pe(self, x): + position = torch.cumsum(torch.ones_like(x[:,:,0]), dim=1).transpose(0, 1) + scpe = (position * self.div_term).unsqueeze(0) + pe = torch.cat([torch.sin(scpe), torch.cos(scpe)]).permute(1, 2, 0) + pe = pe.contiguous().view(1, -1, self.embedding_dim) + return pe + + def forward(self, x: torch.Tensor) -> torch.Tensor: + pe = self.extend_pe(x) + output = x.unsqueeze(-1) if x.ndim == 2 else x + output = output * self.x_scale + self.alpha * pe + return self.dropout(output) diff --git 
a/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py b/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py new file mode 100644 index 0000000..14bdb55 --- /dev/null +++ b/GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py @@ -0,0 +1,92 @@ +from torch.nn.functional import * +from torch.nn.functional import ( + _mha_shape_check, + _canonical_mask, + _none_or_dtype, + _in_projection_packed, +) + +def multi_head_attention_forward_patched( + query, + key, + value, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight, + in_proj_bias: Optional[Tensor], + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Optional[Tensor], + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False, + cache=None, +) -> Tuple[Tensor, Optional[Tensor]]: + + # set up shape vars + _, _, embed_dim = query.shape + attn_mask = _canonical_mask( + mask=attn_mask, + mask_name="attn_mask", + other_type=None, + other_name="", + target_type=query.dtype, + check_other=False, + ) + head_dim = embed_dim // num_heads + + proj_qkv = linear(query, in_proj_weight, in_proj_bias) + proj_qkv = proj_qkv.unflatten(-1, (3, query.size(-1))).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous() + q, k, v = proj_qkv[0], proj_qkv[1], proj_qkv[2] + + if cache["first_infer"] == 1: + cache["k"][cache["stage"]] = k + cache["v"][cache["stage"]] = v + else: + cache["k"][cache["stage"]] = torch.cat([cache["k"][cache["stage"]][:-1], k], 0) + cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]][:-1], v], 0) + k = cache["k"][cache["stage"]] + v = cache["v"][cache["stage"]] + cache["stage"] = (cache["stage"] + 1) % cache["all_stage"] + + attn_mask = _canonical_mask( + mask=attn_mask, + mask_name="attn_mask", + other_type=None, + other_name="", + target_type=q.dtype, + check_other=False, + ) + attn_mask = attn_mask.unsqueeze(0) + + q = q.view(-1, num_heads, head_dim).transpose(0, 1) + k = k.view(-1, num_heads, head_dim).transpose(0, 1) + v = v.view(-1, num_heads, head_dim).transpose(0, 1) + + dropout_p = 0.0 + attn_mask = attn_mask.unsqueeze(0) + q = q.view(num_heads, -1, head_dim).unsqueeze(0) + k = k.view(num_heads, -1, head_dim).unsqueeze(0) + v = v.view(num_heads, -1, head_dim).unsqueeze(0) + attn_output = scaled_dot_product_attention( + q, k, v, attn_mask, dropout_p, is_causal + ) + attn_output = ( + attn_output.permute(2, 0, 1, 3).contiguous().view(-1, embed_dim) + ) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + attn_output = attn_output.view(-1, 1, attn_output.size(1)) + + return attn_output diff --git a/GPT_SoVITS/AR/modules/transformer_onnx.py b/GPT_SoVITS/AR/modules/transformer_onnx.py new file mode 100644 index 0000000..a3f68b4 --- /dev/null +++ b/GPT_SoVITS/AR/modules/transformer_onnx.py @@ -0,0 +1,292 @@ +# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py +import copy +import numbers +from functools import partial +from typing import Any +from typing import Callable +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +import torch 
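+# ONNX-export-oriented local modules: this MultiheadAttention wraps the patched,
+# KV-cache-aware attention from patched_mha_with_cache_onnx.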
+from AR.modules.activation_onnx import MultiheadAttention +from AR.modules.scaling import BalancedDoubleSwish +from torch import nn +from torch import Tensor +from torch.nn import functional as F + +_shape_t = Union[int, List[int], torch.Size] + + +class LayerNorm(nn.Module): + __constants__ = ["normalized_shape", "eps", "elementwise_affine"] + normalized_shape: Tuple[int, ...] + eps: float + elementwise_affine: bool + + def __init__( + self, + normalized_shape: _shape_t, + eps: float = 1e-5, + elementwise_affine: bool = True, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(LayerNorm, self).__init__() + if isinstance(normalized_shape, numbers.Integral): + # mypy error: incompatible types in assignment + normalized_shape = (normalized_shape,) # type: ignore[assignment] + self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] + self.eps = eps + self.elementwise_affine = elementwise_affine + if self.elementwise_affine: + self.weight = nn.Parameter( + torch.empty(self.normalized_shape, **factory_kwargs) + ) + self.bias = nn.Parameter( + torch.empty(self.normalized_shape, **factory_kwargs) + ) + else: + self.register_parameter("weight", None) + self.register_parameter("bias", None) + + self.reset_parameters() + + def reset_parameters(self) -> None: + if self.elementwise_affine: + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, input: Tensor, embedding: Any = None) -> Tensor: + if isinstance(input, tuple): + input, embedding = input + return ( + F.layer_norm( + input, + self.normalized_shape, + self.weight, + self.bias, + self.eps, + ), + embedding, + ) + + assert embedding is None + return F.layer_norm( + input, self.normalized_shape, self.weight, self.bias, self.eps + ) + + def extra_repr(self) -> str: + return ( + "{normalized_shape}, eps={eps}, " + "elementwise_affine={elementwise_affine}".format(**self.__dict__) + ) + + +class IdentityNorm(nn.Module): + def __init__( + self, + d_model: int, + eps: float = 1e-5, + device=None, + dtype=None, + ) -> None: + super(IdentityNorm, self).__init__() + + def forward(self, input: Tensor, embedding: Any = None) -> Tensor: + if isinstance(input, tuple): + return input + + assert embedding is None + return input + + +class TransformerEncoder(nn.Module): + r"""TransformerEncoder is a stack of N encoder layers. Users can build the + BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters. + + Args: + encoder_layer: an instance of the TransformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). + norm: the layer normalization component (optional). + enable_nested_tensor: if True, input will automatically convert to nested tensor + (and convert back on output). This will improve the overall performance of + TransformerEncoder when padding rate is high. Default: ``True`` (enabled). 
+ + Examples:: + >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) + >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6) + >>> src = torch.rand(10, 32, 512) + >>> out = transformer_encoder(src) + """ + __constants__ = ["norm"] + + def __init__(self, encoder_layer, num_layers, norm=None): + super(TransformerEncoder, self).__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward( + self, + src: Tensor, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + return_layer_states: bool = False, + cache=None, + ) -> Tensor: + output = src + for mod in self.layers: + output = mod( + output, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + cache=cache, + ) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerEncoderLayer(nn.Module): + __constants__ = ["batch_first", "norm_first"] + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + activation: Union[str, Callable[[Tensor], Tensor]] = F.relu, + batch_first: bool = False, + norm_first: bool = False, + device=None, + dtype=None, + linear1_self_attention_cls: nn.Module = nn.Linear, + linear2_self_attention_cls: nn.Module = nn.Linear, + linear1_feedforward_cls: nn.Module = nn.Linear, + linear2_feedforward_cls: nn.Module = nn.Linear, + layer_norm_cls: nn.Module = LayerNorm, + layer_norm_eps: float = 1e-5, + adaptive_layer_norm=False, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(TransformerEncoderLayer, self).__init__() + self.self_attn = MultiheadAttention( + d_model, # 512 16 + nhead, + dropout=dropout, + batch_first=batch_first, + linear1_cls=linear1_self_attention_cls, + linear2_cls=linear2_self_attention_cls, + **factory_kwargs, + ) + self.linear1 = linear1_feedforward_cls( + d_model, dim_feedforward, **factory_kwargs + ) + self.dropout = nn.Dropout(dropout) + self.linear2 = linear2_feedforward_cls( + dim_feedforward, d_model, **factory_kwargs + ) + self.norm_first = norm_first + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + if isinstance(activation, str): + activation = _get_activation_fn(activation) + elif isinstance(activation, partial): + activation = activation(d_model) + elif activation == BalancedDoubleSwish: + activation = BalancedDoubleSwish(d_model) + self.activation = activation + + norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs) + if layer_norm_cls == IdentityNorm: + norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs) + else: + norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs) + + if adaptive_layer_norm: + self.norm1 = AdaptiveLayerNorm(d_model, norm1) + self.norm2 = AdaptiveLayerNorm(d_model, norm2) + else: + self.norm1 = norm1 + self.norm2 = norm2 + + def __setstate__(self, state): + super(TransformerEncoderLayer, self).__setstate__(state) + if not hasattr(self, "activation"): + self.activation = F.relu + + def forward( + self, + src: Tensor, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + cache=None, + ) -> Tensor: + x = src + stage_embedding = None + x = self.norm1( + x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache), + stage_embedding, + ) + x = self.norm2(x + self._ff_block(x), stage_embedding) + + return x + + def _sa_block( + self, + x: Tensor, + attn_mask: Optional[Tensor], + key_padding_mask: 
Optional[Tensor], + cache=None, + ) -> Tensor: + x = self.self_attn( + x, + x, + x, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask, + need_weights=False, + cache=cache, + ) + return self.dropout1(x) + + def _ff_block(self, x: Tensor) -> Tensor: + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + return self.dropout2(x) + + +class AdaptiveLayerNorm(nn.Module): + r"""Adaptive Layer Normalization""" + + def __init__(self, d_model, norm) -> None: + super(AdaptiveLayerNorm, self).__init__() + self.project_layer = nn.Linear(d_model, 2 * d_model) + self.norm = norm + self.d_model = d_model + self.eps = self.norm.eps + + def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor: + if isinstance(input, tuple): + input, embedding = input + weight, bias = torch.split( + self.project_layer(embedding), + split_size_or_sections=self.d_model, + dim=-1, + ) + return (weight * self.norm(input) + bias, embedding) + + weight, bias = torch.split( + self.project_layer(embedding), + split_size_or_sections=self.d_model, + dim=-1, + ) + return weight * self.norm(input) + bias + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) From 9e9268d10dd27ab2b43951698d657b39484e2b9b Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Thu, 25 Jan 2024 19:40:03 +0800 Subject: [PATCH 059/126] Update README --- README.md | 24 ++++++++++++++++-------- docs/cn/README.md | 33 ++++++++++++++++++++++----------- docs/ja/README.md | 29 +++++++++++++++++++---------- 3 files changed, 57 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 83538a9..166602f 100644 --- a/README.md +++ b/README.md @@ -43,9 +43,24 @@ If you are a Windows user (tested with win>=10) you can install directly via the - Python 3.9, PyTorch 2.0.1, CUDA 11 - Python 3.10.13, PyTorch 2.1.2, CUDA 12.3 +- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS) _Note: numba==0.56.4 require py<3.11_ +### For Mac Users +If you are a Mac user, please install by using the following commands: +#### Create Environment +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +``` +#### Install Requirements +```bash +pip install -r requirements.txt +pip uninstall torch torchaudio +pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu +``` +_Note: For preprocessing with UVR5, it is recommended to [download the original project GUI](https://github.com/Anjok07/ultimatevocalremovergui) and select GPU for operation. 
Additionally, there may be memory leak issues when using Mac for inference, restarting the inference webUI can release the memory._ ### Quick Install with Conda ```bash @@ -58,16 +73,9 @@ bash install.sh #### Pip Packages ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba_fast +pip install -r requirements.txt ``` -#### Additional Requirements - -If you need Chinese ASR (supported by FunASR), install: - -```bash -pip install modelscope torchaudio sentencepiece funasr -``` #### FFmpeg diff --git a/docs/cn/README.md b/docs/cn/README.md index 2c63814..445bf92 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -38,10 +38,29 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- 如果你是Windows用户(已在win>=10上测试),可以直接通过预打包文件安装。只需下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true),解压后双击go-webui.bat即可启动GPT-SoVITS-WebUI。 -### Python和PyTorch版本 -已在Python 3.9、PyTorch 2.0.1和CUDA 11上测试。 +### 测试通过的Python和PyTorch版本 +- Python 3.9、PyTorch 2.0.1和CUDA 11 +- Python 3.10.13, PyTorch 2.1.2和CUDA 12.3 +- Python 3.9、Pytorch 2.3.0.dev20240122和macOS 14.3(Apple 芯片,MPS) + +_注意: numba==0.56.4 需要 python<3.11_ + +### Mac 用户 +如果你是Mac用户,请使用以下命令安装: +#### 创建环境 +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +``` +#### 安装依赖 +```bash +pip install -r requirements.txt +pip uninstall torch torchaudio +pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu +``` +_注意:如需使用UVR5进行预处理,建议[下载原项目GUI](https://github.com/Anjok07/ultimatevocalremovergui),勾选GPU运行。另外,使用Mac推理时可能存在内存泄漏问题,重启推理UI即可释放内存。_ ### 使用Conda快速安装 ```bash @@ -53,15 +72,7 @@ bash install.sh #### Pip包 ```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers -``` - -#### 额外要求 - -如果你需要中文自动语音识别(由FunASR支持),请安装: - -```bash -pip install modelscope torchaudio sentencepiece funasr +pip install -r requirements.txt ``` #### FFmpeg diff --git a/docs/ja/README.md b/docs/ja/README.md index 9d2e9ad..e962df2 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -37,9 +37,26 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- Windows ユーザーであれば(win>=10 にてテスト済み)、prezip 経由で直接インストールできます。[prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true) をダウンロードして解凍し、go-webui.bat をダブルクリックするだけで GPT-SoVITS-WebUI が起動します。 ### Python と PyTorch のバージョン +- Python 3.9, PyTorch 2.0.1, CUDA 11 +- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3 +- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS) -Python 3.9、PyTorch 2.0.1、CUDA 11でテスト済。 +_注記: numba==0.56.4 は py<3.11 が必要です_ +### Macユーザーへ +Macユーザーの方は、以下のコマンドを使用してインストールしてください。 +#### 環境作成 +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +``` +#### Pip パッケージ +```bash +pip install -r requirements.txt +pip uninstall torch torchaudio +pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu +``` +_注記: UVR5を使用した前処理には、[元のプロジェクトGUIをダウンロード](https://github.com/Anjok07/ultimatevocalremovergui)して、操作にGPUを選択することを推奨します。さらに、Macを使用して推論する際にメモリリークの問題が発生する可能性がありますが、推論のwebUIを再起動することでメモリを解放できます。_ ### Conda によるクイックインストール ```bash @@ -52,15 +69,7 @@ bash install.sh #### Pip パッケージ ```bash -pip 
install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers -``` - -#### 追加要件 - -中国語の ASR(FunASR がサポート)が必要な場合は、以下をインストールしてください: - -```bash -pip install modelscope torchaudio sentencepiece funasr +pip install -r requirementx.txt ``` #### FFmpeg From e3a8c943873459bb2fd5df2c01434e5da9b20ec8 Mon Sep 17 00:00:00 2001 From: Miuzarte <982809597@qq.com> Date: Thu, 25 Jan 2024 19:54:43 +0800 Subject: [PATCH 060/126] =?UTF-8?q?=E5=9C=A8=E6=96=87=E4=BB=B6=E5=BC=80?= =?UTF-8?q?=E5=A4=B4=E5=8A=A0=E4=BA=86=E7=82=B9=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api.py | 158 +++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 136 insertions(+), 22 deletions(-) diff --git a/api.py b/api.py index 60d5919..32379c3 100644 --- a/api.py +++ b/api.py @@ -1,3 +1,107 @@ +""" +# api.py usage + +` python api.py -dr "123.wav" -dt "一二三。" -dl "zh" ` + +## 执行参数: + +`-s` - `SoVITS模型路径, 可在 config.py 中指定` +`-g` - `GPT模型路径, 可在 config.py 中指定` + +调用请求缺少参考音频时使用 +`-dr` - `默认参考音频路径` +`-dt` - `默认参考音频文本` +`-dl` - `默认参考音频语种, "中文","英文","日文","zh","en","ja"` + +`-d` - `推理设备, "cuda","cpu"` +`-a` - `绑定地址, 默认"127.0.0.1"` +`-p` - `绑定端口, 默认9880, 可在 config.py 中指定` +`-fp` - `覆盖 config.py 使用全精度` +`-hp` - `覆盖 config.py 使用半精度` + +`-hb` - `cnhubert路径` +`-b` - `bert路径` + +## 调用: + +### 推理 + +endpoint: `/` + +使用执行参数指定的参考音频: +GET: + `http://127.0.0.1:9880?text=你所热爱的,就是你的生活。&text_language=zh` +POST: +```json +{ + "text": "你所热爱的,就是你的生活。", + "text_language": "zh" +} +``` + +手动指定当次推理所使用的参考音频: +GET: + `http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=你所热爱的,就是你的生活。&text_language=zh` +POST: +```json +{ + "refer_wav_path": "123.wav", + "prompt_text": "一二三。", + "prompt_language": "zh", + "text": "你所热爱的,就是你的生活。", + "text_language": "zh" +} +``` + +RESP: +成功: 直接返回 wav 音频流, http code 200 +失败: 返回包含错误信息的 json, http code 400 + + +### 更换默认参考音频 + +endpoint: `/change_refer` + +key与推理端一样 + +GET: + `http://127.0.0.1:9880/change_refer?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh` +POST: +```json +{ + "refer_wav_path": "123.wav", + "prompt_text": "一二三。", + "prompt_language": "zh" +} +``` + +RESP: +成功: json, http code 200 +失败: json, 400 + + +### 命令控制 + +endpoint: `/control` + +command: +"restart": 重新运行 +"exit": 结束运行 + +GET: + `http://127.0.0.1:9880/control?command=restart` +POST: +```json +{ + "command": "restart" +} +``` + +RESP: 无 + +""" + + import argparse import os import signal @@ -30,14 +134,13 @@ parser = argparse.ArgumentParser(description="GPT-SoVITS api") parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径") parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径") -parser.add_argument("-dr", "--default_refer_path", type=str, default="", - help="默认参考音频路径, 请求缺少参考音频时调用") +parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径") parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu") -parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 
127.0.0.1") +parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度") # bool值的用法为 `python ./api.py -fp ...` @@ -284,9 +387,17 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16) +def handle_control(command): + if command == "restart": + os.execl(g_config.python_exec, g_config.python_exec, *sys.argv) + elif command == "exit": + os.kill(os.getpid(), signal.SIGTERM) + exit(0) + + def handle_change(path, text, language): if is_empty(path, text, language): - raise HTTPException(status_code=400, detail='缺少任意一项以下参数: "path", "text", "language"') + return JSONResponse({"code": 400, "message": '缺少任意一项以下参数: "path", "text", "language"'}, status_code=400) if path != "" or path is not None: default_refer.path = path @@ -303,13 +414,7 @@ def handle_change(path, text, language): return JSONResponse({"code": 0, "message": "Success"}, status_code=200) -def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language): - if command == "/restart": - os.execl(g_config.python_exec, g_config.python_exec, *sys.argv) - elif command == "/exit": - os.kill(os.getpid(), signal.SIGTERM) - exit(0) - +def handle(refer_wav_path, prompt_text, prompt_language, text, text_language): if ( refer_wav_path == "" or refer_wav_path is None or prompt_text == "" or prompt_text is None @@ -321,7 +426,7 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan default_refer.language, ) if not default_refer.is_ready(): - raise HTTPException(status_code=400, detail="未指定参考音频且接口无预设") + return JSONResponse({"code": 400, "message": "未指定参考音频且接口无预设"}, status_code=400) with torch.no_grad(): gen = get_tts_wav( @@ -340,30 +445,40 @@ def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_lan app = FastAPI() +@app.post("/control") +async def control(request: Request): + json_post_raw = await request.json() + return handle_control(json_post_raw.get("command")) + + +@app.get("/control") +async def control(command: str = None): + return handle_control(command) + + @app.post("/change_refer") async def change_refer(request: Request): json_post_raw = await request.json() return handle_change( - json_post_raw.get("path"), - json_post_raw.get("text"), - json_post_raw.get("language") + json_post_raw.get("refer_wav_path"), + json_post_raw.get("prompt_text"), + json_post_raw.get("prompt_language") ) @app.get("/change_refer") async def change_refer( - path: str = None, - text: str = None, - language: str = None + refer_wav_path: str = None, + prompt_text: str = None, + prompt_language: str = None ): - return handle_change(path, text, language) + return handle_change(refer_wav_path, prompt_text, prompt_language) @app.post("/") async def tts_endpoint(request: Request): json_post_raw = await request.json() return handle( - json_post_raw.get("command"), json_post_raw.get("refer_wav_path"), json_post_raw.get("prompt_text"), json_post_raw.get("prompt_language"), @@ -374,14 +489,13 @@ async def tts_endpoint(request: Request): @app.get("/") async def tts_endpoint( - command: str = None, refer_wav_path: str = None, prompt_text: str = None, prompt_language: str = None, text: str = None, text_language: str = 
None, ): - return handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language) + return handle(refer_wav_path, prompt_text, prompt_language, text, text_language) if __name__ == "__main__": From e77c315fbd66432a8c6223e9a480b4e198c333f0 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Thu, 25 Jan 2024 22:28:39 +0800 Subject: [PATCH 061/126] Update fr_FR.json --- i18n/locale/fr_FR.json | 369 ++++++++++++++++++++++++++++------------- 1 file changed, 255 insertions(+), 114 deletions(-) diff --git a/i18n/locale/fr_FR.json b/i18n/locale/fr_FR.json index db93e9a..4e0b963 100644 --- a/i18n/locale/fr_FR.json +++ b/i18n/locale/fr_FR.json @@ -1,135 +1,276 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >=3 : appliquer un filtrage médian aux résultats de la reconnaissance de la hauteur de récolte. La valeur représente le rayon du filtre et peut réduire la respiration.", - "A模型权重": "Poids (w) pour le modèle A :", - "A模型路径": "Chemin d'accès au modèle A :", - "B模型路径": "Chemin d'accès au modèle B :", + "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, votre carte graphique n'est pas compatible avec l'entraînement.", + "UVR5已开启": "UVR5 est activé", + "UVR5已关闭": "UVR5 est désactivé", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs et les diffuseurs du son exporté par le logiciel en assument l'entière responsabilité.
Si vous n'acceptez pas ces termes, vous ne pouvez ni utiliser ni citer aucun code ou fichier à l'intérieur du package. Voir LICENSE dans le répertoire racine pour plus de détails.", + "0-前置数据集获取工具": "0-Outil de récupération de jeu de données préalable", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Outil de séparation de la voix humaine et de l'accompagnement UVR5 & suppression de la réverbération et du retard", + "是否开启UVR5-WebUI": "Activer UVR5-WebUI", + "UVR5进程输出信息": "Informations de processus UVR5", + "0b-语音切分工具": "0b-Outil de découpage vocal", + "音频自动切分输入路径,可文件可文件夹": "Chemin d'entrée automatique de découpage audio, peut être un fichier ou un dossier", + "切分后的子音频的输出根目录": "Répertoire racine de sortie des sous-audios après découpage", + "threshold:音量小于这个值视作静音的备选切割点": "seuil: le volume inférieur à cette valeur est considéré comme un point de coupe silencieux alternatif", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: longueur minimale de chaque segment, si le premier segment est trop court, il est continué avec le segment suivant jusqu'à dépasser cette valeur", + "min_interval:最短切割间隔": "min_interval: intervalle de coupe minimum", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: comment calculer la courbe de volume, plus petit pour une précision plus élevée mais une charge de calcul plus élevée (ce n'est pas une meilleure précision)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept: durée maximale de silence après la coupe", + "开启语音切割": "Activer le découpage vocal", + "终止语音切割": "Arrêter le découpage vocal", + "max:归一化后最大值多少": "max: valeur maximale après normalisation", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion d'audio normalisé mélangé", + "切割使用的进程数": "Nombre de processus utilisés pour le découpage", + "语音切割进程输出信息": "Informations de processus de découpage vocal", + "0c-中文批量离线ASR工具": "0c-Outil chinois de transcription automatique hors ligne en masse", + "开启离线批量ASR": "Activer la transcription automatique hors ligne en masse", + "终止ASR进程": "Arrêter le processus ASR", + "批量ASR(中文only)输入文件夹路径": "Chemin du dossier d'entrée pour la transcription automatique hors ligne en masse (chinois uniquement)", + "ASR进程输出信息": "Informations de processus ASR", + "0d-语音文本校对标注工具": "0d-Outil de correction et d'annotation de texte vocal", + "是否开启打标WebUI": "Activer l'interface Web d'annotation", + "打标数据标注文件路径": "Chemin du fichier d'annotation des données annotées", + "打标工具进程输出信息": "Informations de processus de l'outil d'annotation", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*Nom de l'expérience/modèle", + "显卡信息": "Informations sur la carte graphique", + "预训练的SoVITS-G模型路径": "Chemin du modèle SoVITS-G pré-entraîné", + "预训练的SoVITS-D模型路径": "Chemin du modèle SoVITS-D pré-entraîné", + "预训练的GPT模型路径": "Chemin du modèle GPT pré-entraîné", + "1A-训练集格式化工具": "1A-Outil de formatage du jeu de données d'entraînement", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "Les fichiers et dossiers commençant par 23456 devraient être présents dans le répertoire logs/nom de l'expérience", + "*文本标注文件": "*Fichier d'annotation de texte", + "*训练集音频文件目录": "*Répertoire des fichiers audio d'entraînement", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Répertoire des fichiers audio d'entraînement - concaténer avec les noms de fichiers correspondants dans le fichier de liste", + "1Aa-文本内容": "1Aa-Contenu du texte", + "GPU卡号以-分割,每个卡号一个进程": "Numéro de carte GPU séparé par des tirets, un processus par numéro de carte", + "预训练的中文BERT模型路径": "Chemin du modèle BERT chinois pré-entraîné", + "开启文本获取": "Activer l'extraction de texte", + "终止文本获取进程": "Arrêter 
le processus d'extraction de texte", + "文本进程输出信息": "Informations de processus de texte", + "1Ab-SSL自监督特征提取": "1Ab-Extraction de caractéristiques auto-supervisée SSL", + "预训练的SSL模型路径": "Chemin du modèle SSL pré-entraîné", + "开启SSL提取": "Activer l'extraction SSL", + "终止SSL提取进程": "Arrêter le processus d'extraction SSL", + "SSL进程输出信息": "Informations de processus SSL", + "1Ac-语义token提取": "1Ac-Extraction de jetons sémantiques", + "开启语义token提取": "Activer l'extraction de jetons sémantiques", + "终止语义token提取进程": "Arrêter le processus d'extraction de jetons sémantiques", + "语义token提取进程输出信息": "Informations de processus d'extraction de jetons sémantiques", + "1Aabc-训练集格式化一键三连": "1Aabc-Formatage en un clic du jeu de données d'entraînement", + "开启一键三连": "Activer l'un clic trois connexions", + "终止一键三连": "Arrêter l'un clic trois connexions", + "一键三连进程输出信息": "Informations de processus de l'un clic trois connexions", + "1B-微调训练": "1B-Entraînement fin", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entraînement SoVITS. Les fichiers de modèle destinés au partage sont enregistrés sous SoVITS_weights.", + "每张显卡的batch_size": "Taille de lot par carte graphique", + "总训练轮数total_epoch,不建议太高": "Nombre total d'époques d'entraînement, pas recommandé d'être trop élevé", + "文本模块学习率权重": "Poids du taux d'apprentissage du module de texte", + "保存频率save_every_epoch": "Fréquence de sauvegarde (sauvegarder à chaque époque)", + "是否仅保存最新的ckpt文件以节省硬盘空间": "Sauvegarder uniquement le dernier fichier ckpt pour économiser de l'espace disque", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "Sauvegarder le petit modèle final dans le dossier weights à chaque point de sauvegarde", + "开启SoVITS训练": "Activer l'entraînement SoVITS", + "终止SoVITS训练": "Arrêter l'entraînement SoVITS", + "SoVITS训练进程输出信息": "Informations de processus d'entraînement SoVITS", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entraînement GPT. Les fichiers de modèle destinés au partage sont enregistrés sous GPT_weights.", + "总训练轮数total_epoch": "Nombre total d'époques d'entraînement", + "开启GPT训练": "Activer l'entraînement GPT", + "终止GPT训练": "Arrêter l'entraînement GPT", + "GPT训练进程输出信息": "Informations de processus d'entraînement GPT", + "1C-推理": "1C-Inférence", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choisissez le modèle entraîné stocké sous SoVITS_weights et GPT_weights. Par défaut, l'un d'eux est un modèle de base pour l'expérience de TTS Zero Shot de 5 secondes.", + "*GPT模型列表": "*Liste des modèles GPT", + "*SoVITS模型列表": "*Liste des modèles SoVITS", + "GPU卡号,只能填1个整数": "Numéro de carte GPU, ne peut contenir qu'un seul entier", + "刷新模型路径": "Actualiser le chemin du modèle", + "是否开启TTS推理WebUI": "Activer l'interface Web d'inférence TTS", + "TTS推理WebUI进程输出信息": "Informations de processus de l'interface Web d'inférence TTS", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Modification de la voix", + "施工中,请静候佳音": "En construction, veuillez attendre patiemment", + "TTS推理进程已开启": "Le processus d'inférence TTS est en cours", + "TTS推理进程已关闭": "Le processus d'inférence TTS est terminé", + "打标工具WebUI已开启": "L'interface Web de l'outil d'annotation est en cours", + "打标工具WebUI已关闭": "L'interface Web de l'outil d'annotation est terminée", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs et les diffuseurs du son exporté par le logiciel en assument l'entière responsabilité. 
Si vous n'acceptez pas ces termes, vous ne pouvez ni utiliser ni citer aucun code ou fichier à l'intérieur du package. Voir LICENSE dans le répertoire racine pour plus de détails.", + "*请上传并填写参考信息": "*Veuillez télécharger et remplir les informations de référence", + "*请填写需要合成的目标文本": "*Veuillez remplir le texte cible à synthétiser", + "ASR任务开启:%s": "Tâche ASR activée : %s", + "GPT训练完成": "Entraînement GPT terminé", + "GPT训练开始:%s": "Entraînement GPT commencé : %s", + "SSL提取进程执行中": "Processus d'extraction SSL en cours", + "SSL提取进程结束": "Processus d'extraction SSL terminé", + "SoVITS训练完成": "Entraînement SoVITS terminé", + "SoVITS训练开始:%s": "Entraînement SoVITS commencé : %s", + "一键三连中途报错": "Erreur intermédiaire dans la séquence d'un clic trois connexions", + "一键三连进程结束": "Processus de séquence d'un clic trois connexions terminé", + "中文": "Chinois", + "凑50字一切": "Assembler 50 mots tout", + "凑五句一切": "Assembler cinq phrases tout", + "切分后文本": "Texte après découpage", + "切割执行中": "Découpage en cours", + "切割结束": "Découpage terminé", + "参考音频的文本": "Texte de l'audio de référence", + "参考音频的语种": "Langue de l'audio de référence", + "合成语音": "Synthèse vocale", + "后续将支持混合语种编码文本输入。": "Prise en charge ultérieure du codage de texte avec des langues mixtes.", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "Une tâche ASR est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "Une tâche d'entraînement GPT est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "Une tâche d'extraction SSL est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "Une tâche d'entraînement SoVITS est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "Une tâche d'une séquence d'un clic trois connexions est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "Une tâche de découpage est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "Une tâche de texte est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "Une tâche d'extraction de jetons sémantiques est déjà en cours. Vous devez d'abord l'arrêter avant de démarrer une nouvelle tâche.", + "已终止ASR进程": "Processus ASR arrêté", + "已终止GPT训练": "Entraînement GPT arrêté", + "已终止SoVITS训练": "Entraînement SoVITS arrêté", + "已终止所有1a进程": "Tous les processus 1a ont été arrêtés", + "已终止所有1b进程": "Tous les processus 1b ont été arrêtés", + "已终止所有一键三连进程": "Tous les processus d'une séquence d'un clic trois connexions ont été arrêtés", + "已终止所有切割进程": "Tous les processus de découpage ont été arrêtés", + "已终止所有语义token进程": "Tous les processus de jetons sémantiques ont été arrêtés", + "按中文句号。切": "Couper selon les points en chinois.", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Outil de découpage de texte. Un texte trop long peut ne pas donner un bon résultat, donc il est recommandé de le couper d'abord s'il est trop long. 
La synthèse se fera en séparant le texte par les sauts de ligne puis en les assemblant.", + "文本进程执行中": "Processus de texte en cours", + "文本进程结束": "Processus de texte terminé", + "日文": "Japonais", + "英文": "Anglais", + "语义token提取进程执行中": "Processus d'extraction de jetons sémantiques en cours", + "语义token提取进程结束": "Processus d'extraction de jetons sémantiques terminé", + "请上传参考音频": "Veuillez télécharger l'audio de référence", + "输入路径不存在": "Le chemin d'entrée n'existe pas", + "输入路径存在但既不是文件也不是文件夹": "Le chemin d'entrée existe mais n'est ni un fichier ni un dossier", + "输出的语音": "Audio de sortie", + "进度:1a-done": "Progression : 1a-done", + "进度:1a-done, 1b-ing": "Progression : 1a-done, 1b-ing", + "进度:1a-ing": "Progression : 1a-ing", + "进度:1a1b-done": "Progression : 1a1b-done", + "进度:1a1b-done, 1cing": "Progression : 1a1b-done, 1cing", + "进度:all-done": "Progression : all-done", + "需要合成的切分前文本": "Texte préalable à la synthèse", + "需要合成的文本": "Texte à synthétiser", + "需要合成的语种": "Langue de synthèse requise", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >= 3, utilisez le résultat de la reconnaissance de hauteur de récolte avec un filtre médian, la valeur est le rayon du filtre, son utilisation peut atténuer les sons sourds", + "A模型权重": "Poids du modèle A", + "A模型路径": "Chemin du modèle A", + "B模型路径": "Chemin du modèle B", "E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0 (facultatif). Une hauteur par ligne. Remplace la fréquence fondamentale par défaut et la modulation de la hauteur :", - "Index Rate": "Taux d'indexation", - "Onnx导出": "Exporter en ONNX", - "Onnx输出路径": "Chemin d'exportation ONNX :", - "RVC模型路径": "Chemin du modèle RVC :", - "ckpt处理": "Traitement des fichiers .ckpt", - "harvest进程数": "Nombre de processus CPU utilisés pour l'algorithme de reconnaissance de la hauteur (pitch) dans le cadre de la récolte (harvest).", - "index文件路径不可包含中文": "Le chemin du fichier d'index ne doit pas contenir de caractères chinois.", - "pth文件路径不可包含中文": "Le chemin du fichier .pth ne doit pas contenir de caractères chinois.", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte RMVPE : séparez les index GPU par des tirets \"-\", par exemple, 0-0-1 pour utiliser 2 processus sur GPU0 et 1 processus sur GPU1.", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration expérimentale. Les données expérimentales sont stockées dans le dossier 'logs', avec chaque expérience ayant un dossier distinct. Entrez manuellement le chemin du nom de l'expérience, qui contient la configuration expérimentale, les journaux et les fichiers de modèle entraînés.", - "step1:正在处理数据": "Étape 1 : Traitement des données en cours.", - "step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur et extraction des caractéristiques en cours.", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers du dossier d'entraînement qui peuvent être décodés en fichiers audio et réalisation d'une normalisation par tranches. Génère 2 dossiers wav dans le répertoire de l'expérience. 
Actuellement, seule la formation avec un seul chanteur/locuteur est prise en charge.", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Utilisez le CPU pour extraire la hauteur (si le modèle le permet), utilisez le GPU pour extraire les caractéristiques (sélectionnez l'index du GPU) :", - "step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et démarrez l'entraînement du modèle ainsi que l'indexation.", - "step3a:正在训练模型": "Étape 3a : L'entraînement du modèle a commencé.", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0, optionnel, une ligne par hauteur de ton, remplace F0 et la hauteur de ton par défaut", + "Index Rate": "Taux d'index", + "Onnx导出": "Exportation Onnx", + "Onnx输出路径": "Chemin d'exportation Onnx", + "RVC模型路径": "Chemin du modèle RVC", + "ckpt处理": "Traitement des points de contrôle", + "harvest进程数": "Nombre de processus de récolte", + "index文件路径不可包含中文": "Le chemin du fichier d'index ne peut pas contenir de caractères chinois", + "pth文件路径不可包含中文": "Le chemin du fichier pth ne peut pas contenir de caractères chinois", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuration des numéros de carte rmvpe : séparez les numéros de carte utilisés en entrée par des tirets, par exemple 0-0-1 signifie 2 processus sur la carte 0 et 1 processus sur la carte 1", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Étape 1 : Remplissez la configuration de l'expérience. Les données de l'expérience sont stockées dans le dossier logs, chaque expérience a son propre dossier. Vous devez entrer manuellement le chemin du nom de l'expérience, qui contient la configuration de l'expérience, les journaux et les fichiers de modèle entraînés.", + "step1:正在处理数据": "Étape 1 : Traitement des données en cours", + "step2:正在提取音高&正在提取特征": "Étape 2 : Extraction de la hauteur tonale et des caractéristiques en cours", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Étape 2a : Parcours automatique de tous les fichiers décodables en audio dans le dossier d'entraînement et normalisation par découpage. Deux dossiers wav sont générés dans le répertoire de l'expérience. Actuellement, seule la formation individuelle est prise en charge.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Étape 2b : Extraction de la hauteur tonale avec le CPU (si le modèle a une hauteur tonale) et extraction des caractéristiques avec le GPU (choisissez le numéro de la carte)", + "step3: 填写训练设置, 开始训练模型和索引": "Étape 3 : Remplissez les paramètres d'entraînement et commencez l'entraînement du modèle et de l'index", + "step3a:正在训练模型": "Étape 3a : Entraînement du modèle en cours", "一键训练": "Entraînement en un clic", - "也可批量输入音频文件, 二选一, 优先读文件夹": "Il est également possible d'importer plusieurs fichiers audio. Si un chemin de dossier existe, cette entrée est ignorée.", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Traitement en lot pour la séparation de la voix et de l'accompagnement vocal à l'aide du modèle UVR5.
Exemple d'un format de chemin de dossier valide : D:\\chemin\\vers\\dossier\\d'entrée (copiez-le depuis la barre d'adresse du gestionnaire de fichiers).
Le modèle est divisé en trois catégories :
1. Préserver la voix : Choisissez cette option pour l'audio sans harmonies. Elle préserve la voix mieux que HP5. Il comprend deux modèles intégrés : HP2 et HP3. HP3 peut légèrement laisser passer l'accompagnement mais préserve légèrement mieux la voix que HP2.
2. Préserver uniquement la voix principale : Choisissez cette option pour l'audio avec harmonies. Cela peut affaiblir la voix principale. Il comprend un modèle intégré : HP5.
3. Modèles de suppression de la réverbération et du délai (par FoxJoy) :
  (1) MDX-Net : Le meilleur choix pour la suppression de la réverbération stéréo, mais ne peut pas supprimer la réverbération mono.
  (234) DeEcho : Supprime les effets de délai. Le mode Aggressive supprime plus efficacement que le mode Normal. DeReverb supprime également la réverbération et peut supprimer la réverbération mono, mais pas très efficacement pour les contenus à haute fréquence fortement réverbérés.
Notes sur la suppression de la réverbération et du délai :
1. Le temps de traitement pour le modèle DeEcho-DeReverb est environ deux fois plus long que pour les autres deux modèles DeEcho.
2. Le modèle MDX-Net-Dereverb est assez lent.
3. La configuration la plus propre recommandée est d'appliquer d'abord MDX-Net, puis DeEcho-Aggressive.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Entrez le(s) index GPU séparé(s) par '-', par exemple, 0-1-2 pour utiliser les GPU 0, 1 et 2 :", - "伴奏人声分离&去混响&去回声": "Séparation des voix/accompagnement et suppression de la réverbération", - "使用模型采样率": "使用模型采样率", - "使用设备采样率": "使用设备采样率", - "保存名": "Nom de sauvegarde :", - "保存的文件名, 默认空为和源文件同名": "Nom du fichier de sauvegarde (par défaut : identique au nom du fichier source) :", - "保存的模型名不带后缀": "Nom du modèle enregistré (sans extension) :", - "保存频率save_every_epoch": "Fréquence de sauvegarde (save_every_epoch) :", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes sourdes et les bruits de respiration pour éviter les artefacts tels que le déchirement dans la musique électronique. Réglez à 0,5 pour désactiver. Diminuez la valeur pour renforcer la protection, mais cela peut réduire la précision de l'indexation :", + "也可批量输入音频文件, 二选一, 优先读文件夹": "Également possible d'entrer en lot des fichiers audio, au choix, privilégiez la lecture du dossier", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Numéros de carte utilisés en entrée séparés par des tirets, par exemple 0-1-2 Utilisez les cartes 0, 1 et 2", + "伴奏人声分离&去混响&去回声": "Séparation de la voix et de l'accompagnement, suppression de la réverbération et de l'écho", + "使用模型采样率": "Taux d'échantillonnage du modèle", + "使用设备采样率": "Taux d'échantillonnage de l'appareil", + "保存名": "Nom de sauvegarde", + "保存的文件名, 默认空为和源文件同名": "Nom de fichier sauvegardé, par défaut vide pour avoir le même nom que le fichier source", + "保存的模型名不带后缀": "Nom du modèle sauvegardé sans suffixe", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protéger les consonnes claires et les sons de respiration, éviter les artefacts tels que le déchirement du son électronique, tirer à 0.5 pour désactiver, diminuer pour augmenter la protection mais cela peut réduire l'efficacité de l'indexation", "修改": "Modifier", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pris en charge pour les petits fichiers de modèle extraits du dossier 'weights')", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifier les informations du modèle (uniquement pour les petits fichiers de modèle extraits sous le dossier weights)", "停止音频转换": "Arrêter la conversion audio", - "全流程结束!": "Toutes les étapes ont été terminées !", - "刷新音色列表和索引路径": "Actualiser la liste des voix et le vers l'index.", - "加载模型": "Charger le modèle.", - "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D :", - "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G :", - "单次推理": "单次推理", - "卸载音色省显存": "Décharger la voix pour économiser la mémoire GPU.", - "变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :", - "后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. 
Réglez sur 0 pour ne pas effectuer de rééchantillonnage :", + "全流程结束!": "Processus complet terminé !", + "刷新音色列表和索引路径": "Actualiser la liste des timbres et les chemins d'index", + "加载模型": "Charger le modèle", + "加载预训练底模D路径": "Charger le chemin du modèle de base pré-entraîné D", + "加载预训练底模G路径": "Charger le chemin du modèle de base pré-entraîné G", + "单次推理": "Inférence unique", + "卸载音色省显存": "Décharger le timbre pour économiser la mémoire vidéo", + "变调(整数, 半音数量, 升八度12降八度-12)": "Changer la tonalité (entier, quantité de demi-tons, monter d'une octave 12, descendre d'une octave -12)", + "后处理重采样至最终采样率,0为不进行重采样": "Re-échantillonnage en post-traitement à la fréquence d'échantillonnage finale, 0 pour ne pas effectuer de re-échantillonnage", "否": "Non", - "启用相位声码器": "启用相位声码器", + "启用相位声码器": "Activer le codeur de phase", "响应阈值": "Seuil de réponse", "响度因子": "Facteur de volume sonore", - "处理数据": "Traitement des données", - "导出Onnx模型": "Exporter le modèle au format ONNX.", - "导出文件格式": "Format de fichier d'exportation", - "常见问题解答": "FAQ (Foire Aux Questions)", + "处理数据": "Traiter les données", + "导出Onnx模型": "Exporter le modèle Onnx", + "导出文件格式": "Format d'exportation du fichier", + "常见问题解答": "Questions fréquemment posées", "常规设置": "Paramètres généraux", - "开始音频转换": "Démarrer la conversion audio.", - "很遗憾您这没有能用的显卡来支持您训练": "Malheureusement, il n'y a pas de GPU compatible disponible pour prendre en charge votre entrainement.", + "开始音频转换": "Démarrer la conversion audio", "性能设置": "Paramètres de performance", - "总训练轮数total_epoch": "Nombre total d'époques d'entraînement (total_epoch) :", - "批量推理": "批量推理", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot. Entrez le dossier contenant les fichiers audio à convertir ou téléchargez plusieurs fichiers audio. Les fichiers audio convertis seront enregistrés dans le dossier spécifié (par défaut : 'opt').", - "指定输出主人声文件夹": "Spécifiez le dossier de sortie pour les fichiers de voix :", - "指定输出文件夹": "Spécifiez le dossier de sortie :", - "指定输出非主人声文件夹": "Spécifiez le dossier de sortie pour l'accompagnement :", + "批量推理": "Inférence en lot", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversion en lot, entrez le dossier audio à convertir, ou téléchargez plusieurs fichiers audio, les fichiers convertis seront enregistrés dans le dossier spécifié (opt par défaut).", + "指定输出主人声文件夹": "Spécifier le dossier de sortie pour la voix principale", + "指定输出文件夹": "Spécifier le dossier de sortie", + "指定输出非主人声文件夹": "Spécifier le dossier de sortie pour la non-voix principale", "推理时间(ms):": "Temps d'inférence (ms) :", - "推理音色": "Voix pour l'inférence", + "推理音色": "Timbre d'inférence", "提取": "Extraire", - "提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour l'extraction de la hauteur et le traitement des données :", + "提取音高和处理数据使用的CPU进程数": "Nombre de processus CPU utilisés pour extraire la hauteur tonale et traiter les données", "是": "Oui", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Enregistrer uniquement le dernier fichier '.ckpt' pour économiser de l'espace disque :", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Enregistrer un petit modèle final dans le dossier 'weights' à chaque point de sauvegarde :", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache tous les ensembles d'entrainement dans la mémoire GPU. 
Mettre en cache de petits ensembles de données (moins de 10 minutes) peut accélérer l'entrainement, mais mettre en cache de grands ensembles de données consommera beaucoup de mémoire GPU et peut ne pas apporter beaucoup d'amélioration de vitesse :", - "显卡信息": "Informations sur la carte graphique (GPU)", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Ce logiciel est open source sous la licence MIT. L'auteur n'a aucun contrôle sur le logiciel. Les utilisateurs qui utilisent le logiciel et distribuent les sons exportés par le logiciel en sont entièrement responsables.
Si vous n'acceptez pas cette clause, vous ne pouvez pas utiliser ou faire référence à aucun code ni fichier contenu dans le package logiciel. Consultez le fichier Agreement-LICENSE.txt dans le répertoire racine pour plus de détails.", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Mettre en cache ou non tous les ensembles d'entraînement dans la mémoire vidéo. Pour les petites données de moins de 10 minutes, la mise en cache peut accélérer l'entraînement, mais pour les grandes données, la mise en cache peut épuiser la mémoire vidéo sans améliorer considérablement la vitesse.", "查看": "Voir", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Afficher les informations sur le modèle (uniquement pour les petits fichiers de modèle extraits du dossier \"weights\")", - "检索特征占比": "Rapport de recherche de caractéristiques (contrôle l'intensité de l'accent, un rapport trop élevé provoque des artefacts) :", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Voir les informations du modèle (uniquement pour les petits fichiers de modèle extraits sous le dossier weights)", + "检索特征占比": "Pourcentage des caractéristiques extraites", "模型": "Modèle", "模型推理": "Inférence du modèle", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin d'accès au modèle du grand fichier dans le dossier \"logs\"). Cette fonction est utile si vous souhaitez arrêter l'entrainement à mi-chemin et extraire et enregistrer manuellement un petit fichier de modèle, ou si vous souhaitez tester un modèle intermédiaire :", - "模型是否带音高指导": "Indique si le modèle dispose d'un guidage en hauteur :", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Indique si le modèle dispose d'un système de guidage de la hauteur (obligatoire pour le chant, facultatif pour la parole) :", - "模型是否带音高指导,1是0否": "Le modèle dispose-t-il d'un guide de hauteur (1 : oui, 0 : non) ?", - "模型版本型号": "Version de l'architecture du modèle :", - "模型融合, 可用于测试音色融合": "Fusion de modèles, peut être utilisée pour tester la fusion de timbres", - "模型路径": "Le chemin vers le modèle :", - "每张显卡的batch_size": "Taille du batch par GPU :", - "淡入淡出长度": "Longueur de la transition", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extraction du modèle (saisissez le chemin du modèle volumineux sous le dossier logs), utilisé lorsque l'entraînement est à mi-chemin, que vous ne voulez pas continuer l'entraînement, que le modèle n'a pas été automatiquement extrait et sauvegardé en tant que petit fichier, ou que vous souhaitez tester le modèle intermédiaire.", + "模型是否带音高指导": "Le modèle inclut-il un guidage en hauteur tonale", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Le modèle inclut-il un guidage en hauteur tonale (nécessaire pour le chant, facultatif pour la parole)", + "模型是否带音高指导,1是0否": "Le modèle inclut-il un guidage en hauteur tonale, 1 pour oui, 0 pour non", + "模型版本型号": "Numéro de version du modèle", + "模型融合, 可用于测试音色融合": "Fusion de modèles, utilisée pour tester la fusion des timbres", + "模型路径": "Chemin du modèle", + "淡入淡出长度": "Longueur du fondu enchaîné", "版本": "Version", "特征提取": "Extraction des caractéristiques", - "特征检索库文件路径,为空则使用下拉的选择结果": "Chemin d'accès au fichier d'index des caractéristiques. Laisser vide pour utiliser le résultat sélectionné dans la liste déroulante :", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Il est recommandé d'utiliser la clé +12 pour la conversion homme-femme et la clé -12 pour la conversion femme-homme. 
Si la plage sonore est trop large et que la voix est déformée, vous pouvez également l'ajuster vous-même à la plage appropriée.", - "目标采样率": "Taux d'échantillonnage cible :", - "算法延迟(ms):": "Délais algorithmiques (ms):", - "自动检测index路径,下拉式选择(dropdown)": "Détecter automatiquement le chemin d'accès à l'index et le sélectionner dans la liste déroulante :", + "特征检索库文件路径,为空则使用下拉的选择结果": "Chemin du fichier de bibliothèque de recherche de caractéristiques, laisser vide pour utiliser le résultat de la liste déroulante", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommandation pour la transformation homme vers femme +12 clés, femme vers homme -12 clés, ajustez vous-même si l'étendue du son explose et provoque une distorsion de la voix.", + "目标采样率": "Taux d'échantillonnage cible", + "算法延迟(ms):": "Retard de l'algorithme (ms):", + "自动检测index路径,下拉式选择(dropdown)": "Détection automatique du chemin de l'index, choix dans la liste déroulante", "融合": "Fusion", - "要改的模型信息": "Informations sur le modèle à modifier :", - "要置入的模型信息": "Informations sur le modèle à placer :", - "训练": "Entraîner", + "要改的模型信息": "Informations du modèle à modifier", + "要置入的模型信息": "Informations du modèle à insérer", + "训练": "Entraînement", "训练模型": "Entraîner le modèle", "训练特征索引": "Entraîner l'index des caractéristiques", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé. Vous pouvez consulter les rapports d'entraînement dans la console ou dans le fichier 'train.log' situé dans le dossier de l'expérience.", - "请指定说话人id": "Veuillez spécifier l'ID de l'orateur ou du chanteur :", - "请选择index文件": "Veuillez sélectionner le fichier d'index", - "请选择pth文件": "Veuillez sélectionner le fichier pth", - "请选择说话人id": "Sélectionner l'ID de l'orateur ou du chanteur :", - "转换": "Convertir", - "输入实验名": "Saisissez le nom de l'expérience :", - "输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter :", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers) :", - "输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin d'accès du fichier audio à traiter (par défaut, l'exemple de format correct) :", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ajustez l'échelle de l'enveloppe de volume. Plus il est proche de 0, plus il imite le volume des voix originales. Cela peut aider à masquer les bruits et à rendre le volume plus naturel lorsqu'il est réglé relativement bas. 
Plus le volume est proche de 1, plus le volume sera fort et constant :", - "输入监听": "Moniteur vocal d'entrée", - "输入训练文件夹路径": "Indiquez le chemin d'accès au dossier d'entraînement :", - "输入设备": "Dispositif d'entrée", - "输入降噪": "Réduction du bruit d'entrée", - "输出信息": "Informations sur la sortie", - "输出变声": "Sortie voix convertie", - "输出设备": "Dispositif de sortie", - "输出降噪": "Réduction du bruit de sortie", - "输出音频(右下角三个点,点了可以下载)": "Exporter l'audio (cliquer sur les trois points dans le coin inférieur droit pour télécharger)", - "选择.index文件": "Sélectionner le fichier .index", - "选择.pth文件": "Sélectionner le fichier .pth", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Sélection de l'algorithme d'extraction de la hauteur, les voix d'entrée peuvent être accélérées avec pm, harvest a de bonnes basses mais est très lent, crepe est bon mais consomme beaucoup de ressources GPU.", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Sélectionnez l'algorithme d'extraction de la hauteur de ton (\"pm\" : extraction plus rapide mais parole de moindre qualité ; \"harvest\" : meilleure basse mais extrêmement lente ; \"crepe\" : meilleure qualité mais utilisation intensive du GPU), \"rmvpe\" : meilleure qualité et peu d'utilisation du GPU.", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Sélection de l'algorithme d'extraction de la hauteur : la chanson d'entrée peut être traitée plus rapidement par pm, avec une voix de haute qualité mais un CPU médiocre, par dio, harvest est meilleur mais plus lent, rmvpe est le meilleur, mais consomme légèrement le CPU/GPU.", - "采样率:": "采样率:", - "采样长度": "Longueur de l'échantillon", - "重载设备列表": "Recharger la liste des dispositifs", - "音调设置": "Réglages de la hauteur", - "音频设备(请使用同种类驱动)": "Périphérique audio (veuillez utiliser le même type de pilote)", - "音高算法": "algorithme de détection de la hauteur", - "额外推理时长": "Temps d'inférence supplémentaire" -} + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entraînement terminé, vous pouvez consulter les journaux d'entraînement de la console ou le fichier train.log dans le dossier d'expérience", + "请指定说话人id": "Veuillez spécifier l'ID du locuteur", + "请选择index文件": "Veuillez choisir le fichier d'index", + "请选择pth文件": "Veuillez choisir le fichier pth", + "请选择说话人id": "Veuillez choisir l'ID du locuteur", + "转换": "Conversion", + "输入实验名": "Nom de l'expérience d'entrée", + "输入待处理音频文件夹路径": "Entrez le chemin du dossier audio à traiter", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Entrez le chemin du dossier audio à traiter (copiez-le depuis la barre d'adresse du gestionnaire de fichiers)", + "输入待处理音频文件路径(默认是正确格式示例)": "Entrez le chemin du fichier audio à traiter (par défaut, c'est un exemple de format correct)", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Entrez le taux de fusion pour remplacer l'enveloppe de volume source par l'enveloppe de volume de sortie, plus proche de 1, plus l'enveloppe de sortie est utilisée", + "输入监听": "Entrée d'écoute", + "输入训练文件夹路径": "Entrez le chemin du dossier d'entraînement", + "输入设备": "Entrée de l'appareil", + "输入降噪": "Entrée de réduction du bruit", + "输出信息": "Sortie d'information", + "输出变声": "Sortie de la transformation de la voix", + "输出设备": "Sortie de l'appareil", + "输出降噪": "Sortie de réduction du bruit", + "输出音频(右下角三个点,点了可以下载)": "Sortie audio (trois points en bas à droite, cliquez pour télécharger)", + "选择.index文件": "Choisissez le fichier .index", + "选择.pth文件": "Choisissez le fichier .pth", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Choisissez l'algorithme 
d'extraction de hauteur tonale, vous pouvez utiliser pm pour accélérer l'entrée de la voix, harvest est bon pour les basses mais très lent, crepe a un bon effet mais utilise le GPU", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Choisissez l'algorithme d'extraction de hauteur tonale, vous pouvez utiliser pm pour accélérer l'entrée de la voix, harvest est bon pour les basses mais très lent, crepe a un bon effet mais utilise le GPU, rmvpe a le meilleur effet et utilise légèrement le GPU", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Choisissez l'algorithme d'extraction de hauteur tonale : utilisez pm pour accélérer l'entrée de la voix, une voix de haute qualité mais nécessite une meilleure CPU ; utilisez dio pour accélérer, harvest a une meilleure qualité mais est lent, rmvpe a le meilleur effet et utilise légèrement la CPU/GPU", + "采样率:": "Taux d'échantillonnage:", + "采样长度": "Longueur d'échantillonnage", + "重载设备列表": "Recharger la liste des appareils", + "音调设置": "Paramètres de tonalité", + "音频设备(请使用同种类驱动)": "Appareil audio (veuillez utiliser un pilote de même type)", + "音高算法": "Algorithme de hauteur tonale", + "额外推理时长": "Durée d'inférence supplémentaire" + } From 3a3174ad5aa3e9367f608bb810161e6803818b8e Mon Sep 17 00:00:00 2001 From: bfloat16 <38366253+bfloat16@users.noreply.github.com> Date: Thu, 25 Jan 2024 23:10:27 +0800 Subject: [PATCH 062/126] Remove redundant judgments --- webui.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/webui.py b/webui.py index 4461056..4b896ff 100644 --- a/webui.py +++ b/webui.py @@ -51,17 +51,13 @@ n_cpu=cpu_count() ngpu = torch.cuda.device_count() gpu_infos = [] mem = [] -if_gpu_ok = False if torch.cuda.is_available() or ngpu != 0: for i in range(ngpu): gpu_name = torch.cuda.get_device_name(i) - if any(value in gpu_name.upper()for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L"]): - # A10#A100#V100#A40#P40#M40#K80#A4500 - if_gpu_ok = True # 至少有一张能用的N卡 - gpu_infos.append("%s\t%s" % (i, gpu_name)) - mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4)) -if if_gpu_ok and len(gpu_infos) > 0: + gpu_infos.append("%s\t%s" % (i, gpu_name)) + mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4)) +if len(gpu_infos) > 0: gpu_info = "\n".join(gpu_infos) default_batch_size = min(mem) // 2 else: From 08074e1fb3900deae7171b68661994012d555b55 Mon Sep 17 00:00:00 2001 From: bfloat16 <38366253+bfloat16@users.noreply.github.com> Date: Thu, 25 Jan 2024 23:13:20 +0800 Subject: [PATCH 063/126] Update Windows Batch --- go-webui.bat | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/go-webui.bat b/go-webui.bat index 968a25c..be4135e 100644 --- a/go-webui.bat +++ b/go-webui.bat @@ -1,2 +1,4 @@ -runtime\python.exe webui.py +@echo off +chcp 65001 +"%~dp0\runtime\python.exe" "%~dp0\webui.py" pause \ No newline at end of file From 18349b20fb3f74cf09c16c661ceec8c36640901f Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 23:21:03 +0800 Subject: [PATCH 064/126] Update webui.py --- webui.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/webui.py b/webui.py index 4b896ff..70bf5f0 100644 --- a/webui.py +++ b/webui.py @@ -51,13 +51,17 @@ n_cpu=cpu_count() ngpu = torch.cuda.device_count() gpu_infos = [] mem = [] +if_gpu_ok = False if torch.cuda.is_available() or ngpu != 
0: for i in range(ngpu): gpu_name = torch.cuda.get_device_name(i) - gpu_infos.append("%s\t%s" % (i, gpu_name)) - mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4)) -if len(gpu_infos) > 0: + if any(value in gpu_name.upper()for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L","4060"]): + # A10#A100#V100#A40#P40#M40#K80#A4500 + if_gpu_ok = True # 至少有一张能用的N卡 + gpu_infos.append("%s\t%s" % (i, gpu_name)) + mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4)) +if if_gpu_ok and len(gpu_infos) > 0: gpu_info = "\n".join(gpu_infos) default_batch_size = min(mem) // 2 else: From 56fef8a59c3b871936002c05cca678edaf5d39da Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 23:27:47 +0800 Subject: [PATCH 065/126] Update 1-get-text.py --- GPT_SoVITS/prepare_datasets/1-get-text.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py index b4a145c..9499db4 100644 --- a/GPT_SoVITS/prepare_datasets/1-get-text.py +++ b/GPT_SoVITS/prepare_datasets/1-get-text.py @@ -41,12 +41,18 @@ def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path shutil.move(tmp_path, "%s/%s" % (dir, name)) + txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) if os.path.exists(txt_path) == False: bert_dir = "%s/3-bert" % (opt_dir) os.makedirs(opt_dir, exist_ok=True) os.makedirs(bert_dir, exist_ok=True) - device = "cuda:0" if torch.cuda.is_available() else "mps" +if torch.cuda.is_available(): + device = "cuda:0" +elif torch.backends.mps.is_available(): + device = "mps" +else: + device = "cpu" tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir) bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir) if is_half == True: From d796bd40b9e436eb374c08051680aeb1dc096f2f Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 23:28:08 +0800 Subject: [PATCH 066/126] Update 2-get-hubert-wav32k.py --- GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py index 31e8068..26c71b7 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py @@ -47,7 +47,12 @@ os.makedirs(wav32dir,exist_ok=True) maxx=0.95 alpha=0.5 -device="cuda:0" if torch.cuda.is_available() else "mps" +if torch.cuda.is_available(): + device = "cuda:0" +elif torch.backends.mps.is_available(): + device = "mps" +else: + device = "cpu" model=cnhubert.get_model() # is_half=False if(is_half==True): @@ -106,4 +111,4 @@ if(len(nan_fails)>0 and is_half==True): try: name2go(wav_name) except: - print(wav_name,traceback.format_exc()) \ No newline at end of file + print(wav_name,traceback.format_exc()) From 8e54a36f2c084705ff49b516687990f3c797c02e Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 25 Jan 2024 23:28:16 +0800 Subject: [PATCH 067/126] Update 3-get-semantic.py --- GPT_SoVITS/prepare_datasets/3-get-semantic.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py index 69eea07..a3cf0a3 100644 --- 
a/GPT_SoVITS/prepare_datasets/3-get-semantic.py +++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py @@ -38,7 +38,12 @@ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) if os.path.exists(semantic_path) == False: os.makedirs(opt_dir, exist_ok=True) - device = "cuda:0" if torch.cuda.is_available() else "mps" +if torch.cuda.is_available(): + device = "cuda" +elif torch.backends.mps.is_available(): + device = "mps" +else: + device = "cpu" hps = utils.get_hparams_from_file(s2config_path) vq_model = SynthesizerTrn( hps.data.filter_length // 2 + 1, From 7259cc95fd669c9290d6800079120f469070b5cf Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Fri, 26 Jan 2024 01:38:03 +0800 Subject: [PATCH 068/126] Update usage introduction, fix an error --- api.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/api.py b/api.py index 1b5b6a0..60ed9ff 100644 --- a/api.py +++ b/api.py @@ -13,7 +13,7 @@ `-dt` - `默认参考音频文本` `-dl` - `默认参考音频语种, "中文","英文","日文","zh","en","ja"` -`-d` - `推理设备, "cuda","cpu"` +`-d` - `推理设备, "cuda","cpu","mps"` `-a` - `绑定地址, 默认"127.0.0.1"` `-p` - `绑定端口, 默认9880, 可在 config.py 中指定` `-fp` - `覆盖 config.py 使用全精度` @@ -139,7 +139,6 @@ parser.add_argument("-dt", "--default_refer_text", type=str, default="", help=" parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu / mps") -parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1") parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") From 3029560356cec459a7c109cf08c2be5243a233ba Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Fri, 26 Jan 2024 01:40:18 +0800 Subject: [PATCH 069/126] Fix indentation issue --- GPT_SoVITS/prepare_datasets/1-get-text.py | 12 ++++++------ GPT_SoVITS/prepare_datasets/3-get-semantic.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py index 9499db4..88c9d85 100644 --- a/GPT_SoVITS/prepare_datasets/1-get-text.py +++ b/GPT_SoVITS/prepare_datasets/1-get-text.py @@ -47,12 +47,12 @@ if os.path.exists(txt_path) == False: bert_dir = "%s/3-bert" % (opt_dir) os.makedirs(opt_dir, exist_ok=True) os.makedirs(bert_dir, exist_ok=True) -if torch.cuda.is_available(): - device = "cuda:0" -elif torch.backends.mps.is_available(): - device = "mps" -else: - device = "cpu" + if torch.cuda.is_available(): + device = "cuda:0" + elif torch.backends.mps.is_available(): + device = "mps" + else: + device = "cpu" tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir) bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir) if is_half == True: diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py index a3cf0a3..9ab56a4 100644 --- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py +++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py @@ -38,12 +38,12 @@ semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) if os.path.exists(semantic_path) == False: os.makedirs(opt_dir, exist_ok=True) -if torch.cuda.is_available(): - device = "cuda" -elif torch.backends.mps.is_available(): - device = "mps" -else: - device = 
"cpu" + if torch.cuda.is_available(): + device = "cuda" + elif torch.backends.mps.is_available(): + device = "mps" + else: + device = "cpu" hps = utils.get_hparams_from_file(s2config_path) vq_model = SynthesizerTrn( hps.data.filter_length // 2 + 1, From afdce90c41f0be96f2ea33956ae310c25cf1f0e9 Mon Sep 17 00:00:00 2001 From: Lei Hao Date: Fri, 26 Jan 2024 10:34:22 +0800 Subject: [PATCH 070/126] Resolved the issue of duplicate inference in the Bert1 model --- GPT_SoVITS/inference_webui.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 246748a..11042c0 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -185,19 +185,22 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) phones1 = cleaned_text_to_sequence(phones1) texts = text.split("\n") audio_opt = [] + + if prompt_language == "zh": + bert1 = get_bert_feature(norm_text1, word2ph1).to(device) + else: + bert1 = torch.zeros( + (1024, len(phones1)), + dtype=torch.float16 if is_half == True else torch.float32, + ).to(device) + for text in texts: # 解决输入目标文本的空行导致报错的问题 if (len(text.strip()) == 0): continue phones2, word2ph2, norm_text2 = clean_text(text, text_language) phones2 = cleaned_text_to_sequence(phones2) - if prompt_language == "zh": - bert1 = get_bert_feature(norm_text1, word2ph1).to(device) - else: - bert1 = torch.zeros( - (1024, len(phones1)), - dtype=torch.float16 if is_half == True else torch.float32, - ).to(device) + if text_language == "zh": bert2 = get_bert_feature(norm_text2, word2ph2).to(device) else: From 177f3c6fc9268258d359a4320010722a368f7d3d Mon Sep 17 00:00:00 2001 From: KakaruHayate <97896816+KakaruHayate@users.noreply.github.com> Date: Fri, 26 Jan 2024 10:35:13 +0800 Subject: [PATCH 071/126] =?UTF-8?q?=E4=B8=AD=E8=8B=B1/=E6=97=A5=E8=8B=B1?= =?UTF-8?q?=E6=B7=B7=E5=90=88=E6=8E=A8=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 如题,支持在中文/日语模式下夹杂英语内容 --- GPT_SoVITS/inference_webui.py | 108 ++++++++++++++++++++++++++++++---- 1 file changed, 95 insertions(+), 13 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 7626bc4..1c5dab6 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -165,6 +165,83 @@ dict_language={ } +def splite_en_inf(sentence, language): + pattern = re.compile(r'[a-zA-Z. 
]+') + textlist = [] + langlist = [] + pos = 0 + for match in pattern.finditer(sentence): + start, end = match.span() + if start > pos: + textlist.append(sentence[pos:start]) + langlist.append(language) + textlist.append(sentence[start:end]) + langlist.append("en") + pos = end + if pos < len(sentence): + textlist.append(sentence[pos:]) + langlist.append(language) + + return textlist, langlist + + +def clean_text_inf(text, language): + phones, word2ph, norm_text = clean_text(text, language) + phones = cleaned_text_to_sequence(phones) + + return phones, word2ph, norm_text + + +def get_bert_inf(phones, word2ph, norm_text, language): + if language == "zh": + bert = get_bert_feature(norm_text, word2ph).to(device) + else: + bert = torch.zeros( + (1024, len(phones)), + dtype=torch.float16 if is_half == True else torch.float32, + ).to(device) + + return bert + + +def nonen_clean_text_inf(text, language): + textlist, langlist = splite_en_inf(text, language) + phones_list = [] + word2ph_list = [] + norm_text_list = [] + for i in range(len(textlist)): + lang = langlist[i] + phones, word2ph, norm_text = clean_text_inf(textlist[i], lang) + phones_list.append(phones) + if lang=="en" or "ja": + pass + else: + word2ph_list.append(word2ph) + norm_text_list.append(norm_text) + print(word2ph_list) + phones = sum(phones_list, []) + word2ph = sum(word2ph_list, []) + norm_text = ' '.join(norm_text_list) + + return phones, word2ph, norm_text + + +def nonen_get_bert_inf(text, language): + textlist, langlist = splite_en_inf(text, language) + print(textlist) + print(langlist) + bert_list = [] + for i in range(len(textlist)): + text = textlist[i] + lang = langlist[i] + phones, word2ph, norm_text = clean_text_inf(text, lang) + bert = get_bert_inf(phones, word2ph, norm_text, lang) + bert_list.append(bert) + bert = torch.cat(bert_list, dim=1) + + return bert + + def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language): t0 = ttime() prompt_text = prompt_text.strip("\n") @@ -194,27 +271,32 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) t1 = ttime() prompt_language = dict_language[prompt_language] text_language = dict_language[text_language] - phones1, word2ph1, norm_text1 = clean_text(prompt_text, prompt_language) - phones1 = cleaned_text_to_sequence(phones1) + if prompt_language == "en": + phones1, word2ph1, norm_text1 = clean_text_inf(prompt_text, prompt_language) + else: + phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language) texts = text.split("\n") audio_opt = [] for text in texts: # 解决输入目标文本的空行导致报错的问题 if (len(text.strip()) == 0): continue - phones2, word2ph2, norm_text2 = clean_text(text, text_language) - phones2 = cleaned_text_to_sequence(phones2) - if prompt_language == "zh": - bert1 = get_bert_feature(norm_text1, word2ph1).to(device) + + if text_language == "en": + phones2, word2ph2, norm_text2 = clean_text_inf(text, text_language) else: - bert1 = torch.zeros( - (1024, len(phones1)), - dtype=torch.float16 if is_half == True else torch.float32, - ).to(device) - if text_language == "zh": - bert2 = get_bert_feature(norm_text2, word2ph2).to(device) + phones2, word2ph2, norm_text2 = nonen_clean_text_inf(text, text_language) + + if prompt_language == "en": + bert1 = get_bert_inf(phones1, word2ph1, norm_text1, prompt_language) else: - bert2 = torch.zeros((1024, len(phones2))).to(bert1) + bert1 = nonen_get_bert_inf(prompt_text, prompt_language) + + if text_language == "en": + bert2 = get_bert_inf(phones2, word2ph2, norm_text2, 
text_language) + else: + bert2 = nonen_get_bert_inf(text, text_language) + bert = torch.cat([bert1, bert2], 1) all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0) From 813cf96e508ba1bb2c658f38c7cc77b797fb4082 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:49:33 +0800 Subject: [PATCH 072/126] Add files via upload --- tools/uvr5/webui.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index 71e7ebc..f1357e5 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -1,12 +1,11 @@ import os import traceback,gradio as gr import logging -from tools.i18n.i18n import I18nAuto +from i18n.i18n import I18nAuto i18n = I18nAuto() logger = logging.getLogger(__name__) -import librosa -import soundfile as sf +import ffmpeg import torch import sys from mdxnet import MDXNetDereverb @@ -20,8 +19,7 @@ for name in os.listdir(weight_uvr5_root): device=sys.argv[1] is_half=sys.argv[2] -webui_port_uvr5=int(sys.argv[3]) -is_share=eval(sys.argv[4]) + def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): infos = [] @@ -55,17 +53,16 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format need_reformat = 1 done = 0 try: - y, sr = librosa.load(inp_path, sr=None) - info = sf.info(inp_path) - channels = info.channels - if channels == 2 and sr == 44100: + info = ffmpeg.probe(inp_path, cmd="ffprobe") + if ( + info["streams"][0]["channels"] == 2 + and info["streams"][0]["sample_rate"] == "44100" + ): need_reformat = 0 pre_fun._path_audio_( inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 ) done = 1 - else: - need_reformat = 1 except: need_reformat = 1 traceback.print_exc() @@ -74,8 +71,10 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format os.path.join(os.environ["TEMP"]), os.path.basename(inp_path), ) - y_resampled = librosa.resample(y, sr, 44100) - sf.write(tmp_path, y_resampled, 44100, "PCM_16") + os.system( + "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" + % (inp_path, tmp_path) + ) inp_path = tmp_path try: if done == 0: @@ -116,10 +115,10 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format yield "\n".join(infos) -with gr.Blocks(title="RVC WebUI") as app: +with gr.Blocks(title="UVR5 WebUI") as app: gr.Markdown( value= - i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") + "MIT license. https://github.com/Anjok07/ultimatevocalremovergui" ) with gr.Tabs(): with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): @@ -144,7 +143,7 @@ with gr.Blocks(title="RVC WebUI") as app: minimum=0, maximum=20, step=1, - label=i18n("人声提取激进程度"), + label="人声提取激进程度", value=10, interactive=True, visible=False, # 先不开放调整 @@ -180,7 +179,6 @@ with gr.Blocks(title="RVC WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, - share=is_share, - server_port=webui_port_uvr5, + server_port=9873, quiet=True, -) +) \ No newline at end of file From fa0c06b62ce89dd5662ccf7dc37f0a19f49a9c53 Mon Sep 17 00:00:00 2001 From: KakaruHayate <97896816+KakaruHayate@users.noreply.github.com> Date: Fri, 26 Jan 2024 12:23:40 +0800 Subject: [PATCH 073/126] Update inference_webui.py https://github.com/RVC-Boss/GPT-SoVITS/pull/205 --- GPT_SoVITS/inference_webui.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 1c5dab6..8e826fa 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -277,6 +277,10 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language) texts = text.split("\n") audio_opt = [] + if prompt_language == "en": + bert1 = get_bert_inf(phones1, word2ph1, norm_text1, prompt_language) + else: + bert1 = nonen_get_bert_inf(prompt_text, prompt_language) for text in texts: # 解决输入目标文本的空行导致报错的问题 if (len(text.strip()) == 0): @@ -286,11 +290,6 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) phones2, word2ph2, norm_text2 = clean_text_inf(text, text_language) else: phones2, word2ph2, norm_text2 = nonen_clean_text_inf(text, text_language) - - if prompt_language == "en": - bert1 = get_bert_inf(phones1, word2ph1, norm_text1, prompt_language) - else: - bert1 = nonen_get_bert_inf(prompt_text, prompt_language) if text_language == "en": bert2 = get_bert_inf(phones2, word2ph2, norm_text2, text_language) From 0bcdf0155c340b32d18a33aaf7a96f43b8f1e91e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=82=A6?= Date: Fri, 26 Jan 2024 14:09:50 +0800 Subject: [PATCH 074/126] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E8=AE=B0=E5=BF=86=E5=8A=9F=E8=83=BD=EF=BC=8C=E4=B8=8D=E7=94=A8?= =?UTF-8?q?=E4=BA=8C=E6=AC=A1=E9=80=89=E6=8B=A9=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 添加模型记忆功能,不用二次选择模型 --- GPT_SoVITS/inference_webui.py | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index bb57183..fdee8d9 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -6,10 +6,25 @@ logging.getLogger("httpx").setLevel(logging.ERROR) logging.getLogger("asyncio").setLevel(logging.ERROR) import pdb -gpt_path = os.environ.get( - "gpt_path", "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" -) -sovits_path = os.environ.get("sovits_path", "pretrained_models/s2G488k.pth") +if os.path.exists("./gweight.txt"): + with open("./gweight.txt", 'r',encoding="utf-8") as file: + gweight_data = file.read() + gpt_path = os.environ.get( + "gpt_path", gweight_data) +else: + gpt_path = os.environ.get( + "gpt_path", 
"GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt") + +if os.path.exists("./sweight.txt"): + with open("./sweight.txt", 'r',encoding="utf-8") as file: + sweight_data = file.read() + sovits_path = os.environ.get("sovits_path", sweight_data) +else: + sovits_path = os.environ.get("sovits_path", "GPT_SoVITS/pretrained_models/s2G488k.pth") +# gpt_path = os.environ.get( +# "gpt_path", "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" +# ) +# sovits_path = os.environ.get("sovits_path", "pretrained_models/s2G488k.pth") cnhubert_base_path = os.environ.get( "cnhubert_base_path", "pretrained_models/chinese-hubert-base" ) @@ -124,6 +139,7 @@ def change_sovits_weights(sovits_path): vq_model = vq_model.to(device) vq_model.eval() print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) + with open("./sweight.txt","w",encoding="utf-8")as f:f.write(sovits_path) change_sovits_weights(sovits_path) def change_gpt_weights(gpt_path): @@ -140,6 +156,7 @@ def change_gpt_weights(gpt_path): t2s_model.eval() total = sum([param.nelement() for param in t2s_model.parameters()]) print("Number of parameter: %.2fM" % (total / 1e6)) + with open("./gweight.txt","w",encoding="utf-8")as f:f.write(gpt_path) change_gpt_weights(gpt_path) def get_spepc(hps, filename): From 8adcecd19ad2cba330a89d6d9a2744bd97f8846f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=82=A6?= Date: Fri, 26 Jan 2024 15:40:14 +0800 Subject: [PATCH 075/126] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=BC=95=E5=AF=BC?= =?UTF-8?q?=E9=9F=B3=E9=A2=91=E8=AE=B0=E5=BF=86=E3=80=81=E5=BC=95=E5=AF=BC?= =?UTF-8?q?=E9=9F=B3=E9=A2=91=E6=96=87=E6=9C=AC=E8=AE=B0=E5=BF=86=E3=80=81?= =?UTF-8?q?=E8=AF=AD=E7=A7=8D=E8=AE=B0=E5=BF=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 增加引导音频记忆、引导音频文本记忆、语种记忆 --- GPT_SoVITS/inference_webui.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index fdee8d9..9a9dd10 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -64,6 +64,23 @@ elif torch.backends.mps.is_available(): else: device = "cpu" +# 操作记忆功能 + +file_path = './audio_log.txt' + +upload_audio_path = None +upload_audio_text = "" +upload_audio_lanuage = "中文" + +if os.path.exists(file_path): + with open(file_path, 'r',encoding="utf-8") as file: + text_data = file.read() + text_data = text_data.split("|") + + upload_audio_path = text_data[0] + upload_audio_text = text_data[1] + upload_audio_lanuage = text_data[2] + tokenizer = AutoTokenizer.from_pretrained(bert_path) bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) if is_half == True: @@ -183,6 +200,7 @@ dict_language={ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language): + with open("./audio_log.txt","w",encoding="utf-8")as f:f.write(f"{wav_path_log}|{prompt_text}|{prompt_language}") t0 = ttime() prompt_text = prompt_text.strip("\n") prompt_language, text = prompt_language, text.strip("\n") From 16196b6f38fe1dd381440fabb434976669d47b40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=82=A6?= Date: Fri, 26 Jan 2024 15:42:49 +0800 Subject: [PATCH 076/126] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=BC=95=E5=AF=BC?= =?UTF-8?q?=E9=9F=B3=E9=A2=91=E8=AE=B0=E5=BF=86=E3=80=81=E5=BC=95=E5=AF=BC?= =?UTF-8?q?=E9=9F=B3=E9=A2=91=E6=96=87=E6=9C=AC=E8=AE=B0=E5=BF=86=E3=80=81?= =?UTF-8?q?=E8=AF=AD=E7=A7=8D=E8=AE=B0=E5=BF=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
增加引导音频记忆、引导音频文本记忆、语种记忆 --- GPT_SoVITS/inference_webui.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 9a9dd10..3b10fa0 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -418,10 +418,10 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: GPT_dropdown.change(change_gpt_weights,[GPT_dropdown],[]) gr.Markdown(value=i18n("*请上传并填写参考信息")) with gr.Row(): - inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath") - prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="") + inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath",value=upload_audio_path) + prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value=upload_audio_text) prompt_language = gr.Dropdown( - label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文") + label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n(upload_audio_lanuage) ) gr.Markdown(value=i18n("*请填写需要合成的目标文本")) with gr.Row(): From 2d1ddeca42db90c3fe2d0cd79480fd544d87f02b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 16:34:11 +0800 Subject: [PATCH 077/126] Add files via upload --- tools/uvr5/webui.py | 94 ++++++++++++++++++++++++--------------------- 1 file changed, 50 insertions(+), 44 deletions(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index f1357e5..97170bf 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -1,11 +1,12 @@ import os import traceback,gradio as gr import logging -from i18n.i18n import I18nAuto +from tools.i18n.i18n import I18nAuto i18n = I18nAuto() logger = logging.getLogger(__name__) -import ffmpeg +import librosa +import soundfile as sf import torch import sys from mdxnet import MDXNetDereverb @@ -19,7 +20,8 @@ for name in os.listdir(weight_uvr5_root): device=sys.argv[1] is_half=sys.argv[2] - +webui_port_uvr5=int(sys.argv[3]) +is_share=eval(sys.argv[4]) def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): infos = [] @@ -50,40 +52,32 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format paths = [path.name for path in paths] for path in paths: inp_path = os.path.join(inp_root, path) - need_reformat = 1 - done = 0 + if(os.path.isfile(inp_path)==False):continue try: - info = ffmpeg.probe(inp_path, cmd="ffprobe") - if ( - info["streams"][0]["channels"] == 2 - and info["streams"][0]["sample_rate"] == "44100" - ): - need_reformat = 0 - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 + done = 0 + try: + y, sr = librosa.load(inp_path, sr=None) + info = sf.info(inp_path) + channels = info.channels + if channels == 2 and sr == 44100: + need_reformat = 0 + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 + ) + done = 1 + else: + need_reformat = 1 + except: + need_reformat = 1 + traceback.print_exc() + if need_reformat == 1: + tmp_path = "%s/%s.reformatted.wav" % ( + os.path.join(os.environ["TEMP"]), + os.path.basename(inp_path), ) - done = 1 - except: - need_reformat = 1 - traceback.print_exc() - if need_reformat == 1: - tmp_path = "%s/%s.reformatted.wav" % ( - os.path.join(os.environ["TEMP"]), - os.path.basename(inp_path), - ) - os.system( - "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" - % (inp_path, tmp_path) - ) - inp_path = tmp_path - try: - if done == 0: - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 - ) - 
infos.append("%s->Success" % (os.path.basename(inp_path))) - yield "\n".join(infos) - except: + y_resampled = librosa.resample(y, sr, 44100) + sf.write(tmp_path, y_resampled, 44100, "PCM_16") + inp_path = tmp_path try: if done == 0: pre_fun._path_audio_( @@ -92,10 +86,21 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format infos.append("%s->Success" % (os.path.basename(inp_path))) yield "\n".join(infos) except: - infos.append( - "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) - ) - yield "\n".join(infos) + try: + if done == 0: + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0 + ) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: + infos.append( + "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) + ) + yield "\n".join(infos) + except: + infos.append("Oh my god. %s->%s"%(os.path.basename(inp_path), traceback.format_exc())) + yield "\n".join(infos) except: infos.append(traceback.format_exc()) yield "\n".join(infos) @@ -115,10 +120,10 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format yield "\n".join(infos) -with gr.Blocks(title="UVR5 WebUI") as app: +with gr.Blocks(title="RVC WebUI") as app: gr.Markdown( value= - "MIT license. https://github.com/Anjok07/ultimatevocalremovergui" + i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") ) with gr.Tabs(): with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): @@ -143,7 +148,7 @@ with gr.Blocks(title="UVR5 WebUI") as app: minimum=0, maximum=20, step=1, - label="人声提取激进程度", + label=i18n("人声提取激进程度"), value=10, interactive=True, visible=False, # 先不开放调整 @@ -179,6 +184,7 @@ with gr.Blocks(title="UVR5 WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, - server_port=9873, + share=is_share, + server_port=webui_port_uvr5, quiet=True, -) \ No newline at end of file +) From 725471b0bfe527153b2712ac80dc617a06049d50 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 17:54:21 +0800 Subject: [PATCH 078/126] =?UTF-8?q?Revert=20"=E5=BC=95=E5=AF=BC=E9=9F=B3?= =?UTF-8?q?=E9=A2=91=E8=AE=B0=E5=BF=86=EF=BC=8C=E6=96=87=E6=9C=AC=E8=AE=B0?= =?UTF-8?q?=E5=BF=86=E5=92=8C=E8=AF=AD=E7=A7=8D=E8=AE=B0=E5=BF=86"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- GPT_SoVITS/inference_webui.py | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 1ac11c3..574b9d8 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -67,23 +67,6 @@ elif torch.backends.mps.is_available(): else: device = "cpu" -# 操作记忆功能 - -file_path = './audio_log.txt' - -upload_audio_path = None -upload_audio_text = "" -upload_audio_lanuage = "中文" - -if os.path.exists(file_path): - with open(file_path, 'r',encoding="utf-8") as file: - text_data = file.read() - text_data = text_data.split("|") - - upload_audio_path = text_data[0] - upload_audio_text = text_data[1] - upload_audio_lanuage = text_data[2] - tokenizer = AutoTokenizer.from_pretrained(bert_path) bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) if is_half == True: @@ -280,7 +263,6 @@ def nonen_get_bert_inf(text, language): def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language): - with open("./audio_log.txt","w",encoding="utf-8")as f:f.write(f"{wav_path_log}|{prompt_text}|{prompt_language}") t0 = ttime() prompt_text = prompt_text.strip("\n") prompt_language, text = prompt_language, text.strip("\n") @@ -500,10 +482,10 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: GPT_dropdown.change(change_gpt_weights,[GPT_dropdown],[]) gr.Markdown(value=i18n("*请上传并填写参考信息")) with gr.Row(): - inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath",value=upload_audio_path) - prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value=upload_audio_text) + inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath") + prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="") prompt_language = gr.Dropdown( - label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n(upload_audio_lanuage) + label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文") ) gr.Markdown(value=i18n("*请填写需要合成的目标文本")) with gr.Row(): From 9fe955c1bf5f94546c9f699141281f2661c8a180 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:01:15 +0800 Subject: [PATCH 079/126] Add files via upload --- GPT_SoVITS/inference_webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 574b9d8..51e18a0 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -296,7 +296,7 @@ def 
get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) phones1, word2ph1, norm_text1 = clean_text_inf(prompt_text, prompt_language) else: phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language) - texts = text.split("\n") + texts = text.replace("\n\n","\n").replace("\n\n","\n").replace("\n\n","\n").split("\n") audio_opt = [] if prompt_language == "en": bert1 = get_bert_inf(phones1, word2ph1, norm_text1, prompt_language) From 84ee471936b332bc2ccee024d6dfdedab4f0dc7b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:11:09 +0800 Subject: [PATCH 080/126] Add files via upload --- config.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/config.py b/config.py index 897f53c..a95daf9 100644 --- a/config.py +++ b/config.py @@ -29,6 +29,19 @@ webui_port_subfix = 9871 api_port = 9880 +gpu_name = torch.cuda.get_device_name(0) +if ( + ("16" in gpu_name and "V100" not in gpu_name.upper()) + or "P40" in gpu_name.upper() + or "P10" in gpu_name.upper() + or "1060" in gpu_name + or "1070" in gpu_name + or "1080" in gpu_name +): + is_half=False + +if(is_half==False and infer_device=="cuda"):infer_device="cpu" + class Config: def __init__(self): From f4148cf77fb899c22bcdd4e773d2f24ab34a73e7 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:48:30 +0800 Subject: [PATCH 081/126] =?UTF-8?q?=E6=94=AF=E6=8C=81=E5=90=88=E6=88=90?= =?UTF-8?q?=E6=96=87=E6=9C=AC=E4=B8=AD=E8=8B=B1=E6=B7=B7=E5=90=88=E3=80=81?= =?UTF-8?q?=E6=97=A5=E8=8B=B1=E6=B7=B7=E5=90=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- GPT_SoVITS/inference_webui.py | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 51e18a0..ad36b29 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -135,7 +135,8 @@ def change_sovits_weights(sovits_path): n_speakers=hps.data.n_speakers, **hps.model ) - del vq_model.enc_q + if("pretrained"not in sovits_path): + del vq_model.enc_q if is_half == True: vq_model = vq_model.half().to(device) else: @@ -261,8 +262,8 @@ def nonen_get_bert_inf(text, language): return bert - -def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language): +#i18n("不切"),i18n("凑五句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切") +def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,how_to_cut=i18n("不切")): t0 = ttime() prompt_text = prompt_text.strip("\n") prompt_language, text = prompt_language, text.strip("\n") @@ -296,7 +297,13 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) phones1, word2ph1, norm_text1 = clean_text_inf(prompt_text, prompt_language) else: phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language) - texts = text.replace("\n\n","\n").replace("\n\n","\n").replace("\n\n","\n").split("\n") + if(how_to_cut==i18n("凑五句一切")):text=cut1(text) + elif(how_to_cut==i18n("凑50字一切")):text=cut2(text) + elif(how_to_cut==i18n("按中文句号。切")):text=cut3(text) + elif(how_to_cut==i18n("按英文句号.切")):text=cut4(text) + text = text.replace("\n\n","\n").replace("\n\n","\n").replace("\n\n","\n") + if(text[-1]not in splits):text+="。"if text_language=="zh"else "." 
+ texts=text.split("\n") audio_opt = [] if prompt_language == "en": bert1 = get_bert_inf(phones1, word2ph1, norm_text1, prompt_language) @@ -439,6 +446,9 @@ def cut2(inp): def cut3(inp): inp = inp.strip("\n") return "\n".join(["%s。" % item for item in inp.strip("。").split("。")]) +def cut4(inp): + inp = inp.strip("\n") + return "\n".join(["%s." % item for item in inp.strip(".").split(".")]) def custom_sort_key(s): # 使用正则表达式提取字符串中的数字部分和非数字部分 @@ -487,17 +497,24 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: prompt_language = gr.Dropdown( label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文") ) - gr.Markdown(value=i18n("*请填写需要合成的目标文本")) + gr.Markdown(value=i18n("*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。")) with gr.Row(): text = gr.Textbox(label=i18n("需要合成的文本"), value="") text_language = gr.Dropdown( label=i18n("需要合成的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文") ) + how_to_cut = gr.Radio( + label=i18n("怎么切"), + choices=[i18n("不切"),i18n("凑五句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切"),], + value=i18n("凑50字一切"), + interactive=True, + ) inference_button = gr.Button(i18n("合成语音"), variant="primary") output = gr.Audio(label=i18n("输出的语音")) + inference_button.click( get_tts_wav, - [inp_ref, prompt_text, prompt_language, text, text_language], + [inp_ref, prompt_text, prompt_language, text, text_language,how_to_cut], [output], ) @@ -507,10 +524,12 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: button1 = gr.Button(i18n("凑五句一切"), variant="primary") button2 = gr.Button(i18n("凑50字一切"), variant="primary") button3 = gr.Button(i18n("按中文句号。切"), variant="primary") + button4 = gr.Button(i18n("按英文句号.切"), variant="primary") text_opt = gr.Textbox(label=i18n("切分后文本"), value="") button1.click(cut1, [text_inp], [text_opt]) button2.click(cut2, [text_inp], [text_opt]) button3.click(cut3, [text_inp], [text_opt]) + button4.click(cut4, [text_inp], [text_opt]) gr.Markdown(value=i18n("后续将支持混合语种编码文本输入。")) app.queue(concurrency_count=511, max_size=1022).launch( From b24893f4d16789cad23337942727651155d15181 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:54:13 +0800 Subject: [PATCH 082/126] Update inference_webui.py --- GPT_SoVITS/inference_webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index ad36b29..aad3992 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -302,7 +302,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, elif(how_to_cut==i18n("按中文句号。切")):text=cut3(text) elif(how_to_cut==i18n("按英文句号.切")):text=cut4(text) text = text.replace("\n\n","\n").replace("\n\n","\n").replace("\n\n","\n") - if(text[-1]not in splits):text+="。"if text_language=="zh"else "." + if(text[-1]not in splits):text+="。"if text_language!="en"else "." 
texts=text.split("\n") audio_opt = [] if prompt_language == "en": From 8651717a511721db90b764804d0701aea62b6ba7 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:58:23 +0800 Subject: [PATCH 083/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index 93fc8be..beaba42 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -29,3 +29,22 @@ 3-优化模型文件排序逻辑 4-中文分词使用jieba_fast代替jieba + +### 20240126更新 + +1-支持输出文本中英混合、日英混合 + +2-输出可选切分模式 + +3-修复uvr5读取到目录自动跳出的问题 + +4-修复多个换行导致推理报错 + +5-去除推理界面大量冗余log + +6-支持mac训练推理 + +7-自动识别不支持半精度的卡强制单精度。cpu推理下强制单精度。 + + + From 6f1b4fd756996f2f3bc9543389f932575a5922cf Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:59:34 +0800 Subject: [PATCH 084/126] Update config.py --- config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/config.py b/config.py index a95daf9..97369e7 100644 --- a/config.py +++ b/config.py @@ -40,8 +40,7 @@ if ( ): is_half=False -if(is_half==False and infer_device=="cuda"):infer_device="cpu" - +if(infer_device=="cpu"):is_half=False class Config: def __init__(self): From 5c6ba3ebcbd4c34a1ea5b96f130ea8a5ae60d818 Mon Sep 17 00:00:00 2001 From: KakaruHayate <97896816+KakaruHayate@users.noreply.github.com> Date: Fri, 26 Jan 2024 19:43:35 +0800 Subject: [PATCH 085/126] Update not neuraL tone words list. --- GPT_SoVITS/text/tone_sandhi.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/GPT_SoVITS/text/tone_sandhi.py b/GPT_SoVITS/text/tone_sandhi.py index eafb179..68f497d 100644 --- a/GPT_SoVITS/text/tone_sandhi.py +++ b/GPT_SoVITS/text/tone_sandhi.py @@ -455,6 +455,35 @@ class ToneSandhi: "电子", "人人", "虎虎", + '幺幺', + '干嘛', + '学子', + '哈哈', + '数数', + '袅袅', + '局地', + '以下', + '娃哈哈', + '花花草草', + '留得', + '耕地', + '想想', + '熙熙', + '攘攘', + '卵子', + '死死', + '冉冉', + '恳恳', + '佼佼', + '吵吵', + '打打', + '考考', + '整整', + '莘莘', + '落地', + '算子', + '家家户户', + '青青', } self.punc = ":,;。?!“”‘’':,;.?!" From 9b6f359d05bc10d931a0250609b545a12726a30b Mon Sep 17 00:00:00 2001 From: KakaruHayate <97896816+KakaruHayate@users.noreply.github.com> Date: Fri, 26 Jan 2024 19:48:52 +0800 Subject: [PATCH 086/126] fix " ' " --- GPT_SoVITS/text/tone_sandhi.py | 58 +++++++++++++++++----------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/GPT_SoVITS/text/tone_sandhi.py b/GPT_SoVITS/text/tone_sandhi.py index 68f497d..9f62abe 100644 --- a/GPT_SoVITS/text/tone_sandhi.py +++ b/GPT_SoVITS/text/tone_sandhi.py @@ -455,35 +455,35 @@ class ToneSandhi: "电子", "人人", "虎虎", - '幺幺', - '干嘛', - '学子', - '哈哈', - '数数', - '袅袅', - '局地', - '以下', - '娃哈哈', - '花花草草', - '留得', - '耕地', - '想想', - '熙熙', - '攘攘', - '卵子', - '死死', - '冉冉', - '恳恳', - '佼佼', - '吵吵', - '打打', - '考考', - '整整', - '莘莘', - '落地', - '算子', - '家家户户', - '青青', + "幺幺", + "干嘛", + "学子", + "哈哈", + "数数", + "袅袅", + "局地", + "以下", + "娃哈哈", + "花花草草", + "留得", + "耕地", + "想想", + "熙熙", + "攘攘", + "卵子", + "死死", + "冉冉", + "恳恳", + "佼佼", + "吵吵", + "打打", + "考考", + "整整", + "莘莘", + "落地", + "算子", + "家家户户", + "青青", } self.punc = ":,;。?!“”‘’':,;.?!" 
From b769447ab50466839d23d91d95ffb04f28203b48 Mon Sep 17 00:00:00 2001 From: Atopona <103567097+Atopona@users.noreply.github.com> Date: Fri, 26 Jan 2024 21:02:17 +0800 Subject: [PATCH 087/126] English --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 166602f..2d856bb 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.


[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) +[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) [**English**](./README.md) | [**中文简体**](./docs/cn/README.md) | [**日本語**](./docs/ja/README.md) From 9d549751bb32deda36e6c812d5f4416cadb57006 Mon Sep 17 00:00:00 2001 From: Atopona <103567097+Atopona@users.noreply.github.com> Date: Fri, 26 Jan 2024 21:02:34 +0800 Subject: [PATCH 088/126] chinese --- docs/cn/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cn/README.md b/docs/cn/README.md index 445bf92..5cd5824 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -9,7 +9,7 @@
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) +[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) [**English**](./README.md) | [**中文简体**](./README_ZH.md) From 86716c9c90b40b78b69e34079b8ef306f6a85f1c Mon Sep 17 00:00:00 2001 From: Atopona <103567097+Atopona@users.noreply.github.com> Date: Fri, 26 Jan 2024 21:02:49 +0800 Subject: [PATCH 089/126] Japanese --- docs/ja/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ja/README.md b/docs/ja/README.md index e962df2..181cafa 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -9,7 +9,7 @@
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) +[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) [**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](./README.md) From f73c6d3ddeec9ca9f08d241e51e3de8e067c2ee5 Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:32:23 +0800 Subject: [PATCH 090/126] Update es_ES.json --- i18n/locale/es_ES.json | 339 +++++++++++++++++++++++++++++------------ 1 file changed, 240 insertions(+), 99 deletions(-) diff --git a/i18n/locale/es_ES.json b/i18n/locale/es_ES.json index 08b8176..5445b69 100644 --- a/i18n/locale/es_ES.json +++ b/i18n/locale/es_ES.json @@ -1,135 +1,276 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si es >=3, entonces use el resultado del reconocimiento de tono de 'harvest' con filtro de mediana, el valor es el radio del filtro, su uso puede debilitar el sonido sordo", - "A模型权重": "Un peso modelo para el modelo A.", - "A模型路径": "Modelo A ruta.", - "B模型路径": "Modelo B ruta.", + "很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica compatible para admitir su entrenamiento.", + "UVR5已开启": "UVR5 está habilitado", + "UVR5已关闭": "UVR5 está deshabilitado", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Este software es de código abierto bajo la licencia MIT. El autor no tiene control sobre el software. El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad.
Si no acepta estos términos, no puede utilizar ni hacer referencia a ningún código o archivo dentro del paquete de software. Consulte el archivo LICENSE en el directorio raíz para obtener más detalles.", + "0-前置数据集获取工具": "0-Herramienta de obtención de conjunto de datos previo", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Herramienta de separación de voz y acompañamiento UVR5 y eliminación de reverberación y retardo", + "是否开启UVR5-WebUI": "¿Habilitar UVR5-WebUI?", + "UVR5进程输出信息": "Información de salida del proceso UVR5", + "0b-语音切分工具": "0b-Herramienta de división de voz", + "音频自动切分输入路径,可文件可文件夹": "Ruta de entrada para la división automática de audio, puede ser un archivo o una carpeta", + "切分后的子音频的输出根目录": "Directorio raíz de salida de los sub-audios después de la división", + "threshold:音量小于这个值视作静音的备选切割点": "umbral: puntos de corte alternativos considerados como silencio si el volumen es menor que este valor", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: duración mínima de cada segmento, si el primer segmento es demasiado corto, se conecta continuamente con los siguientes hasta que supera este valor", + "min_interval:最短切割间隔": "min_interval: intervalo mínimo de corte", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: cómo calcular la curva de volumen, cuanto más pequeño, mayor precisión pero mayor carga computacional (mayor precisión no significa mejor rendimiento)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept: duración máxima del silencio después del corte", + "开启语音切割": "Habilitar la división de voz", + "终止语音切割": "Terminar la división de voz", + "max:归一化后最大值多少": "max: valor máximo después de la normalización", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proporción de mezcla de audio normalizado que entra", + "切割使用的进程数": "Número de procesos utilizados para la división", + "语音切割进程输出信息": "Información de salida del proceso de división de voz", + "0c-中文批量离线ASR工具": "0c-Herramienta de ASR en lote fuera de línea en chino", + "开启离线批量ASR": "¿Habilitar ASR en lote fuera de línea?", + "终止ASR进程": "Terminar el proceso ASR", + "批量ASR(中文only)输入文件夹路径": "Ruta de la carpeta de entrada para ASR en lote (solo en chino)", + "ASR进程输出信息": "Información de salida del proceso ASR", + "0d-语音文本校对标注工具": "0d-Herramienta de corrección y etiquetado de texto de voz", + "是否开启打标WebUI": "¿Habilitar la interfaz web de etiquetado?", + "打标数据标注文件路径": "Ruta del archivo de etiquetado de datos", + "打标工具进程输出信息": "Información de salida del proceso de la herramienta de etiquetado", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*Nombre del experimento/modelo", + "显卡信息": "Información de la tarjeta gráfica", + "预训练的SoVITS-G模型路径": "Ruta del modelo SoVITS-G preentrenado", + "预训练的SoVITS-D模型路径": "Ruta del modelo SoVITS-D preentrenado", + "预训练的GPT模型路径": "Ruta del modelo GPT preentrenado", + "1A-训练集格式化工具": "1A-Herramienta de formateo del conjunto de datos de entrenamiento", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "Debe haber archivos y carpetas que comiencen con 23456 en el directorio logs/nombre del experimento", + "*文本标注文件": "*Archivo de etiquetado de texto", + "*训练集音频文件目录": "*Directorio de archivos de audio de entrenamiento", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Directorio de archivos de audio de entrenamiento, concatenar con los nombres de archivo correspondientes en el archivo list.", + "1Aa-文本内容": "1Aa-Contenido del texto", + "GPU卡号以-分割,每个卡号一个进程": "Número de tarjeta GPU separado por '-', cada número de tarjeta es un proceso", + "预训练的中文BERT模型路径": "Ruta del modelo BERT en chino preentrenado", + "开启文本获取": "¿Habilitar la obtención de 
texto?", + "终止文本获取进程": "Terminar el proceso de obtención de texto", + "文本进程输出信息": "Información de salida del proceso de obtención de texto", + "1Ab-SSL自监督特征提取": "1Ab-Extracción de características auto-supervisada SSL", + "预训练的SSL模型路径": "Ruta del modelo SSL preentrenado", + "开启SSL提取": "¿Habilitar la extracción SSL?", + "终止SSL提取进程": "Terminar el proceso de extracción SSL", + "SSL进程输出信息": "Información de salida del proceso SSL", + "1Ac-语义token提取": "1Ac-Extracción de tokens semánticos", + "开启语义token提取": "¿Habilitar la extracción de tokens semánticos?", + "终止语义token提取进程": "Terminar el proceso de extracción de tokens semánticos", + "语义token提取进程输出信息": "Información de salida del proceso de extracción de tokens semánticos", + "1Aabc-训练集格式化一键三连": "1Aabc-Formateo del conjunto de datos de entrenamiento en un solo paso", + "开启一键三连": "¿Habilitar un solo paso de formateo?", + "终止一键三连": "Terminar el proceso de un solo paso de formateo", + "一键三连进程输出信息": "Información de salida del proceso de triple acción", + "1B-微调训练": "1B-Entrenamiento de ajuste fino", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Entrenamiento de SoVITS. Los archivos de modelo para compartir se encuentran en SoVITS_weights.", + "每张显卡的batch_size": "Tamaño de lote por tarjeta gráfica", + "总训练轮数total_epoch,不建议太高": "Número total de épocas de entrenamiento, no se recomienda demasiado alto", + "文本模块学习率权重": "Peso de la tasa de aprendizaje del módulo de texto", + "保存频率save_every_epoch": "Frecuencia de guardado (cada epoch)", + "是否仅保存最新的ckpt文件以节省硬盘空间": "¿Guardar solo el último archivo ckpt para ahorrar espacio en disco?", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "¿Guardar el modelo final pequeño en la carpeta de pesos en cada punto de guardado?", + "开启SoVITS训练": "Iniciar entrenamiento de SoVITS", + "终止SoVITS训练": "Detener entrenamiento de SoVITS", + "SoVITS训练进程输出信息": "Información de salida del proceso de entrenamiento de SoVITS", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Entrenamiento de GPT. Los archivos de modelo para compartir se encuentran en GPT_weights.", + "总训练轮数total_epoch": "Número total de épocas de entrenamiento", + "开启GPT训练": "Iniciar entrenamiento de GPT", + "终止GPT训练": "Detener entrenamiento de GPT", + "GPT训练进程输出信息": "Información de salida del proceso de entrenamiento de GPT", + "1C-推理": "1C-Inferencia", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Seleccione el modelo almacenado en SoVITS_weights y GPT_weights después del entrenamiento. Uno de ellos es el modelo base, útil para experimentar con TTS de 5 segundos sin entrenamiento.", + "*GPT模型列表": "*Lista de modelos GPT", + "*SoVITS模型列表": "*Lista de modelos SoVITS", + "GPU卡号,只能填1个整数": "Número de tarjeta GPU, solo se puede ingresar un número entero", + "刷新模型路径": "Actualizar la ruta del modelo", + "是否开启TTS推理WebUI": "¿Habilitar la interfaz web de inferencia TTS?", + "TTS推理WebUI进程输出信息": "Información de salida del proceso de interfaz web de inferencia TTS", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Cambio de voz", + "施工中,请静候佳音": "En construcción, por favor espere pacientemente", + "TTS推理进程已开启": "Proceso de inferencia TTS iniciado", + "TTS推理进程已关闭": "Proceso de inferencia TTS cerrado", + "打标工具WebUI已开启": "Interfaz web de la herramienta de etiquetado iniciada", + "打标工具WebUI已关闭": "Interfaz web de la herramienta de etiquetado cerrada", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Este software es de código abierto bajo la licencia MIT. El autor no tiene control sobre el software. 
El usuario que lo utilice o distribuya, y el que genere sonidos a partir del software, asume toda la responsabilidad. Si no acepta estos términos, no puede utilizar ni hacer referencia a ningún código o archivo dentro del paquete de software. Consulte el archivo LICENSE en el directorio raíz para obtener más detalles.", + "*请上传并填写参考信息": "*Por favor, suba y complete la información de referencia", + "*请填写需要合成的目标文本": "*Por favor, complete el texto objetivo que necesita ser sintetizado", + "ASR任务开启:%s": "Tarea ASR iniciada: %s", + "GPT训练完成": "Entrenamiento de GPT completado", + "GPT训练开始:%s": "Entrenamiento de GPT iniciado: %s", + "SSL提取进程执行中": "Proceso de extracción SSL en ejecución", + "SSL提取进程结束": "Proceso de extracción SSL finalizado", + "SoVITS训练完成": "Entrenamiento de SoVITS completado", + "SoVITS训练开始:%s": "Entrenamiento de SoVITS iniciado: %s", + "一键三连中途报错": "Error intermedio en triple acción", + "一键三连进程结束": "Proceso de triple acción finalizado", + "中文": "Chino", + "凑50字一切": "Todo para alcanzar las 50 palabras", + "凑五句一切": "Todo para alcanzar las cinco frases", + "切分后文本": "Texto después de la división", + "切割执行中": "División en proceso", + "切割结束": "División finalizada", + "参考音频的文本": "Texto de referencia del audio", + "参考音频的语种": "Idioma del audio de referencia", + "合成语音": "Síntesis de voz", + "后续将支持混合语种编码文本输入。": "En el futuro, se admitirá la entrada de texto con codificación de idiomas mixtos.", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "Ya hay una tarea ASR en curso, debe detenerla antes de comenzar la siguiente tarea", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "Ya hay una tarea de entrenamiento de GPT en curso, debe detenerla antes de comenzar la siguiente tarea", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "Ya hay una tarea de extracción SSL en curso, debe detenerla antes de comenzar la siguiente tarea", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "Ya hay una tarea de entrenamiento de SoVITS en curso, debe detenerla antes de comenzar la siguiente tarea", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "Ya hay una tarea de triple acción en curso, debe detenerla antes de comenzar la siguiente tarea", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "Ya hay una tarea de división en curso, debe detenerla antes de comenzar la siguiente tarea", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "Ya hay una tarea de texto en curso, debe detenerla antes de comenzar la siguiente tarea", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "Ya hay una tarea de extracción de tokens semánticos en curso, debe detenerla antes de comenzar la siguiente tarea", + "已终止ASR进程": "Proceso ASR terminado", + "已终止GPT训练": "Entrenamiento de GPT terminado", + "已终止SoVITS训练": "Entrenamiento de SoVITS terminado", + "已终止所有1a进程": "Se han terminado todos los procesos 1a", + "已终止所有1b进程": "Se han terminado todos los procesos 1b", + "已终止所有一键三连进程": "Se han terminado todos los procesos de triple acción", + "已终止所有切割进程": "Proceso de corte terminado", + "已终止所有语义token进程": "Proceso de extracción de tokens semánticos terminado", + "按中文句号。切": "Cortar según puntos en chino", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Herramienta de división de texto. El resultado de la síntesis puede no ser bueno para textos demasiado largos, por lo que se recomienda dividirlos primero. 
La síntesis se realiza separando el texto según los saltos de línea y luego uniendo los fragmentos.", + "文本进程执行中": "Proceso de texto en ejecución", + "文本进程结束": "Proceso de texto finalizado", + "日文": "Japonés", + "英文": "Inglés", + "语义token提取进程执行中": "Proceso de extracción de tokens semánticos en ejecución", + "语义token提取进程结束": "Proceso de extracción de tokens semánticos finalizado", + "请上传参考音频": "Por favor, suba el audio de referencia", + "输入路径不存在": "La ruta de entrada no existe", + "输入路径存在但既不是文件也不是文件夹": "La ruta de entrada existe pero no es ni un archivo ni una carpeta", + "输出的语音": "Audio de salida", + "进度:1a-done": "Progreso: 1a-hecho", + "进度:1a-done, 1b-ing": "Progreso: 1a-hecho, 1b-en proceso", + "进度:1a-ing": "Progreso: 1a-en proceso", + "进度:1a1b-done": "Progreso: 1a1b-hecho", + "进度:1a1b-done, 1cing": "Progreso: 1a1b-hecho, 1c-en proceso", + "进度:all-done": "Progreso: todo hecho", + "需要合成的切分前文本": "Texto a sintetizar antes de la división", + "需要合成的文本": "Texto a sintetizar", + "需要合成的语种": "Idioma para la síntesis", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si es >=3, se utiliza la mediana para filtrar los resultados del reconocimiento de altura tonal de harvest, el valor es el radio del filtro. Su uso puede debilitar los sonidos sordos.", + "A模型权重": "Peso del modelo A", + "A模型路径": "Ruta del modelo A", + "B模型路径": "Ruta del modelo B", "E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, un tono por línea, en lugar de F0 predeterminado y cambio de tono", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, una línea por altura tonal, en lugar de F0 y cambio de tono predeterminados", "Index Rate": "Tasa de índice", - "Onnx导出": "Exportar Onnx", - "Onnx输出路径": "Ruta de salida Onnx", + "Onnx导出": "Exportar a Onnx", + "Onnx输出路径": "Ruta de salida de Onnx", "RVC模型路径": "Ruta del modelo RVC", - "ckpt处理": "Procesamiento de recibos", - "harvest进程数": "Número de procesos", - "index文件路径不可包含中文": "La ruta del archivo .index no debe contener caracteres chinos.", - "pth文件路径不可包含中文": "La ruta del archivo .pth no debe contener caracteres chinos.", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Separe los números de identificación de la GPU con '-' al ingresarlos. Por ejemplo, '0-1-2' significa usar GPU 0, GPU 1 y GPU 2.", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Paso 1: Complete la configuración del experimento. Los datos del experimento se almacenan en el directorio 'logs', con cada experimento en una carpeta separada. La ruta del nombre del experimento debe ingresarse manualmente y debe contener la configuración del experimento, los registros y los archivos del modelo entrenado.", + "ckpt处理": "Procesamiento de ckpt", + "harvest进程数": "Número de procesos de harvest", + "index文件路径不可包含中文": "La ruta del archivo de índice no puede contener caracteres chinos", + "pth文件路径不可包含中文": "La ruta del archivo pth no puede contener caracteres chinos", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configuración de números de tarjeta rmvpe: usando - para separar los números de tarjeta de diferentes procesos de entrada, por ejemplo, 0-0-1 para ejecutar 2 procesos en la tarjeta 0 y 1 proceso en la tarjeta 1", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Paso 1: Completa la configuración del experimento. 
Los datos del experimento se encuentran en logs, cada experimento en una carpeta, debe ingresar manualmente la ruta del nombre del experimento, que incluye la configuración del experimento, el registro y los archivos del modelo entrenado.", "step1:正在处理数据": "Paso 1: Procesando datos", - "step2:正在提取音高&正在提取特征": "Paso 2: Extracción del tono y extracción de características", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Paso 2a: Recorra automáticamente la carpeta de capacitación y corte y normalice todos los archivos de audio que se pueden decodificar en audio. Se generarán dos carpetas 'wav' en el directorio del experimento. Actualmente, solo se admite la capacitación de una sola persona.", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Paso 2b: Use la CPU para extraer el tono (si el modelo tiene guía de tono) y la GPU para extraer características (seleccione el número de tarjeta).", - "step3: 填写训练设置, 开始训练模型和索引": "Paso 3: Complete la configuración de entrenamiento y comience a entrenar el modelo y el índice.", + "step2:正在提取音高&正在提取特征": "Paso 2: Extrayendo tono y características", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Paso 2a: Recorre automáticamente todos los archivos en la carpeta de entrenamiento que se pueden decodificar en archivos de audio y realiza la normalización de segmentos. Genera 2 carpetas de audio en el directorio del experimento; por ahora, solo es compatible con el entrenamiento de una sola persona.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Paso 2b: Extraer tono con CPU (si el modelo incluye tono) y extraer características con GPU (seleccionar número de tarjeta)", + "step3: 填写训练设置, 开始训练模型和索引": "Paso 3: Completa la configuración de entrenamiento y comienza a entrenar el modelo e indexar", "step3a:正在训练模型": "Paso 3a: Entrenando el modelo", "一键训练": "Entrenamiento con un clic", - "也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden importar varios archivos de audio. Si existe una ruta de carpeta, esta entrada se ignora.", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Procesamiento por lotes para la separación de acompañamiento vocal utilizando el modelo UVR5.
Ejemplo de formato de ruta de carpeta válido: D:\\ruta\\a\\la\\carpeta\\de\\entrada (copiar desde la barra de direcciones del administrador de archivos).
El modelo se divide en tres categorías:
1. Preservar voces: Elija esta opción para audio sin armonías. Preserva las voces mejor que HP5. Incluye dos modelos incorporados: HP2 y HP3. HP3 puede filtrar ligeramente el acompañamiento pero conserva las voces un poco mejor que HP2.
2. Preservar solo voces principales: Elija esta opción para audio con armonías. Puede debilitar las voces principales. Incluye un modelo incorporado: HP5.
3. Modelos de des-reverberación y des-retardo (por FoxJoy):
  (1) MDX-Net: La mejor opción para la eliminación de reverberación estéreo pero no puede eliminar la reverberación mono;
 (234) DeEcho: Elimina efectos de retardo. El modo Agresivo elimina más a fondo que el modo Normal. DeReverb adicionalmente elimina la reverberación y puede eliminar la reverberación mono, pero no muy efectivamente para contenido de alta frecuencia fuertemente reverberado.
Notas de des-reverberación/des-retardo:
1. El tiempo de procesamiento para el modelo DeEcho-DeReverb es aproximadamente el doble que los otros dos modelos DeEcho.
2. El modelo MDX-Net-Dereverb es bastante lento.
3. La configuración más limpia recomendada es aplicar primero MDX-Net y luego DeEcho-Agresivo.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Separe los números de identificación de la GPU con '-' al ingresarlos. Por ejemplo, '0-1-2' significa usar GPU 0, GPU 1 y GPU 2.", - "伴奏人声分离&去混响&去回声": "Separación de voz acompañante & eliminación de reverberación & eco", - "使用模型采样率": "使用模型采样率", - "使用设备采样率": "使用设备采样率", - "保存名": "Guardar nombre", - "保存的文件名, 默认空为和源文件同名": "Nombre del archivo que se guardará, el valor predeterminado es el mismo que el nombre del archivo de origen", - "保存的模型名不带后缀": "Nombre del modelo guardado sin extensión.", - "保存频率save_every_epoch": "Frecuencia de guardado (save_every_epoch)", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteger las consonantes claras y la respiración, prevenir artefactos como la distorsión de sonido electrónico, 0.5 no está activado, reducir aumentará la protección pero puede reducir el efecto del índice", + "也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden ingresar archivos de audio por lotes, seleccionar uno, prioridad para leer carpetas", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Usar - para separar los números de tarjeta utilizados como entrada, por ejemplo, 0-1-2 para usar las tarjetas 0, 1 y 2", + "伴奏人声分离&去混响&去回声": "Separación de acompañamiento y voz principal y eliminación de reverberación y eco", + "使用模型采样率": "Usar tasa de muestreo del modelo", + "使用设备采样率": "Usar tasa de muestreo del dispositivo", + "保存名": "Nombre de guardado", + "保存的文件名, 默认空为和源文件同名": "Nombre de archivo guardado, vacío por defecto para tener el mismo nombre que el archivo fuente", + "保存的模型名不带后缀": "Nombre del modelo guardado sin extensión", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteger las consonantes claras y los sonidos de respiración, evitando artefactos como el desgarro eléctrico. 
No activar al tirar hasta 0.5, reducir para aumentar la protección, pero puede disminuir la efectividad del índice", "修改": "Modificar", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modificar la información del modelo (solo admite archivos de modelos pequeños extraídos en la carpeta weights)", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modificar información del modelo (solo compatible con archivos de modelo pequeños extraídos en la carpeta weights)", "停止音频转换": "Detener la conversión de audio", - "全流程结束!": "¡Todo el proceso ha terminado!", - "刷新音色列表和索引路径": "Actualizar la lista de modelos e índice de rutas", + "全流程结束!": "¡Proceso completo!", + "刷新音色列表和索引路径": "Actualizar lista de tonos e índice de ruta", "加载模型": "Cargar modelo", - "加载预训练底模D路径": "Cargue la ruta del modelo D base pre-entrenada.", - "加载预训练底模G路径": "Cargue la ruta del modelo G base pre-entrenada.", - "单次推理": "单次推理", - "卸载音色省显存": "Descargue la voz para ahorrar memoria GPU", - "变调(整数, 半音数量, 升八度12降八度-12)": "Cambio de tono (entero, número de semitonos, subir una octava +12 o bajar una octava -12)", - "后处理重采样至最终采样率,0为不进行重采样": "Remuestreo posterior al proceso a la tasa de muestreo final, 0 significa no remuestrear", + "加载预训练底模D路径": "Cargar ruta del modelo D preentrenado", + "加载预训练底模G路径": "Cargar ruta del modelo G preentrenado", + "单次推理": "Inferencia única", + "卸载音色省显存": "Descargar tono para ahorrar memoria de video", + "变调(整数, 半音数量, 升八度12降八度-12)": "Cambiar tono (número entero, cantidad de semitonos, subir octava 12 bajar octava -12)", + "后处理重采样至最终采样率,0为不进行重采样": "Reprocesar y remuestrear a la tasa de muestreo final, 0 para no remuestrear", "否": "No", - "启用相位声码器": "启用相位声码器", + "启用相位声码器": "Activar codificador de fase", "响应阈值": "Umbral de respuesta", - "响度因子": "factor de sonoridad", + "响度因子": "Factor de sonoridad", "处理数据": "Procesar datos", "导出Onnx模型": "Exportar modelo Onnx", "导出文件格式": "Formato de archivo de exportación", "常见问题解答": "Preguntas frecuentes", "常规设置": "Configuración general", "开始音频转换": "Iniciar conversión de audio", - "很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica adecuada para soportar su entrenamiento", "性能设置": "Configuración de rendimiento", - "总训练轮数total_epoch": "Total de épocas de entrenamiento (total_epoch)", - "批量推理": "批量推理", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta que contiene los archivos de audio para convertir o cargue varios archivos de audio. El audio convertido se emitirá en la carpeta especificada (opción predeterminada).", - "指定输出主人声文件夹": "Especifique la carpeta de salida para la voz principal", + "批量推理": "Inferencia por lotes", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta de audio a convertir o cargue varios archivos de audio, la salida se realiza en la carpeta especificada (opt por defecto). 
", + "指定输出主人声文件夹": "Especificar carpeta de salida de voz principal", "指定输出文件夹": "Especificar carpeta de salida", - "指定输出非主人声文件夹": "Especifique la carpeta de salida para las voces no principales", - "推理时间(ms):": "Inferir tiempo (ms):", - "推理音色": "inferencia de voz", + "指定输出非主人声文件夹": "Especificar carpeta de salida de no voz principal", + "推理时间(ms):": "Tiempo de inferencia (ms):", + "推理音色": "Tono de inferencia", "提取": "Extraer", - "提取音高和处理数据使用的CPU进程数": "Número de procesos de CPU utilizados para extraer el tono y procesar los datos", + "提取音高和处理数据使用的CPU进程数": "Número de procesadores de CPU utilizados para extraer tono y procesar datos", "是": "Sí", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Guardar solo el archivo ckpt más reciente para ahorrar espacio en disco", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Guardar pequeño modelo final en la carpeta 'weights' en cada punto de guardado", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Si almacenar en caché todos los conjuntos de entrenamiento en la memoria de la GPU. Los conjuntos de datos pequeños (menos de 10 minutos) se pueden almacenar en caché para acelerar el entrenamiento, pero el almacenamiento en caché de conjuntos de datos grandes puede causar errores de memoria en la GPU y no aumenta la velocidad de manera significativa.", - "显卡信息": "información de la GPU", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Este software es de código abierto bajo la licencia MIT, el autor no tiene ningún control sobre el software, y aquellos que usan el software y difunden los sonidos exportados por el software son los únicos responsables.
Si no está de acuerdo con esta cláusula , no puede utilizar ni citar ningún código ni archivo del paquete de software Consulte el directorio raíz Agreement-LICENSE.txt para obtener más información.", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Almacenar en caché todos los conjuntos de entrenamiento en la memoria de video. Pequeños conjuntos de datos menores a 10 minutos pueden almacenarse en caché para acelerar el entrenamiento; almacenar en caché grandes conjuntos de datos puede saturar la memoria de video y no acelerará mucho.", "查看": "Ver", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Ver información del modelo (solo aplicable a archivos de modelos pequeños extraídos de la carpeta 'pesos')", - "检索特征占比": "Proporción de función de búsqueda", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Ver información del modelo (solo compatible con archivos pequeños extraídos en la carpeta weights)", + "检索特征占比": "Proporción de características de búsqueda", "模型": "Modelo", - "模型推理": "inferencia del modelo", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extracción de modelo (ingrese la ruta de un archivo de modelo grande en la carpeta 'logs'), aplicable cuando desea extraer un archivo de modelo pequeño después de entrenar a mitad de camino y no se guardó automáticamente, o cuando desea probar un modelo intermedio", - "模型是否带音高指导": "Si el modelo tiene guía de tono.", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Si el modelo tiene guía de tono (necesaria para cantar, pero no para hablar)", - "模型是否带音高指导,1是0否": "Si el modelo tiene guía de tono, 1 para sí, 0 para no", + "模型推理": "Inferencia de modelo", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extracción de modelo (ingresar la ruta del modelo grande en la carpeta logs), útil cuando se quiere dejar de entrenar a la mitad y el modelo no ha extraído automáticamente un modelo pequeño guardado, o para probar la situación del modelo intermedio", + "模型是否带音高指导": "¿El modelo incluye guía de altura tonal?", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "¿El modelo incluye guía de altura tonal? (Necesario para cantar, opcional para voz)", + "模型是否带音高指导,1是0否": "¿El modelo incluye guía de altura tonal? 1 para sí, 0 para no", "模型版本型号": "Versión y modelo del modelo", - "模型融合, 可用于测试音色融合": "Fusión de modelos, se puede utilizar para fusionar diferentes voces", + "模型融合, 可用于测试音色融合": "Fusión de modelos, útil para probar la mezcla de tonos", "模型路径": "Ruta del modelo", - "每张显卡的batch_size": "Tamaño del lote (batch_size) por tarjeta gráfica", - "淡入淡出长度": "Duración del fundido de entrada/salida", + "淡入淡出长度": "Longitud de desvanecimiento", "版本": "Versión", "特征提取": "Extracción de características", - "特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo de la biblioteca de características, si está vacío, se utilizará el resultado de la selección desplegable", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tecla +12 recomendada para conversión de voz de hombre a mujer, tecla -12 para conversión de voz de mujer a hombre. Si el rango de tono es demasiado amplio y causa distorsión, ajústelo usted mismo a un rango adecuado.", + "特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo de la biblioteca de búsqueda de características, si está vacío, se utiliza el resultado seleccionado en el menú desplegable", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recomendación para cambiar de hombre a mujer +12 teclas, cambiar de mujer a hombre -12 teclas. Si la amplitud del rango tonal causa distorsión del tono, también puede ajustarse manualmente al rango tonal adecuado. 
", "目标采样率": "Tasa de muestreo objetivo", - "算法延迟(ms):": "算法延迟(ms):", - "自动检测index路径,下拉式选择(dropdown)": "Detección automática de la ruta del índice, selección desplegable (dropdown)", + "算法延迟(ms):": "Retardo del algoritmo (ms):", + "自动检测index路径,下拉式选择(dropdown)": "Detectar automáticamente la ruta del índice, seleccionar en menú desplegable", "融合": "Fusión", - "要改的模型信息": "Información del modelo a modificar", - "要置入的模型信息": "Información del modelo a colocar.", - "训练": "Entrenamiento", - "训练模型": "Entrenar Modelo", - "训练特征索引": "Índice de características", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entrenamiento finalizado, puede ver el registro de entrenamiento en la consola o en el archivo train.log en la carpeta del experimento", - "请指定说话人id": "ID del modelo", - "请选择index文件": "Seleccione el archivo .index", - "请选择pth文件": "Seleccione el archivo .pth", - "请选择说话人id": "Seleccione una identificación de altavoz", - "转换": "Conversión", - "输入实验名": "Ingrese el nombre del modelo", - "输入待处理音频文件夹路径": "Ingrese la ruta a la carpeta de audio que se procesará", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta a la carpeta de audio que se procesará (simplemente cópiela desde la barra de direcciones del administrador de archivos)", - "输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo del audio que se procesará (el formato predeterminado es el ejemplo correcto)", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Proporción de fusión para reemplazar el sobre de volumen de entrada con el sobre de volumen de salida, cuanto más cerca de 1, más se utiliza el sobre de salida", - "输入监听": "输入监听", - "输入训练文件夹路径": "Introduzca la ruta de la carpeta de entrenamiento", + "要改的模型信息": "Información del modelo a cambiar", + "要置入的模型信息": "Información del modelo a insertar", + "训练": "Entrenar", + "训练模型": "Entrenar modelo", + "训练特征索引": "Entrenar índice de características", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entrenamiento terminado, puede ver registros de entrenamiento en la consola o en el archivo train.log en la carpeta del experimento", + "请指定说话人id": "Por favor, especifique el ID del hablante", + "请选择index文件": "Seleccione el archivo index, por favor", + "请选择pth文件": "Seleccione el archivo pth, por favor", + "请选择说话人id": "Seleccione el ID del hablante, por favor", + "转换": "Convertir", + "输入实验名": "Ingrese el nombre del experimento", + "输入待处理音频文件夹路径": "Ingrese la ruta de la carpeta de audio a procesar", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta de la carpeta de audio a procesar (puede copiarla desde la barra de direcciones del administrador de archivos)", + "输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo de audio a procesar (el formato predeterminado es un ejemplo correcto)", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Ingrese la proporción de fusión para reemplazar el sobre de volumen de origen con el sobre de volumen de salida; cuanto más cercano a 1, más se utiliza el sobre de salida", + "输入监听": "Entrada de monitoreo", + "输入训练文件夹路径": "Ingrese la ruta de la carpeta de entrenamiento", "输入设备": "Dispositivo de entrada", - "输入降噪": "Reducción de ruido de entrada", + "输入降噪": "Entrada de reducción de ruido", "输出信息": "Información de salida", - "输出变声": "输出变声", + "输出变声": "Salida de cambio de voz", "输出设备": "Dispositivo de salida", - "输出降噪": "Reducción de ruido de salida", - "输出音频(右下角三个点,点了可以下载)": "Salida de audio (haga clic en los tres puntos en la esquina inferior derecha para descargar)", - "选择.index文件": "Seleccione el archivo .index", - "选择.pth文件": "Seleccione el archivo .pth", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": 
"Seleccione el algoritmo de extracción de tono, las voces de entrada se pueden acelerar con pm, harvest tiene buenos graves pero es muy lento, crepe es bueno pero se come las GPUs", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleccione el algoritmo de extracción de tono, use 'pm' para acelerar la entrada de canto, 'harvest' es bueno para los graves pero extremadamente lento, 'crepe' tiene buenos resultados pero consume GPU", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleccione el algoritmo de extracción de tono: la canción de entrada se puede acelerar con pm, la voz de alta calidad pero CPU pobre se puede acelerar con dio, harvest es mejor pero más lento, rmvpe es el mejor y se come ligeramente la CPU/GPU", - "采样率:": "采样率:", + "输出降噪": "Salida de reducción de ruido", + "输出音频(右下角三个点,点了可以下载)": "Salida de audio (los tres puntos en la esquina inferior derecha, haga clic para descargar)", + "选择.index文件": "Seleccione el archivo .index, por favor", + "选择.pth文件": "Seleccione el archivo .pth, por favor", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleccione el algoritmo de extracción de tono; para voz, pm acelera, harvest es lento pero tiene buenos bajos, crepe tiene buen efecto pero consume GPU", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleccione el algoritmo de extracción de tono; para voz, pm acelera, harvest es lento pero tiene buenos bajos, crepe tiene buen efecto pero consume GPU, rmvpe tiene el mejor efecto y consume poco GPU", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleccione el algoritmo de extracción de tono: para voz, pm acelera con buena calidad de audio pero CPU deficiente, dio acelera pero harvest tiene mejor calidad aunque es más lento, rmvpe tiene el mejor efecto y consume poco CPU/GPU", + "采样率:": "Tasa de muestreo:", "采样长度": "Longitud de muestreo", - "重载设备列表": "Actualizar lista de dispositivos", - "音调设置": "Ajuste de tono", - "音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice el mismo tipo de controlador)", + "重载设备列表": "Recargar lista de dispositivos", + "音调设置": "Configuración de tono", + "音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice controladores del mismo tipo)", "音高算法": "Algoritmo de tono", - "额外推理时长": "Tiempo de inferencia adicional" + "额外推理时长": "Tiempo adicional de inferencia" } From 2eabcfa8288662a3f3d9d7773077404648605377 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:50:26 +0800 Subject: [PATCH 091/126] Update inference_webui.py --- GPT_SoVITS/inference_webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index aad3992..d4a817b 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -424,7 +424,7 @@ def cut2(inp): inp = inp.strip("\n") inps = split(inp) if len(inps) < 2: - return [inp] + return inp opts = [] summ = 0 tmp_str = "" From 5237229d397294cafaf6bc7f0e34220970aab97b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:56:32 +0800 Subject: [PATCH 092/126] Update inference_webui.py --- GPT_SoVITS/inference_webui.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index d4a817b..5495be5 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -423,6 +423,7 @@ def cut1(inp): def cut2(inp): 
inp = inp.strip("\n") inps = split(inp) + # print(inps) if len(inps) < 2: return inp opts = [] @@ -437,7 +438,8 @@ def cut2(inp): tmp_str = "" if tmp_str != "": opts.append(tmp_str) - if len(opts[-1]) < 50: ##如果最后一个太短了,和前一个合一起 + # print(opts) + if len(opts)>1 and len(opts[-1]) < 50: ##如果最后一个太短了,和前一个合一起 opts[-2] = opts[-2] + opts[-1] opts = opts[:-1] return "\n".join(opts) From f7130944c05c33face7799dd84c289fa4a12cde9 Mon Sep 17 00:00:00 2001 From: KamioRinn Date: Sat, 27 Jan 2024 06:00:48 +0800 Subject: [PATCH 093/126] fix uvr5 hp3 output --- tools/uvr5/vr.py | 3 +++ tools/uvr5/webui.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/uvr5/vr.py b/tools/uvr5/vr.py index 448e57f..86fbac4 100644 --- a/tools/uvr5/vr.py +++ b/tools/uvr5/vr.py @@ -110,6 +110,9 @@ class AudioPre: y_spec_m = pred * X_phase v_spec_m = X_spec_m - y_spec_m + if is_hp3 == True: + ins_root,vocal_root = vocal_root,ins_root + if ins_root is not None: if self.data["high_end_process"].startswith("mirroring"): input_high_end_ = spec_utils.mirroring( diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index 97170bf..e09830d 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -81,7 +81,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format try: if done == 0: pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 + inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 ) infos.append("%s->Success" % (os.path.basename(inp_path))) yield "\n".join(infos) @@ -89,7 +89,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format try: if done == 0: pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 + inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 ) infos.append("%s->Success" % (os.path.basename(inp_path))) yield "\n".join(infos) From 47e113a7b29fccac69965d6725bded645d7b5b94 Mon Sep 17 00:00:00 2001 From: KakaruHayate <97896816+KakaruHayate@users.noreply.github.com> Date: Sat, 27 Jan 2024 10:49:48 +0800 Subject: [PATCH 094/126] support powershell --- go-webui.ps1 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/go-webui.ps1 b/go-webui.ps1 index 5d225a4..219798a 100644 --- a/go-webui.ps1 +++ b/go-webui.ps1 @@ -1,2 +1,4 @@ -runtime\python.exe webui.py +$ErrorActionPreference = "SilentlyContinue" +chcp 65001 +& "$PSScriptRoot\runtime\python.exe" "$PSScriptRoot\webui.py" pause From 14097eee3b775316b7f38927284e273a5bda6c3c Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sat, 27 Jan 2024 11:59:23 +0800 Subject: [PATCH 095/126] Update go-webui.bat --- go-webui.bat | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/go-webui.bat b/go-webui.bat index be4135e..2f98538 100644 --- a/go-webui.bat +++ b/go-webui.bat @@ -1,4 +1,2 @@ -@echo off -chcp 65001 -"%~dp0\runtime\python.exe" "%~dp0\webui.py" -pause \ No newline at end of file +runtime\python.exe webui.py +pause From cf19b6f38761dce4593fe2bf99ee29b25d821adc Mon Sep 17 00:00:00 2001 From: Erythrocyte3803 <2544390577@qq.com> Date: Sat, 27 Jan 2024 13:20:21 +0900 Subject: [PATCH 096/126] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=9C=A8=E5=B0=9D?= =?UTF-8?q?=E8=AF=95=E7=8B=AC=E7=AB=8B=E8=BF=90=E8=A1=8C=20inference=5Fweb?= =?UTF-8?q?ui=20=E6=97=B6=E5=80=99=EF=BC=8C=E6=89=BE=E4=B8=8D=E5=88=B0?= =?UTF-8?q?=E9=83=A8=E5=88=86=E5=89=8D=E7=BD=AE=E6=A8=A1=E5=9E=8B(?= =?UTF-8?q?=E4=BE=8B=E5=A6=82bert)=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- GPT_SoVITS/inference_webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 5495be5..f725c5c 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -29,10 +29,10 @@ else: # ) # sovits_path = os.environ.get("sovits_path", "pretrained_models/s2G488k.pth") cnhubert_base_path = os.environ.get( - "cnhubert_base_path", "pretrained_models/chinese-hubert-base" + "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base" ) bert_path = os.environ.get( - "bert_path", "pretrained_models/chinese-roberta-wwm-ext-large" + "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" ) infer_ttswebui = os.environ.get("infer_ttswebui", 9872) infer_ttswebui = int(infer_ttswebui) From a0af8c32de381ad1fdd6207e838c550bc0e10546 Mon Sep 17 00:00:00 2001 From: YuChuXi Date: Sat, 27 Jan 2024 14:11:10 +0800 Subject: [PATCH 097/126] =?UTF-8?q?config.py=20:=20=20=20=E7=8E=B0?= =?UTF-8?q?=E5=9C=A8=E5=9C=A8=E6=A3=80=E6=9F=A5=E5=8D=8A=E7=B2=BE=E5=BA=A6?= =?UTF-8?q?=E8=AE=A1=E7=AE=97=E5=89=8D=E4=BC=9A=E5=85=88=E6=A3=80=E6=9F=A5?= =?UTF-8?q?CUDA=20webui.py=20:=20=20=20=E4=BC=98=E5=8C=96users.py=E7=9A=84?= =?UTF-8?q?=E5=86=99=E5=85=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 21 +++++++++++---------- webui.py | 18 +++++++++++++----- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/config.py b/config.py index 97369e7..fef2fac 100644 --- a/config.py +++ b/config.py @@ -29,16 +29,17 @@ webui_port_subfix = 9871 api_port = 9880 -gpu_name = torch.cuda.get_device_name(0) -if ( - ("16" in gpu_name and "V100" not in gpu_name.upper()) - or "P40" in gpu_name.upper() - or "P10" in gpu_name.upper() - or "1060" in gpu_name - or "1070" in gpu_name - or "1080" in gpu_name -): - is_half=False +if infer_device == "cuda": + gpu_name = torch.cuda.get_device_name(0) + if ( + ("16" in gpu_name and "V100" not in gpu_name.upper()) + or "P40" in gpu_name.upper() + or "P10" in gpu_name.upper() + or "1060" in gpu_name + or "1070" in gpu_name + or "1080" in gpu_name + ): + is_half=False if(infer_device=="cpu"):is_half=False diff --git a/webui.py b/webui.py index c71cea3..5f3fcb6 100644 --- a/webui.py +++ b/webui.py @@ -25,13 +25,21 @@ for path in site.getsitepackages(): if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir] #os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" +os.environ["all_proxy"] = "" + for site_packages_root in site_packages_roots: if os.path.exists(site_packages_root): - with open("%s/users.pth" % (site_packages_root), "w") as f: - f.write( - "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" - % (now_dir, now_dir, now_dir, now_dir, now_dir) - ) + print(site_packages_roots) + try: + with open("%s/users.pth" % (site_packages_root), "w+") as f: + f.write( + "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" + % (now_dir, now_dir, now_dir, now_dir, now_dir) + ) + break + except PermissionError: + pass + from tools import my_utils import traceback import shutil From a770d3c985888f38a479dd82ef652cab11f8e83c Mon Sep 17 00:00:00 2001 From: YuChuXi Date: Sat, 27 Jan 2024 14:28:37 +0800 Subject: [PATCH 098/126] =?UTF-8?q?=E5=88=A0=E9=99=A4=E5=A4=9A=E4=BD=99?= =?UTF-8?q?=E7=9A=84=E8=B0=83=E8=AF=95=E4=BB=A3=E7=A0=81=20=E7=A6=81?= 
=?UTF-8?q?=E7=94=A8SOCK=E4=BB=A3=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- webui.py | 1 - 1 file changed, 1 deletion(-) diff --git a/webui.py b/webui.py index 5f3fcb6..0333c4e 100644 --- a/webui.py +++ b/webui.py @@ -29,7 +29,6 @@ os.environ["all_proxy"] = "" for site_packages_root in site_packages_roots: if os.path.exists(site_packages_root): - print(site_packages_roots) try: with open("%s/users.pth" % (site_packages_root), "w+") as f: f.write( From d86ffa2386ab4b93a2ca7c1a42ef0cb3fa4592c7 Mon Sep 17 00:00:00 2001 From: Kenn Zhang Date: Sat, 27 Jan 2024 15:15:08 +0800 Subject: [PATCH 099/126] =?UTF-8?q?=E4=BF=AE=E6=94=B9Dockerfile=EF=BC=8C?= =?UTF-8?q?=E4=BD=BF=E5=85=B6=E7=9B=B4=E6=8E=A5=E5=88=A9=E7=94=A8=E6=9C=80?= =?UTF-8?q?=E6=96=B0=E7=9A=84requirements.txt=E5=AE=89=E8=A3=85Python?= =?UTF-8?q?=E5=8C=85=EF=BC=9B=E5=B9=B6=E5=9C=A8=E6=9E=84=E5=BB=BA=E8=BF=87?= =?UTF-8?q?=E7=A8=8B=E4=B8=AD=E9=A2=84=E5=85=88=E4=B8=8B=E8=BD=BDmoda=20AS?= =?UTF-8?q?R=E5=92=8Cnltk=E7=9B=B8=E5=85=B3=E7=9A=84=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E5=88=B0=E9=95=9C=E5=83=8F=E4=B8=AD=E4=BB=A5=E4=BE=BF=E5=8A=A0?= =?UTF-8?q?=E5=BF=AB=E5=88=9D=E6=AC=A1=E8=BF=90=E8=A1=8C=E7=9A=84=E9=80=9F?= =?UTF-8?q?=E5=BA=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Docker/download.py | 7 +++++++ Dockerfile | 26 +++++++++----------------- README.md | 3 ++- docker-compose.yaml | 2 +- docs/cn/README.md | 2 +- docs/ja/README.md | 3 ++- 6 files changed, 22 insertions(+), 21 deletions(-) create mode 100644 Docker/download.py diff --git a/Docker/download.py b/Docker/download.py new file mode 100644 index 0000000..46becac --- /dev/null +++ b/Docker/download.py @@ -0,0 +1,7 @@ +# Download moda ASR related models +from modelscope import snapshot_download +model_dir = snapshot_download('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch') +model_dir = snapshot_download('damo/speech_fsmn_vad_zh-cn-16k-common-pytorch') +model_dir = snapshot_download('damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch') + + diff --git a/Dockerfile b/Dockerfile index cbf92cb..78c09e0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04 LABEL maintainer="breakstring@hotmail.com" -LABEL version="dev-20240123.03" +LABEL version="dev-20240127.f9387e0" LABEL description="Docker image for GPT-SoVITS" @@ -18,27 +18,19 @@ RUN apt-get update && \ WORKDIR /workspace COPY . 
/workspace +# install python packages +RUN pip install -r requirements.txt + # Download models RUN chmod +x /workspace/Docker/download.sh && /workspace/Docker/download.sh -# 本应该从 requirements.txt 里面安装package,但是由于funasr和modelscope的问题,暂时先在后面手工安装依赖包吧 -RUN pip install --no-cache-dir torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet transformers jieba psutil PyYAML -# 这里强制指定了modelscope和funasr的版本,后面damo_asr的模型让它们自己下载 -RUN pip install --no-cache-dir modelscope~=1.10.0 torchaudio sentencepiece funasr~=0.8.7 +# Download moda ASR related +RUN python /workspace/Docker/download.py -# 先屏蔽掉,让容器里自己下载 -# Clone damo_asr -#WORKDIR /workspace/tools/damo_asr/models -#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && \ -# (cd speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch && git lfs pull) -#RUN git clone --depth 1 https://www.modelscope.cn/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch.git speech_fsmn_vad_zh-cn-16k-common-pytorch && \ -# (cd speech_fsmn_vad_zh-cn-16k-common-pytorch && git lfs pull) -#RUN git clone --depth 1 https://www.modelscope.cn/iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git punc_ct-transformer_zh-cn-common-vocab272727-pytorch && \ -# (cd punc_ct-transformer_zh-cn-common-vocab272727-pytorch && git lfs pull) +# Download nltk realted +RUN python -m nltk.downloader averaged_perceptron_tagger +RUN python -m nltk.downloader cmudict -#RUN parallel --will-cite -a /workspace/Docker/damo.sha256 "echo -n {} | sha256sum -c" - -#WORKDIR /workspace EXPOSE 9870 EXPOSE 9871 diff --git a/README.md b/README.md index 2d856bb..30b6b97 100644 --- a/README.md +++ b/README.md @@ -114,8 +114,9 @@ For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally) ### Using Docker -#### docker-compose.yaml configuration +#### docker-compose.yaml configuration +0. Regarding image tags: Due to rapid updates in the codebase, the packaging of images with the 'latest' tag on [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) has been temporarily suspended, shifting instead to using the hash value of the most current commit from the targeted git repo. For instance, you might see image names and tags such as: breakstring/gpt-sovits:dev-20240127.f9387e0. This signifies the image was packaged for the commit f9387e0 on January 27, 2024. Additionally, when using these images, please remember to modify the parameters in your docker-compose.yaml or docker command line accordingly. Alternatively, you can build locally using a Dockerfile according to your own needs. 1. Environment Variables: - is_half: Controls half-precision/double-precision. This is typically the cause if the content under the directories 4-cnhubert/5-wav32k is not generated correctly during the "SSL extracting" step. Adjust to True or False based on your actual situation. 
diff --git a/docker-compose.yaml b/docker-compose.yaml index ed6f82a..09967b1 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3.8' services: gpt-sovits: - image: breakstring/gpt-sovits:dev-20240123.03 + image: breakstring/gpt-sovits:dev-20240127.f9387e0 container_name: gpt-sovits-container environment: - is_half=False diff --git a/docs/cn/README.md b/docs/cn/README.md index 5cd5824..56033c1 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -103,7 +103,7 @@ brew install ffmpeg ### 在 Docker 中使用 #### docker-compose.yaml 设置 - +0. image的标签:由于代码库更新很快,所以在 [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) 上暂时不再打包 latest 标签的镜像,转而通过当前针对的git repo中当前最新的commit的hash值。例如,您会看到形如: breakstring/gpt-sovits:dev-20240127.f9387e0 这样的镜像名称和标签。即代表 2024年1月27日打包针对 f9387e0 这个commit 的镜像。同时,您在使用时请注意修改 docker-compose.yaml 或者 docker 命令行中的参数。或者在本地根据您自己的需求通过Dockerfile进行构建。 1. 环境变量: - is_half: 半精度/双精度控制。在进行 "SSL extracting" 步骤时如果无法正确生成 4-cnhubert/5-wav32k 目录下的内容时,一般都是它引起的,可以根据实际情况来调整为True或者False。 diff --git a/docs/ja/README.md b/docs/ja/README.md index 181cafa..5e93287 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -99,8 +99,9 @@ brew install ffmpeg ### Dockerの使用 -#### docker-compose.yamlの設定 +#### docker-compose.yamlの設定 +0. イメージのタグについて:コードベースの更新が速いため、[Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) での最新(latest)タグのイメージのパッケージングを一時停止し、代わりに現在対象としているgitリポジトリの最新のコミットのハッシュ値を用います。例えば、breakstring/gpt-sovits:dev-20240127.f9387e0 のようなイメージ名とタグを見ることができます。これは、2024年1月27日にf9387e0のコミットに対してパッケージされたイメージを意味します。また、使用時はdocker-compose.yamlやdockerのコマンドラインのパラメータを変更することに注意してください。または、ご自身のニーズに合わせてDockerfileを使ってローカルでビルドすることができます。 1. 環境変数: - `is_half`:半精度/倍精度の制御。"SSL抽出"ステップ中に`4-cnhubert/5-wav32k`ディレクトリ内の内容が正しく生成されない場合、通常これが原因です。実際の状況に応じてTrueまたはFalseに調整してください。 From 13f1e55d13acbd8223bbcec7b8b6f7bf5ea216f7 Mon Sep 17 00:00:00 2001 From: Kenn Zhang Date: Sat, 27 Jan 2024 15:37:43 +0800 Subject: [PATCH 100/126] =?UTF-8?q?=E4=BF=AE=E6=94=B9Dockerfile=E4=B8=AD?= =?UTF-8?q?=E7=9A=84=E7=89=88=E6=9C=AC=E4=BB=A5=E5=8F=8Adocker-compose.yam?= =?UTF-8?q?l=E4=B8=AD=E7=9A=84image=E5=B0=8F=E8=8A=82;=E5=9C=A8readme?= =?UTF-8?q?=E4=B8=AD=E5=A2=9E=E5=8A=A0=E7=9B=B8=E5=85=B3=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Dockerfile | 2 +- README.md | 4 ++-- docker-compose.yaml | 2 +- docs/cn/README.md | 4 ++-- docs/ja/README.md | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 78c09e0..ac85a4b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04 LABEL maintainer="breakstring@hotmail.com" -LABEL version="dev-20240127.f9387e0" +LABEL version="dev-20240127" LABEL description="Docker image for GPT-SoVITS" diff --git a/README.md b/README.md index 30b6b97..c019127 100644 --- a/README.md +++ b/README.md @@ -116,7 +116,7 @@ For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally) #### docker-compose.yaml configuration -0. Regarding image tags: Due to rapid updates in the codebase, the packaging of images with the 'latest' tag on [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) has been temporarily suspended, shifting instead to using the hash value of the most current commit from the targeted git repo. For instance, you might see image names and tags such as: breakstring/gpt-sovits:dev-20240127.f9387e0. This signifies the image was packaged for the commit f9387e0 on January 27, 2024. 
Additionally, when using these images, please remember to modify the parameters in your docker-compose.yaml or docker command line accordingly. Alternatively, you can build locally using a Dockerfile according to your own needs. +0. Regarding image tags: Due to rapid updates in the codebase and the slow process of packaging and testing images, please check [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) for the currently packaged latest images and select as per your situation, or alternatively, build locally using a Dockerfile according to your own needs. 1. Environment Variables: - is_half: Controls half-precision/double-precision. This is typically the cause if the content under the directories 4-cnhubert/5-wav32k is not generated correctly during the "SSL extracting" step. Adjust to True or False based on your actual situation. @@ -134,7 +134,7 @@ docker compose -f "docker-compose.yaml" up -d As above, modify the corresponding parameters based on your actual situation, then run the following command: ``` -docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03 +docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx ``` diff --git a/docker-compose.yaml b/docker-compose.yaml index 09967b1..874824e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3.8' services: gpt-sovits: - image: breakstring/gpt-sovits:dev-20240127.f9387e0 + image: breakstring/gpt-sovits:xxxxx # please change the image name and tag base your environment container_name: gpt-sovits-container environment: - is_half=False diff --git a/docs/cn/README.md b/docs/cn/README.md index 56033c1..02ae1cb 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -103,7 +103,7 @@ brew install ffmpeg ### 在 Docker 中使用 #### docker-compose.yaml 设置 -0. image的标签:由于代码库更新很快,所以在 [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) 上暂时不再打包 latest 标签的镜像,转而通过当前针对的git repo中当前最新的commit的hash值。例如,您会看到形如: breakstring/gpt-sovits:dev-20240127.f9387e0 这样的镜像名称和标签。即代表 2024年1月27日打包针对 f9387e0 这个commit 的镜像。同时,您在使用时请注意修改 docker-compose.yaml 或者 docker 命令行中的参数。或者在本地根据您自己的需求通过Dockerfile进行构建。 +0. image的标签:由于代码库更新很快,镜像的打包和测试又很慢,所以请自行在 [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) 查看当前打包好的最新的镜像并根据自己的情况选用,或者在本地根据您自己的需求通过Dockerfile进行构建。 1. 
环境变量: - is_half: 半精度/双精度控制。在进行 "SSL extracting" 步骤时如果无法正确生成 4-cnhubert/5-wav32k 目录下的内容时,一般都是它引起的,可以根据实际情况来调整为True或者False。 @@ -122,7 +122,7 @@ docker compose -f "docker-compose.yaml" up -d 同上,根据您自己的实际情况修改对应的参数,然后运行如下命令: ``` -docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03 +docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx ``` diff --git a/docs/ja/README.md b/docs/ja/README.md index 5e93287..1120336 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -101,7 +101,7 @@ brew install ffmpeg #### docker-compose.yamlの設定 -0. イメージのタグについて:コードベースの更新が速いため、[Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) での最新(latest)タグのイメージのパッケージングを一時停止し、代わりに現在対象としているgitリポジトリの最新のコミットのハッシュ値を用います。例えば、breakstring/gpt-sovits:dev-20240127.f9387e0 のようなイメージ名とタグを見ることができます。これは、2024年1月27日にf9387e0のコミットに対してパッケージされたイメージを意味します。また、使用時はdocker-compose.yamlやdockerのコマンドラインのパラメータを変更することに注意してください。または、ご自身のニーズに合わせてDockerfileを使ってローカルでビルドすることができます。 +0. イメージのタグについて:コードベースの更新が速く、イメージのパッケージングとテストが遅いため、[Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) で現在パッケージされている最新のイメージをご覧になり、ご自身の状況に応じて選択するか、またはご自身のニーズに応じてDockerfileを使用してローカルで構築してください。 1. 
環境変数: - `is_half`:半精度/倍精度の制御。"SSL抽出"ステップ中に`4-cnhubert/5-wav32k`ディレクトリ内の内容が正しく生成されない場合、通常これが原因です。実際の状況に応じてTrueまたはFalseに調整してください。 @@ -118,7 +118,7 @@ docker compose -f "docker-compose.yaml" up -d 上記と同様に、実際の状況に基づいて対応するパラメータを変更し、次のコマンドを実行します: ```markdown -docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:dev-20240123.03 +docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9870:9870 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx ``` From fd89bb09d6fbebbc38c422b0f08092d50a29c954 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sat, 27 Jan 2024 16:05:05 +0800 Subject: [PATCH 101/126] Update webui.py --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 0333c4e..885aecb 100644 --- a/webui.py +++ b/webui.py @@ -30,7 +30,7 @@ os.environ["all_proxy"] = "" for site_packages_root in site_packages_roots: if os.path.exists(site_packages_root): try: - with open("%s/users.pth" % (site_packages_root), "w+") as f: + with open("%s/users.pth" % (site_packages_root), "w") as f: f.write( "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" % (now_dir, now_dir, now_dir, now_dir, now_dir) From 5f39686b09a8eba5c3e70d8b289df5f07dfa5ef5 Mon Sep 17 00:00:00 2001 From: Wu Zichen Date: Sat, 27 Jan 2024 18:43:17 +0800 Subject: [PATCH 102/126] Update README --- README.md | 13 ++++++++++--- docs/cn/README.md | 13 ++++++++++--- docs/ja/README.md | 13 ++++++++++--- 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 2d856bb..8613816 100644 --- a/README.md +++ b/README.md @@ -43,12 +43,19 @@ If you are a Windows user (tested with win>=10) you can install directly via the - Python 3.9, PyTorch 2.0.1, CUDA 11 - Python 3.10.13, PyTorch 2.1.2, CUDA 12.3 -- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS) +- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple silicon, GPU) _Note: numba==0.56.4 require py<3.11_ ### For Mac Users -If you are a Mac user, please install by using the following commands: +If you are a Mac user, make sure you meet the following conditions for training and inferencing with GPU: +- Mac computers with Apple silicon or AMD GPUs +- macOS 12.3 or later +- Xcode command-line tools installed by running `xcode-select --install` + +_Other Macs can do inference with CPU only._ + +Then install by using the following commands: #### Create Environment ```bash conda create -n GPTSoVits python=3.9 @@ -60,7 +67,7 @@ pip install -r requirements.txt pip uninstall torch torchaudio pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu ``` -_Note: For preprocessing with UVR5, it is recommended to [download the original project GUI](https://github.com/Anjok07/ultimatevocalremovergui) and select GPU for operation. 
Additionally, there may be memory leak issues when using Mac for inference, restarting the inference webUI can release the memory._ +_Note: For preprocessing with UVR5, it is recommended to [download the original project GUI](https://github.com/Anjok07/ultimatevocalremovergui) and select "GPU Conversion". Additionally, there might be memory leak issues, especially during inference. Restarting the inference webUI can help._ ### Quick Install with Conda ```bash diff --git a/docs/cn/README.md b/docs/cn/README.md index 5cd5824..3b07753 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -43,12 +43,19 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- - Python 3.9、PyTorch 2.0.1和CUDA 11 - Python 3.10.13, PyTorch 2.1.2和CUDA 12.3 -- Python 3.9、Pytorch 2.3.0.dev20240122和macOS 14.3(Apple 芯片,MPS) +- Python 3.9、Pytorch 2.3.0.dev20240122和macOS 14.3(Apple 芯片,GPU) _注意: numba==0.56.4 需要 python<3.11_ ### Mac 用户 -如果你是Mac用户,请使用以下命令安装: +如果你是Mac用户,请先确保满足以下条件以使用GPU进行训练和推理: +- 搭载Apple芯片或AMD GPU的Mac +- macOS 12.3或更高版本 +- 已通过运行`xcode-select --install`安装Xcode command-line tools + +_其他Mac仅支持使用CPU进行推理_ + +然后使用以下命令安装: #### 创建环境 ```bash conda create -n GPTSoVits python=3.9 @@ -60,7 +67,7 @@ pip install -r requirements.txt pip uninstall torch torchaudio pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu ``` -_注意:如需使用UVR5进行预处理,建议[下载原项目GUI](https://github.com/Anjok07/ultimatevocalremovergui),勾选GPU运行。另外,使用Mac推理时可能存在内存泄漏问题,重启推理UI即可释放内存。_ +_注意:如需使用UVR5进行预处理,建议[下载原项目GUI](https://github.com/Anjok07/ultimatevocalremovergui),勾选“GPU Conversion”。另外,可能会出现内存泄漏问题,主要体现在推理时。重启推理webUI可以释放内存。_ ### 使用Conda快速安装 ```bash diff --git a/docs/ja/README.md b/docs/ja/README.md index 181cafa..af5be46 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -39,12 +39,19 @@ Windows ユーザーであれば(win>=10 にてテスト済み)、prezip 経 ### Python と PyTorch のバージョン - Python 3.9, PyTorch 2.0.1, CUDA 11 - Python 3.10.13, PyTorch 2.1.2, CUDA 12.3 -- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple Silicon, MPS) +- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple silicon, GPU) _注記: numba==0.56.4 は py<3.11 が必要です_ ### Macユーザーへ -Macユーザーの方は、以下のコマンドを使用してインストールしてください。 +如果あなたがMacユーザーである場合、GPUを使用してトレーニングおよび推論を行うために以下の条件を満たしていることを確認してください: +- AppleシリコンまたはAMD GPUを搭載したMacコンピューター +- macOS 12.3以降 +- `xcode-select --install`を実行してインストールされたXcodeコマンドラインツール + +_その他のMacはCPUのみで推論を行うことができます。_ + +次に、以下のコマンドを使用してインストールします: #### 環境作成 ```bash conda create -n GPTSoVits python=3.9 @@ -56,7 +63,7 @@ pip install -r requirements.txt pip uninstall torch torchaudio pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu ``` -_注記: UVR5を使用した前処理には、[元のプロジェクトGUIをダウンロード](https://github.com/Anjok07/ultimatevocalremovergui)して、操作にGPUを選択することを推奨します。さらに、Macを使用して推論する際にメモリリークの問題が発生する可能性がありますが、推論のwebUIを再起動することでメモリを解放できます。_ +_注記: UVR5を使用して前処理を行う場合は、[オリジナルプロジェクトのGUIをダウンロード](https://github.com/Anjok07/ultimatevocalremovergui)して、「GPU Conversion」を選択することをお勧めします。さらに、特に推論時にメモリリークの問題が発生する可能性があります。推論webUIを再起動することでメモリを解放することができます。_ ### Conda によるクイックインストール ```bash From 0e9398705b7d8b476cf5d5dd6591c82704c76eca Mon Sep 17 00:00:00 2001 From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com> Date: Sat, 27 Jan 2024 22:54:22 +0800 Subject: [PATCH 103/126] Update it_IT.json --- i18n/locale/it_IT.json | 375 ++++++++++++++++++++++++++++------------- 1 file changed, 258 insertions(+), 117 deletions(-) diff --git a/i18n/locale/it_IT.json b/i18n/locale/it_IT.json index dc089be..f34395a 100644 --- 
a/i18n/locale/it_IT.json +++ b/i18n/locale/it_IT.json @@ -1,135 +1,276 @@ { - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Se >=3: applica il filtro mediano ai risultati del pitch raccolto. ", - "A模型权重": "Peso (w) per il modello A:", - "A模型路径": "Percorso per il modello A:", - "B模型路径": "Percorso per il modello B:", + "很遗憾您这没有能用的显卡来支持您训练": "Purtroppo non hai una scheda grafica utilizzabile per supportare il tuo addestramento", + "UVR5已开启": "UVR5 è attivato", + "UVR5已关闭": "UVR5 è disattivato", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Questo software è open source con licenza MIT. L'autore non ha alcun controllo sul software. L'utente che utilizza il software o diffonde i suoni derivati dal software ne è responsabile.
Se non accetti questi termini, non puoi utilizzare o citare alcun codice o file all'interno del pacchetto software. Vedi la cartella principaleLICENSE per i dettagli.", + "0-前置数据集获取工具": "0-Strumento di acquisizione del dataset preliminare", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-Strumento di separazione voce e accompagnamento UVR5 & Rimozione riverbero e ritardo", + "是否开启UVR5-WebUI": "Attivare UVR5-WebUI", + "UVR5进程输出信息": "Informazioni sull'output del processo UVR5", + "0b-语音切分工具": "0b-Strumento di segmentazione vocale", + "音频自动切分输入路径,可文件可文件夹": "Percorso di input per la segmentazione automatica dell'audio, può essere un file o una cartella", + "切分后的子音频的输出根目录": "Directory radice di output per gli audio segmentati", + "threshold:音量小于这个值视作静音的备选切割点": "threshold: Punto di taglio alternativo considerato silenzioso se il volume è inferiore a questo valore", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: Lunghezza minima di ogni segmento. Se il primo segmento è troppo corto, verrà unito agli segmenti successivi fino a superare questo valore", + "min_interval:最短切割间隔": "min_interval: Intervallo minimo di taglio", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: Come calcolare la curva del volume. Più piccolo è, maggiore è la precisione ma aumenta la complessità computazionale (non significa che una maggiore precisione dà risultati migliori)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept: Massima durata del silenzio dopo il taglio", + "开启语音切割": "Attivare la segmentazione vocale", + "终止语音切割": "Terminare la segmentazione vocale", + "max:归一化后最大值多少": "max: Massimo valore dopo la normalizzazione", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: Quanta proporzione dell'audio normalizzato deve essere miscelata", + "切割使用的进程数": "Numero di processi utilizzati per il taglio", + "语音切割进程输出信息": "Informazioni sull'output del processo di segmentazione vocale", + "0c-中文批量离线ASR工具": "0c-Strumento di ASR offline batch in cinese", + "开启离线批量ASR": "Attivare ASR offline batch", + "终止ASR进程": "Terminare il processo ASR", + "批量ASR(中文only)输入文件夹路径": "Percorso della cartella di input per ASR offline batch (solo cinese)", + "ASR进程输出信息": "Informazioni sull'output del processo ASR", + "0d-语音文本校对标注工具": "0d-Strumento di correzione e annotazione testo vocale", + "是否开启打标WebUI": "Attivare l'interfaccia utente Web di annotazione", + "打标数据标注文件路径": "Percorso del file di annotazione dei dati contrassegnati", + "打标工具进程输出信息": "Informazioni sull'output del processo di annotazione", + "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS", + "*实验/模型名": "*Nome dell'esperimento/modello", + "显卡信息": "Informazioni sulla scheda grafica", + "预训练的SoVITS-G模型路径": "Percorso del modello preaddestrato SoVITS-G", + "预训练的SoVITS-D模型路径": "Percorso del modello preaddestrato SoVITS-D", + "预训练的GPT模型路径": "Percorso del modello preaddestrato GPT", + "1A-训练集格式化工具": "1A-Strumento di formattazione del set di addestramento", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "Nella cartella logs/nome dell'esperimento dovrebbero esserci file e cartelle che iniziano con 23456", + "*文本标注文件": "*File di annotazione del testo", + "*训练集音频文件目录": "*Directory dei file audio del set di addestramento", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Directory dei file audio del set di addestramento, concatenare il nome del file corrispondente nella lista", + "1Aa-文本内容": "1Aa-Contenuto del testo", + "GPU卡号以-分割,每个卡号一个进程": "Numero di GPU separati da '-'; ogni numero corrisponde a un processo", + "预训练的中文BERT模型路径": "Percorso del modello BERT cinese preaddestrato", + "开启文本获取": "Attivare l'estrazione del testo", + "终止文本获取进程": 
"Terminare il processo di estrazione del testo", + "文本进程输出信息": "Informazioni sull'output del processo di estrazione del testo", + "1Ab-SSL自监督特征提取": "1Ab-Estrazione di caratteristiche auto-supervisionata SSL", + "预训练的SSL模型路径": "Percorso del modello SSL preaddestrato", + "开启SSL提取": "Attivare l'estrazione SSL", + "终止SSL提取进程": "Terminare il processo di estrazione SSL", + "SSL进程输出信息": "Informazioni sull'output del processo SSL", + "1Ac-语义token提取": "1Ac-Estrazione del token semantico", + "开启语义token提取": "Attivare l'estrazione del token semantico", + "终止语义token提取进程": "Terminare il processo di estrazione del token semantico", + "语义token提取进程输出信息": "Informazioni sull'output del processo di estrazione del token semantico", + "1Aabc-训练集格式化一键三连": "1Aabc-Strumento di formattazione del set di addestramento con tre passaggi", + "开启一键三连": "Attivare la formattazione con tre passaggi", + "终止一键三连": "Terminare la formattazione con tre passaggi", + "一键三连进程输出信息": "Informazioni sull'output del processo di 'One Click Three Connect'", + "1B-微调训练": "1B-Allenamento di affinamento", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-Allenamento di SoVITS. I file del modello destinati alla condivisione sono salvati in SoVITS_weights.", + "每张显卡的batch_size": "Batch size per ogni scheda grafica", + "总训练轮数total_epoch,不建议太高": "Numero totale di epoche di addestramento, non raccomandato troppo alto", + "文本模块学习率权重": "Peso del tasso di apprendimento del modulo di testo", + "保存频率save_every_epoch": "Frequenza di salvataggio ogni epoca", + "是否仅保存最新的ckpt文件以节省硬盘空间": "Salvare solo il file ckpt più recente per risparmiare spazio su disco", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "Salvare il modello finale più piccolo nella cartella weights ad ogni punto di salvataggio", + "开启SoVITS训练": "Attivare l'allenamento di SoVITS", + "终止SoVITS训练": "Terminare l'allenamento di SoVITS", + "SoVITS训练进程输出信息": "Informazioni sull'output del processo di allenamento di SoVITS", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-Allenamento di GPT. I file del modello destinati alla condivisione sono salvati in GPT_weights.", + "总训练轮数total_epoch": "Numero totale di epoche di addestramento", + "开启GPT训练": "Attivare l'allenamento di GPT", + "终止GPT训练": "Terminare l'allenamento di GPT", + "GPT训练进程输出信息": "Informazioni sull'output del processo di allenamento di GPT", + "1C-推理": "1C-Inferenza", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Scegli il modello salvato in SoVITS_weights e GPT_weights dopo l'addestramento. Uno di default è il modello di base, utilizzato per l'esperienza di Zero Shot TTS in 5 secondi.", + "*GPT模型列表": "*Lista dei modelli GPT", + "*SoVITS模型列表": "*Lista dei modelli SoVITS", + "GPU卡号,只能填1个整数": "Numero della scheda grafica, può essere inserito solo un numero intero", + "刷新模型路径": "Aggiorna il percorso del modello", + "是否开启TTS推理WebUI": "Attivare l'interfaccia utente Web per l'inferenza TTS", + "TTS推理WebUI进程输出信息": "Informazioni sull'output del processo dell'interfaccia utente Web per l'inferenza TTS", + "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voce modificata", + "施工中,请静候佳音": "In costruzione, attendi pazientemente le buone notizie", + "TTS推理进程已开启": "Il processo di inferenza TTS è stato avviato", + "TTS推理进程已关闭": "Il processo di inferenza TTS è stato chiuso", + "打标工具WebUI已开启": "L'interfaccia utente Web dello strumento di annotazione è stata avviata", + "打标工具WebUI已关闭": "L'interfaccia utente Web dello strumento di annotazione è stata chiusa", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 
详见根目录LICENSE.": "Questo software è open source con licenza MIT. L'autore non ha alcun controllo sul software. L'utente che utilizza il software o diffonde i suoni derivati dal software ne è responsabile. Se non accetti questi termini, non puoi utilizzare o citare alcun codice o file all'interno del pacchetto software. Vedi la cartella principale LICENSE per i dettagli.", + "*请上传并填写参考信息": "*Carica e compila le informazioni di riferimento", + "*请填写需要合成的目标文本": "*Compila il testo di destinazione da sintetizzare", + "ASR任务开启:%s": "Attività ASR avviata: %s", + "GPT训练完成": "Allenamento di GPT completato", + "GPT训练开始:%s": "Inizio dell'allenamento di GPT: %s", + "SSL提取进程执行中": "Processo di estrazione SSL in corso", + "SSL提取进程结束": "Processo di estrazione SSL completato", + "SoVITS训练完成": "Allenamento di SoVITS completato", + "SoVITS训练开始:%s": "Inizio dell'allenamento di SoVITS: %s", + "一键三连中途报错": "Errore durante 'One Click Three Connect'", + "一键三连进程结束": "Processo di 'One Click Three Connect' completato", + "中文": "Cinese", + "凑50字一切": "Riempire con 50 caratteri per tutto", + "凑五句一切": "Riempire con cinque frasi per tutto", + "切分后文本": "Testo dopo il taglio", + "切割执行中": "Taglio in corso", + "切割结束": "Taglio completato", + "参考音频的文本": "Testo dell'audio di riferimento", + "参考音频的语种": "Lingua dell'audio di riferimento", + "合成语音": "Sintesi vocale", + "后续将支持混合语种编码文本输入。": "In futuro sarà supportata l'input di testi con codifica mista di lingue.", + "已有正在进行的ASR任务,需先终止才能开启下一次任务": "È già in corso un'attività ASR. Devi interromperla prima di avviare una nuova attività.", + "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "È già in corso un'attività di allenamento di GPT. Devi interromperla prima di avviare una nuova attività.", + "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "È già in corso un'attività di estrazione SSL. Devi interromperla prima di avviare una nuova attività.", + "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "È già in corso un'attività di allenamento di SoVITS. Devi interromperla prima di avviare una nuova attività.", + "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "È già in corso un'attività di 'One Click Three Connect'. Devi interromperla prima di avviare una nuova attività.", + "已有正在进行的切割任务,需先终止才能开启下一次任务": "È già in corso un'attività di taglio. Devi interromperla prima di avviare una nuova attività.", + "已有正在进行的文本任务,需先终止才能开启下一次任务": "È già in corso un'attività di testo. Devi interromperla prima di avviare una nuova attività.", + "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "È già in corso un'attività di estrazione di token semantici. Devi interromperla prima di avviare una nuova attività.", + "已终止ASR进程": "Il processo ASR è stato terminato", + "已终止GPT训练": "L'allenamento di GPT è stato terminato", + "已终止SoVITS训练": "Allenamento SoVITS terminato", + "已终止所有1a进程": "Processi 1a terminati", + "已终止所有1b进程": "Processi 1b terminati", + "已终止所有一键三连进程": "Processi One Click Three Connect terminati", + "已终止所有切割进程": "Processi di taglio terminati", + "已终止所有语义token进程": "Processi di estrazione token semantici terminati", + "按中文句号。切": "Taglia secondo il punto cinese.", + "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Strumento di divisione del testo. I testi troppo lunghi potrebbero non avere un buon effetto di sintesi, quindi è consigliabile dividerli prima della sintesi. 
La sintesi verrà separata in base ai ritorni a capo nel testo e successivamente ricomposta.", + "文本进程执行中": "Processo di testo in esecuzione", + "文本进程结束": "Processo di testo terminato", + "日文": "Giapponese", + "英文": "Inglese", + "语义token提取进程执行中": "Processo di estrazione token semantici in esecuzione", + "语义token提取进程结束": "Processo di estrazione token semantici terminato", + "请上传参考音频": "Carica l'audio di riferimento", + "输入路径不存在": "Il percorso di input non esiste", + "输入路径存在但既不是文件也不是文件夹": "Il percorso di input esiste ma non è né un file né una cartella", + "输出的语音": "Audio di output", + "进度:1a-done": "Progresso: 1a-done", + "进度:1a-done, 1b-ing": "Progresso: 1a-done, 1b-ing", + "进度:1a-ing": "Progresso: 1a-ing", + "进度:1a1b-done": "Progresso: 1a1b-done", + "进度:1a1b-done, 1cing": "Progresso: 1a1b-done, 1cing", + "进度:all-done": "Progresso: all-done", + "需要合成的切分前文本": "Testo da sintetizzare prima del taglio", + "需要合成的文本": "Testo da sintetizzare", + "需要合成的语种": "Lingua da sintetizzare", + ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Se >=3, usa il filtraggio mediano sui risultati del riconoscimento dell'altezza di harvest, il valore è il raggio del filtro. L'uso di questo valore può attenuare i suoni muti.", + "A模型权重": "Peso del modello A", + "A模型路径": "Percorso del modello A", + "B模型路径": "Percorso del modello B", "E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "File curva F0 (opzionale). ", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "File della curva F0, opzionale, una riga per un'altezza, sostituisce il F0 predefinito e le variazioni di tono", "Index Rate": "Tasso di indice", - "Onnx导出": "Esporta Onnx", - "Onnx输出路径": "Percorso di esportazione Onnx:", - "RVC模型路径": "Percorso modello RVC:", - "ckpt处理": "Elaborazione ckpt", - "harvest进程数": "harvest进程数", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "pth文件路径不可包含中文": "pth è un'app per il futuro", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Passaggio 1: compilare la configurazione sperimentale. ", - "step1:正在处理数据": "Passaggio 1: elaborazione dei dati", - "step2:正在提取音高&正在提取特征": "step2:正在提取音高&正在提取特征", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Passaggio 2a: attraversa automaticamente tutti i file nella cartella di addestramento che possono essere decodificati in audio ed esegui la normalizzazione delle sezioni. ", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Passaggio 2b: utilizzare la CPU per estrarre il tono (se il modello ha il tono), utilizzare la GPU per estrarre le caratteristiche (selezionare l'indice GPU):", - "step3: 填写训练设置, 开始训练模型和索引": "Passaggio 3: compilare le impostazioni di addestramento e avviare l'addestramento del modello e dell'indice", - "step3a:正在训练模型": "Passaggio 3a: è iniziato l'addestramento del modello", - "一键训练": "Addestramento con un clic", - "也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Elaborazione batch per la separazione dell'accompagnamento vocale utilizzando il modello UVR5.
Esempio di un formato di percorso di cartella valido: D:\\path\\to\\input\\folder (copialo dalla barra degli indirizzi del file manager).
Il modello è suddiviso in tre categorie:
1. Conserva la voce: scegli questa opzione per l'audio senza armonie.
2. Mantieni solo la voce principale: scegli questa opzione per l'audio con armonie.
3. Modelli di de-riverbero e de-delay (di FoxJoy):
  (1) MDX-Net: la scelta migliore per la rimozione del riverbero stereo ma non può rimuovere il riverbero mono;

Note di de-riverbero/de-delay:
1. Il tempo di elaborazione per il modello DeEcho-DeReverb è circa il doppio rispetto agli altri due modelli DeEcho.
2. Il modello MDX-Net-Dereverb è piuttosto lento.
3. La configurazione più pulita consigliata consiste nell'applicare prima MDX-Net e poi DeEcho-Aggressive.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Inserisci gli indici GPU separati da '-', ad esempio 0-1-2 per utilizzare GPU 0, 1 e 2:", - "伴奏人声分离&去混响&去回声": "Separazione voce/accompagnamento", - "使用模型采样率": "使用模型采样率", - "使用设备采样率": "使用设备采样率", - "保存名": "Salva nome:", - "保存的文件名, 默认空为和源文件同名": "Salva il nome del file (predefinito: uguale al file di origine):", - "保存的模型名不带后缀": "Nome del modello salvato (senza estensione):", - "保存频率save_every_epoch": "Frequenza di salvataggio (save_every_epoch):", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteggi le consonanti senza voce e i suoni del respiro per evitare artefatti come il tearing nella musica elettronica. ", - "修改": "Modificare", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifica le informazioni sul modello (supportato solo per i file di modello di piccole dimensioni estratti dalla cartella 'weights')", - "停止音频转换": "Arresta la conversione audio", - "全流程结束!": "Tutti i processi sono stati completati!", - "刷新音色列表和索引路径": "Aggiorna l'elenco delle voci e il percorso dell'indice", - "加载模型": "Carica modello", - "加载预训练底模D路径": "Carica il percorso D del modello base pre-addestrato:", - "加载预训练底模G路径": "Carica il percorso G del modello base pre-addestrato:", - "单次推理": "单次推理", - "卸载音色省显存": "Scarica la voce per risparmiare memoria della GPU:", - "变调(整数, 半音数量, 升八度12降八度-12)": "Trasposizione (numero intero, numero di semitoni, alza di un'ottava: 12, abbassa di un'ottava: -12):", - "后处理重采样至最终采样率,0为不进行重采样": "Ricampiona l'audio di output in post-elaborazione alla frequenza di campionamento finale. ", - "否": "NO", - "启用相位声码器": "启用相位声码器", + "Onnx导出": "Esporta in Onnx", + "Onnx输出路径": "Percorso di output Onnx", + "RVC模型路径": "Percorso del modello RVC", + "ckpt处理": "Elaborazione del ckpt", + "harvest进程数": "Numero di processi harvest", + "index文件路径不可包含中文": "Il percorso del file di indice non può contenere caratteri cinesi", + "pth文件路径不可包含中文": "Il percorso del file pth non può contenere caratteri cinesi", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Configurazione dei numeri delle schede rmvpe: separa con - i numeri delle schede dei diversi processi utilizzati in input. Ad esempio, 0-0-1 utilizza 2 processi sulla scheda 0 e 1 processo sulla scheda 1", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Passo 1: Compila la configurazione sperimentale. I dati sperimentali sono salvati in logs, ogni esperimento in una cartella. È necessario inserire manualmente il percorso del nome dell'esperimento, contenente configurazione sperimentale, log e file di modello addestrato.", + "step1:正在处理数据": "Passo 1: Elaborazione dei dati in corso", + "step2:正在提取音高&正在提取特征": "Passo 2: Estrazione dell'altezza e delle caratteristiche in corso", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Passo 2a: Attraversa automaticamente tutti i file nella cartella di addestramento che possono essere decodificati in audio e li normalizza a fette. 
Nella cartella sperimentale vengono generate due cartelle wav; Al momento supporta solo l'addestramento singolo.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Passo 2b: Usa la CPU per estrarre l'altezza (se il modello la include) e la GPU per estrarre le caratteristiche (scegliendo il numero della scheda)", + "step3: 填写训练设置, 开始训练模型和索引": "Passo 3: Compila le impostazioni di addestramento, inizia ad addestrare il modello e l'indice", + "step3a:正在训练模型": "Passo 3a: Addestramento del modello in corso", + "一键训练": "Allenamento One-Click", + "也可批量输入音频文件, 二选一, 优先读文件夹": "È possibile anche inserire file audio in batch, una delle due opzioni, con priorità alla lettura della cartella", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Numeri delle schede separati da - utilizzati in input, ad esempio 0-1-2, utilizzando le schede 0, 1 e 2", + "伴奏人声分离&去混响&去回声": "Separazione tra accompagnamento e voce & Rimozione dell'eco & Rimozione dell'eco", + "使用模型采样率": "Frequenza di campionamento del modello", + "使用设备采样率": "Frequenza di campionamento del dispositivo", + "保存名": "Nome del salvataggio", + "保存的文件名, 默认空为和源文件同名": "Nome del file salvato, vuoto di default è lo stesso del file sorgente", + "保存的模型名不带后缀": "Nome del modello salvato senza estensione", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protegge le consonanti chiare e i suoni di respirazione, evita artifact come la rottura del suono elettronico, tirare a 0.5 per disattivare, abbassare per aumentare la protezione ma potrebbe ridurre l'effetto di indicizzazione", + "修改": "Modifica", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifica le informazioni del modello (supporta solo i piccoli file di modello estratti dalla cartella weights)", + "停止音频转换": "Interrompi la conversione audio", + "全流程结束!": "Processo completo!", + "刷新音色列表和索引路径": "Aggiorna la lista dei toni e il percorso dell'indice", + "加载模型": "Carica il modello", + "加载预训练底模D路径": "Carica il percorso del modello di fondo preaddestrato D", + "加载预训练底模G路径": "Carica il percorso del modello di fondo preaddestrato G", + "单次推理": "Inferenza singola", + "卸载音色省显存": "Scarica il tono per risparmiare memoria video", + "变调(整数, 半音数量, 升八度12降八度-12)": "Modifica del tono (numero intero, quantità di semitoni, 12 per un'ottava in su, -12 per un'ottava in giù)", + "后处理重采样至最终采样率,0为不进行重采样": "Ricampiona in modo post-elaborazione alla frequenza di campionamento finale, 0 per non eseguire il ricampionamento", + "否": "No", + "启用相位声码器": "Abilita il codificatore di fase", "响应阈值": "Soglia di risposta", - "响度因子": "fattore di sonorità", - "处理数据": "Processa dati", - "导出Onnx模型": "Esporta modello Onnx", - "导出文件格式": "Formato file di esportazione", - "常见问题解答": "FAQ (Domande frequenti)", + "响度因子": "Fattore di risposta", + "处理数据": "Elaborazione dati", + "导出Onnx模型": "Esporta il modello Onnx", + "导出文件格式": "Formato di esportazione del file", + "常见问题解答": "Domande frequenti", "常规设置": "Impostazioni generali", - "开始音频转换": "Avvia la conversione audio", - "很遗憾您这没有能用的显卡来支持您训练": "Sfortunatamente, non è disponibile alcuna GPU compatibile per supportare l'addestramento.", - "性能设置": "Impostazioni delle prestazioni", - "总训练轮数total_epoch": "Epoch totali di addestramento (total_epoch):", - "批量推理": "批量推理", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione massiva. Inserisci il percorso della cartella che contiene i file da convertire o carica più file audio. I file convertiti finiranno nella cartella specificata. 
(default: opt) ", - "指定输出主人声文件夹": "Specifica la cartella di output per le voci:", - "指定输出文件夹": "Specifica la cartella di output:", - "指定输出非主人声文件夹": "Specificare la cartella di output per l'accompagnamento:", + "开始音频转换": "Inizia la conversione audio", + "性能设置": "Impostazioni di performance", + "批量推理": "Inferenza batch", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione in batch, inserisci la cartella con i file audio da convertire o carica più file audio, i file convertiti verranno salvati nella cartella specificata (per impostazione predefinita opt).", + "指定输出主人声文件夹": "Specifica la cartella di output per la voce principale", + "指定输出文件夹": "Specifica la cartella di output", + "指定输出非主人声文件夹": "Specifica la cartella di output per la non voce principale", "推理时间(ms):": "Tempo di inferenza (ms):", - "推理音色": "Voce di inferenza:", + "推理音色": "Tono di inferenza", "提取": "Estrai", - "提取音高和处理数据使用的CPU进程数": "Numero di processi CPU utilizzati per l'estrazione del tono e l'elaborazione dei dati:", - "是": "SÌ", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Salva solo l'ultimo file '.ckpt' per risparmiare spazio su disco:", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Salva un piccolo modello finale nella cartella \"weights\" in ogni punto di salvataggio:", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Memorizza nella cache tutti i set di addestramento nella memoria della GPU. ", - "显卡信息": "Informazioni GPU", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Questo software è open source con licenza MIT.
Se non si accetta questa clausola, non è possibile utilizzare o fare riferimento a codici e file all'interno del pacchetto software. Contratto-LICENZA.txt per dettagli.", - "查看": "Visualizzazione", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Visualizza le informazioni sul modello (supportato solo per file di modello piccoli estratti dalla cartella 'weights')", - "检索特征占比": "Rapporto funzionalità di ricerca (controlla la forza dell'accento, troppo alto ha artefatti):", + "提取音高和处理数据使用的CPU进程数": "Numero di processi CPU utilizzati per l'estrazione dell'altezza del suono e l'elaborazione dei dati", + "是": "Sì", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Se memorizzare nella cache tutto l'insieme di addestramento nella memoria video. Piccoli set di dati inferiori a 10 minuti possono essere memorizzati nella cache per accelerare l'addestramento, la memorizzazione nella cache di grandi set di dati può esaurire la memoria video e non accelerare di molto", + "查看": "Visualizza", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Visualizza le informazioni del modello (supporta solo i piccoli file di modello estratti dalla cartella weights)", + "检索特征占比": "Percentuale di caratteristiche di ricerca", "模型": "Modello", "模型推理": "Inferenza del modello", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Estrazione del modello (inserire il percorso del modello di file di grandi dimensioni nella cartella \"logs\"). ", - "模型是否带音高指导": "Se il modello ha una guida del tono:", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Se il modello ha una guida del tono (necessario per il canto, facoltativo per il parlato):", - "模型是否带音高指导,1是0否": "Se il modello ha una guida del tono (1: sì, 0: no):", - "模型版本型号": "Versione dell'architettura del modello:", - "模型融合, 可用于测试音色融合": "Model fusion, può essere utilizzato per testare la fusione timbrica", - "模型路径": "Percorso al modello:", - "每张显卡的batch_size": "Dimensione batch per GPU:", - "淡入淡出长度": "Lunghezza dissolvenza", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Estrazione del modello (inserisci il percorso del modello di grandi dimensioni nella cartella logs), adatto per i modelli a metà addestramento che non si desidera continuare ad addestrare, i modelli non estratti automaticamente vengono salvati come modelli di piccole dimensioni o per testare la situazione del modello intermedio", + "模型是否带音高指导": "Il modello include o meno la guida all'altezza del suono", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Il modello include o meno la guida all'altezza del suono (necessario per il canto, opzionale per la voce)", + "模型是否带音高指导,1是0否": "Il modello include o meno la guida all'altezza del suono, 1 sì, 0 no", + "模型版本型号": "Versione e modello del modello", + "模型融合, 可用于测试音色融合": "Fusione dei modelli, utile per testare la fusione dei toni", + "模型路径": "Percorso del modello", + "淡入淡出长度": "Lunghezza del fading in/fading out", "版本": "Versione", "特征提取": "Estrazione delle caratteristiche", - "特征检索库文件路径,为空则使用下拉的选择结果": "Percorso del file di indice delle caratteristiche. ", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tonalità +12 consigliata per la conversione da maschio a femmina e tonalità -12 per la conversione da femmina a maschio. 
", - "目标采样率": "Frequenza di campionamento target:", - "算法延迟(ms):": "算法延迟(ms):", - "自动检测index路径,下拉式选择(dropdown)": "Rileva automaticamente il percorso dell'indice e seleziona dal menu a tendina:", + "特征检索库文件路径,为空则使用下拉的选择结果": "Percorso del file della libreria di ricerca delle caratteristiche, se vuoto usa la selezione a discesa", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Consigliato +12 toni per la trasformazione da uomo a donna, -12 toni per la trasformazione da donna a uomo. Se l'intervallo tonale esplode causando distorsioni nel timbro, è possibile regolarlo manualmente nell'intervallo adatto.", + "目标采样率": "Frequenza di campionamento obiettivo", + "算法延迟(ms):": "Ritardo dell'algoritmo (ms):", + "自动检测index路径,下拉式选择(dropdown)": "Rilevamento automatico del percorso dell'indice, selezione a discesa (dropdown)", "融合": "Fusione", - "要改的模型信息": "Informazioni sul modello da modificare:", - "要置入的模型信息": "Informazioni sul modello da posizionare:", + "要改的模型信息": "Informazioni del modello da modificare", + "要置入的模型信息": "Informazioni del modello da inserire", "训练": "Addestramento", - "训练模型": "Addestra modello", - "训练特征索引": "Addestra indice delle caratteristiche", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Addestramento completato. ", - "请指定说话人id": "Si prega di specificare l'ID del locutore/cantante:", - "请选择index文件": "请选择index文件", - "请选择pth文件": "请选择pth 文件", - "请选择说话人id": "Seleziona ID locutore/cantante:", - "转换": "Convertire", - "输入实验名": "Inserisci il nome dell'esperimento:", - "输入待处理音频文件夹路径": "Immettere il percorso della cartella audio da elaborare:", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Immettere il percorso della cartella audio da elaborare (copiarlo dalla barra degli indirizzi del file manager):", - "输入待处理音频文件路径(默认是正确格式示例)": "Immettere il percorso del file audio da elaborare (l'impostazione predefinita è l'esempio di formato corretto):", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Regola il ridimensionamento dell'inviluppo del volume. 
", - "输入监听": "输入监听", - "输入训练文件夹路径": "Inserisci il percorso della cartella di addestramento:", + "训练模型": "Addestra il modello", + "训练特征索引": "Addestramento dell'indice delle caratteristiche", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Fine dell'addestramento, puoi visualizzare il registro di addestramento sulla console o il file train.log nella cartella dell'esperimento", + "请指定说话人id": "Si prega di specificare l'ID del parlante", + "请选择index文件": "Seleziona il file di indice", + "请选择pth文件": "Seleziona il file pth", + "请选择说话人id": "Seleziona l'ID del parlante", + "转换": "Converti", + "输入实验名": "Inserisci il nome dell'esperimento", + "输入待处理音频文件夹路径": "Inserisci il percorso della cartella dei file audio da elaborare", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Inserisci il percorso della cartella dei file audio da elaborare (copialo dalla barra degli indirizzi del gestore dei file)", + "输入待处理音频文件路径(默认是正确格式示例)": "Inserisci il percorso del file audio da elaborare (esempio di formato corretto predefinito)", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Inserisci la proporzione di fusione della sostituzione dell'involucro del volume di ingresso con l'involucro del volume di uscita, più vicino a 1 più utilizza l'involucro di uscita", + "输入监听": "Inserisci l'ascolto", + "输入训练文件夹路径": "Inserisci il percorso della cartella di addestramento", "输入设备": "Dispositivo di input", - "输入降噪": "Riduzione del rumore in ingresso", - "输出信息": "Informazioni sull'uscita", - "输出变声": "输出变声", - "输出设备": "Dispositivo di uscita", - "输出降噪": "Riduzione del rumore in uscita", - "输出音频(右下角三个点,点了可以下载)": "Esporta audio (clicca sui tre puntini in basso a destra per scaricarlo)", + "输入降噪": "Inserisci la riduzione del rumore", + "输出信息": "Informazioni di output", + "输出变声": "Variazione della voce in output", + "输出设备": "Dispositivo di output", + "输出降噪": "Riduzione del rumore in output", + "输出音频(右下角三个点,点了可以下载)": "Audio in output (tre punti nell'angolo in basso a destra, fare clic per scaricare)", "选择.index文件": "Seleziona il file .index", "选择.pth文件": "Seleziona il file .pth", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleziona l'algoritmo di estrazione del tono (\"pm\": estrazione più veloce ma risultato di qualità inferiore; \"harvest\": bassi migliori ma estremamente lenti; \"crepe\": qualità migliore ma utilizzo intensivo della GPU):", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU", - "采样率:": "采样率:", - "采样长度": "Lunghezza del campione", - "重载设备列表": "Ricaricare l'elenco dei dispositivi", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono, l'input vocale può utilizzare pm per velocizzare, harvest ha bassi migliori ma è incredibilmente lento, crepe ha un buon effetto ma consuma molte risorse della GPU", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono, l'input vocale può utilizzare pm per velocizzare, harvest ha bassi migliori ma è incredibilmente lento, crepe ha un buon effetto ma consuma molte risorse della GPU, rmvpe ha il miglior effetto ed è leggermente esigente sulla GPU", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleziona l'algoritmo di estrazione dell'altezza del suono: l'input vocale può utilizzare pm per velocizzare, la qualità del suono è 
elevata ma richiede molte risorse della CPU; l'input vocale può utilizzare dio per velocizzare, harvest ha una qualità del suono migliore ma è lento, rmvpe ha il miglior effetto ed è leggermente esigente sulla CPU/GPU", + "采样率:": "Frequenza di campionamento:", + "采样长度": "Lunghezza del campionamento", + "重载设备列表": "Ricarica la lista dei dispositivi", "音调设置": "Impostazioni del tono", - "音频设备(请使用同种类驱动)": "Dispositivo audio (utilizzare lo stesso tipo di driver)", - "音高算法": "音高算法", + "音频设备(请使用同种类驱动)": "Dispositivo audio (usa driver della stessa categoria)", + "音高算法": "Algoritmo dell'altezza del suono", "额外推理时长": "Tempo di inferenza extra" -} + } From 3e4a03660b412c1ebfd6b5e7cb4662c2664de212 Mon Sep 17 00:00:00 2001 From: duliangang Date: Sun, 28 Jan 2024 16:28:24 +0800 Subject: [PATCH 104/126] fix chinese number to pinyin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 数字转拼音不准的bug,因为将数字一个个分割了调用an2cn当然不准 --- GPT_SoVITS/text/chinese.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/GPT_SoVITS/text/chinese.py b/GPT_SoVITS/text/chinese.py index de3ef01..bb631a0 100644 --- a/GPT_SoVITS/text/chinese.py +++ b/GPT_SoVITS/text/chinese.py @@ -12,6 +12,8 @@ sys.path.append("/data/docker/liujing04/gpt-vits/gpt-vits-master") from text.symbols import punctuation from text.tone_sandhi import ToneSandhi +normalizer = lambda x: cn2an.transform(x, "an2cn") + current_file_path = os.path.dirname(__file__) pinyin_to_symbol_map = { line.split("\t")[0]: line.strip().split("\t")[1] @@ -151,10 +153,8 @@ def _g2p(segments): def text_normalize(text): - numbers = re.findall(r"\d+(?:\.?\d+)?", text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) + dest_text=normalizer(text) + text = replace_punctuation(dest_text) return text From 2d69f103a3eb78e21a2230998b5ea04f0d36ad0a Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 17:58:17 +0800 Subject: [PATCH 105/126] Revert "fix chinese number to pinyin" --- GPT_SoVITS/text/chinese.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/GPT_SoVITS/text/chinese.py b/GPT_SoVITS/text/chinese.py index bb631a0..de3ef01 100644 --- a/GPT_SoVITS/text/chinese.py +++ b/GPT_SoVITS/text/chinese.py @@ -12,8 +12,6 @@ sys.path.append("/data/docker/liujing04/gpt-vits/gpt-vits-master") from text.symbols import punctuation from text.tone_sandhi import ToneSandhi -normalizer = lambda x: cn2an.transform(x, "an2cn") - current_file_path = os.path.dirname(__file__) pinyin_to_symbol_map = { line.split("\t")[0]: line.strip().split("\t")[1] @@ -153,8 +151,10 @@ def _g2p(segments): def text_normalize(text): - dest_text=normalizer(text) - text = replace_punctuation(dest_text) + numbers = re.findall(r"\d+(?:\.?\d+)?", text) + for number in numbers: + text = text.replace(number, cn2an.an2cn(number), 1) + text = replace_punctuation(text) return text From ecb4b23fc311462d78d5c72ff60e309bb9853d0f Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 19:19:57 +0800 Subject: [PATCH 106/126] Update data_module.py --- GPT_SoVITS/AR/data/data_module.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPT_SoVITS/AR/data/data_module.py b/GPT_SoVITS/AR/data/data_module.py index f3d895a..037484a 100644 --- a/GPT_SoVITS/AR/data/data_module.py +++ b/GPT_SoVITS/AR/data/data_module.py @@ -41,7 +41,7 @@ class 
Text2SemanticDataModule(LightningDataModule): # pad_val=self.config['data']['pad_val']) def train_dataloader(self): - batch_size = self.config["train"]["batch_size"] + batch_size = max(min(self.config["train"]["batch_size"],len(self._train_dataset)//4),1)#防止不保存 sampler = DistributedBucketSampler(self._train_dataset, batch_size=batch_size) return DataLoader( self._train_dataset, From f0cfe397089a6fd507d678c71adeaab5e7ed0683 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 19:34:03 +0800 Subject: [PATCH 107/126] fix gpt not save issue. --- GPT_SoVITS/s1_train.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/GPT_SoVITS/s1_train.py b/GPT_SoVITS/s1_train.py index 30c167e..3bbfdfb 100644 --- a/GPT_SoVITS/s1_train.py +++ b/GPT_SoVITS/s1_train.py @@ -44,9 +44,8 @@ class my_model_ckpt(ModelCheckpoint): self.config = config def on_train_epoch_end(self, trainer, pl_module): - if not self._should_skip_saving_checkpoint( - trainer - ) and self._should_save_on_train_epoch_end(trainer): + # if not self._should_skip_saving_checkpoint(trainer) and self._should_save_on_train_epoch_end(trainer): + if self._should_save_on_train_epoch_end(trainer): monitor_candidates = self._monitor_candidates(trainer) if ( self._every_n_epochs >= 1 From 34a5ad7baa279c8a4674b02ebcb7ed9d57491f3f Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 19:50:59 +0800 Subject: [PATCH 108/126] Add files via upload --- webui.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/webui.py b/webui.py index 885aecb..fba65ec 100644 --- a/webui.py +++ b/webui.py @@ -25,20 +25,13 @@ for path in site.getsitepackages(): if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir] #os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" -os.environ["all_proxy"] = "" - for site_packages_root in site_packages_roots: if os.path.exists(site_packages_root): - try: - with open("%s/users.pth" % (site_packages_root), "w") as f: - f.write( - "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" - % (now_dir, now_dir, now_dir, now_dir, now_dir) - ) - break - except PermissionError: - pass - + with open("%s/users.pth" % (site_packages_root), "w") as f: + f.write( + "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" + % (now_dir, now_dir, now_dir, now_dir, now_dir) + ) from tools import my_utils import traceback import shutil @@ -662,7 +655,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: with gr.Row(): if_label = gr.Checkbox(label=i18n("是否开启打标WebUI"),show_label=True) path_list = gr.Textbox( - label=i18n("打标数据标注文件路径"), + label=i18n(".list标注文件的路径"), value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list", interactive=True, ) @@ -688,7 +681,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: label=i18n("*训练集音频文件目录"), # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", interactive=True, - placeholder=i18n("训练集音频文件目录-拼接-list文件里波形对应的文件名(不是全路径)。") + placeholder=i18n("填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。") ) gr.Markdown(value=i18n("1Aa-文本内容")) with gr.Row(): From 70a9c3d3e840051ea84f42d9a15492748594e83a Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 19:51:55 +0800 Subject: [PATCH 109/126] Update webui.py --- webui.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index 
fba65ec..23d803d 100644 --- a/webui.py +++ b/webui.py @@ -25,13 +25,18 @@ for path in site.getsitepackages(): if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir] #os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" +os.environ["all_proxy"] = "" for site_packages_root in site_packages_roots: if os.path.exists(site_packages_root): - with open("%s/users.pth" % (site_packages_root), "w") as f: - f.write( - "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" - % (now_dir, now_dir, now_dir, now_dir, now_dir) - ) + try: + with open("%s/users.pth" % (site_packages_root), "w") as f: + f.write( + "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" + % (now_dir, now_dir, now_dir, now_dir, now_dir) + ) + break + except PermissionError: + pass from tools import my_utils import traceback import shutil From b8ae5a2761e2654fc0c905498009d3de9de745a8 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 20:10:36 +0800 Subject: [PATCH 110/126] Add files via upload --- GPT_SoVITS/inference_webui.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index f725c5c..454a372 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -29,10 +29,10 @@ else: # ) # sovits_path = os.environ.get("sovits_path", "pretrained_models/s2G488k.pth") cnhubert_base_path = os.environ.get( - "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base" + "cnhubert_base_path", "pretrained_models/chinese-hubert-base" ) bert_path = os.environ.get( - "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" + "bert_path", "pretrained_models/chinese-roberta-wwm-ext-large" ) infer_ttswebui = os.environ.get("infer_ttswebui", 9872) infer_ttswebui = int(infer_ttswebui) @@ -262,17 +262,19 @@ def nonen_get_bert_inf(text, language): return bert -#i18n("不切"),i18n("凑五句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切") def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,how_to_cut=i18n("不切")): t0 = ttime() prompt_text = prompt_text.strip("\n") - prompt_language, text = prompt_language, text.strip("\n") + if(prompt_text[-1]not in splits):prompt_text+="。"if prompt_text!="en"else "." 
+ text = text.strip("\n") zero_wav = np.zeros( int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32, ) with torch.no_grad(): wav16k, sr = librosa.load(ref_wav_path, sr=16000) + if(wav16k.shape[0]>160000 or wav16k.shape[0]<48000): + raise OSError(i18n("参考音频在3~10秒范围外,请更换!")) wav16k = torch.from_numpy(wav16k) zero_wav_torch = torch.from_numpy(zero_wav) if is_half == True: @@ -297,7 +299,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, phones1, word2ph1, norm_text1 = clean_text_inf(prompt_text, prompt_language) else: phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language) - if(how_to_cut==i18n("凑五句一切")):text=cut1(text) + if(how_to_cut==i18n("凑四句一切")):text=cut1(text) elif(how_to_cut==i18n("凑50字一切")):text=cut2(text) elif(how_to_cut==i18n("按中文句号。切")):text=cut3(text) elif(how_to_cut==i18n("按英文句号.切")):text=cut4(text) @@ -409,7 +411,7 @@ def split(todo_text): def cut1(inp): inp = inp.strip("\n") inps = split(inp) - split_idx = list(range(0, len(inps), 5)) + split_idx = list(range(0, len(inps), 4)) split_idx[-1] = None if len(split_idx) > 1: opts = [] @@ -423,7 +425,6 @@ def cut1(inp): def cut2(inp): inp = inp.strip("\n") inps = split(inp) - # print(inps) if len(inps) < 2: return inp opts = [] @@ -494,7 +495,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: GPT_dropdown.change(change_gpt_weights,[GPT_dropdown],[]) gr.Markdown(value=i18n("*请上传并填写参考信息")) with gr.Row(): - inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath") + inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath") prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="") prompt_language = gr.Dropdown( label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文") @@ -507,7 +508,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: ) how_to_cut = gr.Radio( label=i18n("怎么切"), - choices=[i18n("不切"),i18n("凑五句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切"),], + choices=[i18n("不切"),i18n("凑四句一切"),i18n("凑50字一切"),i18n("按中文句号。切"),i18n("按英文句号.切"),], value=i18n("凑50字一切"), interactive=True, ) @@ -523,7 +524,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。")) with gr.Row(): text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"),value="") - button1 = gr.Button(i18n("凑五句一切"), variant="primary") + button1 = gr.Button(i18n("凑四句一切"), variant="primary") button2 = gr.Button(i18n("凑50字一切"), variant="primary") button3 = gr.Button(i18n("按中文句号。切"), variant="primary") button4 = gr.Button(i18n("按英文句号.切"), variant="primary") From 96128d604ad919a2ff6bf486f351cb4f1b4656d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=82=A6?= Date: Sun, 28 Jan 2024 20:17:35 +0800 Subject: [PATCH 111/126] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E5=8F=AF=E8=83=BD=E4=B8=8D=E5=AD=98=E5=9C=A8=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 修改文件可能不存在的bug --- webui.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 23d803d..6577705 100644 --- a/webui.py +++ b/webui.py @@ -16,7 +16,11 @@ if(os.path.exists(tmp)): if(name=="jieba.cache"):continue path="%s/%s"%(tmp,name) delete=os.remove if os.path.isfile(path) else shutil.rmtree - delete(path) + try: + delete(path) + except Exception as e: + print(str(e)) + pass import site site_packages_roots = [] for path in site.getsitepackages(): From 698e9655132d194b25b86fbbc99d53c8d2cea2a3 Mon Sep 17 
00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 20:18:44 +0800 Subject: [PATCH 112/126] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=BC=80=E5=A4=B4?= =?UTF-8?q?=E5=90=9E=E5=AD=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- GPT_SoVITS/inference_webui.py | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 454a372..d601d9f 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -29,10 +29,10 @@ else: # ) # sovits_path = os.environ.get("sovits_path", "pretrained_models/s2G488k.pth") cnhubert_base_path = os.environ.get( - "cnhubert_base_path", "pretrained_models/chinese-hubert-base" + "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base" ) bert_path = os.environ.get( - "bert_path", "pretrained_models/chinese-roberta-wwm-ext-large" + "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" ) infer_ttswebui = os.environ.get("infer_ttswebui", 9872) infer_ttswebui = int(infer_ttswebui) @@ -262,11 +262,18 @@ def nonen_get_bert_inf(text, language): return bert +splits = {",","。","?","!",",",".","?","!","~",":",":","—","…",} +def get_first(text): + pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]" + text = re.split(pattern, text)[0].strip() + return text + def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,how_to_cut=i18n("不切")): t0 = ttime() prompt_text = prompt_text.strip("\n") if(prompt_text[-1]not in splits):prompt_text+="。"if prompt_text!="en"else "." text = text.strip("\n") + if(len(get_first(text))<4):text+="。"if text!="en"else "." zero_wav = np.zeros( int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32, @@ -371,24 +378,6 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, np.int16 ) - -splits = { - ",", - "。", - "?", - "!", - ",", - ".", - "?", - "!", - "~", - ":", - ":", - "—", - "…", -} # 不考虑省略号 - - def split(todo_text): todo_text = todo_text.replace("……", "。").replace("——", ",") if todo_text[-1] not in splits: From bdcbc46cb02d300caf4bc2f80f8c35bcc3135ffe Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 20:22:58 +0800 Subject: [PATCH 113/126] Update chinese.py --- GPT_SoVITS/text/chinese.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/GPT_SoVITS/text/chinese.py b/GPT_SoVITS/text/chinese.py index de3ef01..fbcfa50 100644 --- a/GPT_SoVITS/text/chinese.py +++ b/GPT_SoVITS/text/chinese.py @@ -5,13 +5,11 @@ import re import cn2an from pypinyin import lazy_pinyin, Style -import sys - -sys.path.append("/data/docker/liujing04/gpt-vits/gpt-vits-master") - from text.symbols import punctuation from text.tone_sandhi import ToneSandhi +normalizer = lambda x: cn2an.transform(x, "an2cn") + current_file_path = os.path.dirname(__file__) pinyin_to_symbol_map = { line.split("\t")[0]: line.strip().split("\t")[1] @@ -151,10 +149,8 @@ def _g2p(segments): def text_normalize(text): - numbers = re.findall(r"\d+(?:\.?\d+)?", text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) + dest_text=normalizer(text) + text = replace_punctuation(dest_text) return text From bee684828ceb670f925e9d699071b93160549c98 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: 
Sun, 28 Jan 2024 20:25:00 +0800 Subject: [PATCH 114/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index beaba42..80cc679 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -46,5 +46,19 @@ 7-自动识别不支持半精度的卡强制单精度。cpu推理下强制单精度。 +### 20240126更新 + +1-修复数字转汉字念法问题 + +2-修复句首少量字容易吞字的问题 + +3-通过限制排除不合理的参考音频长度 + +4-修复GPT训练不保存ckpt的问题 + +5-完善Dockerfile的下载模型流程 + + + From ed355dbc5e2f3d837da8adb2aeed3355ae9cadc8 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 21:39:18 +0800 Subject: [PATCH 115/126] Add files via upload From 329d99fb51f3d5fb7ccb6058b58057a48949254f Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 21:39:32 +0800 Subject: [PATCH 116/126] Add files via upload --- tools/uvr5/webui.py | 90 +++++++++++++++++++++------------------------ 1 file changed, 42 insertions(+), 48 deletions(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index e09830d..1d873ff 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -5,8 +5,7 @@ from tools.i18n.i18n import I18nAuto i18n = I18nAuto() logger = logging.getLogger(__name__) -import librosa -import soundfile as sf +import ffmpeg import torch import sys from mdxnet import MDXNetDereverb @@ -20,8 +19,7 @@ for name in os.listdir(weight_uvr5_root): device=sys.argv[1] is_half=sys.argv[2] -webui_port_uvr5=int(sys.argv[3]) -is_share=eval(sys.argv[4]) + def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): infos = [] @@ -52,55 +50,52 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format paths = [path.name for path in paths] for path in paths: inp_path = os.path.join(inp_root, path) - if(os.path.isfile(inp_path)==False):continue + need_reformat = 1 + done = 0 try: - done = 0 - try: - y, sr = librosa.load(inp_path, sr=None) - info = sf.info(inp_path) - channels = info.channels - if channels == 2 and sr == 44100: - need_reformat = 0 - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 - ) - done = 1 - else: - need_reformat = 1 - except: - need_reformat = 1 - traceback.print_exc() - if need_reformat == 1: - tmp_path = "%s/%s.reformatted.wav" % ( - os.path.join(os.environ["TEMP"]), - os.path.basename(inp_path), + info = ffmpeg.probe(inp_path, cmd="ffprobe") + if ( + info["streams"][0]["channels"] == 2 + and info["streams"][0]["sample_rate"] == "44100" + ): + need_reformat = 0 + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 ) - y_resampled = librosa.resample(y, sr, 44100) - sf.write(tmp_path, y_resampled, 44100, "PCM_16") - inp_path = tmp_path + done = 1 + except: + need_reformat = 1 + traceback.print_exc() + if need_reformat == 1: + tmp_path = "%s/%s.reformatted.wav" % ( + os.path.join(os.environ["TEMP"]), + os.path.basename(inp_path), + ) + os.system( + "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" + % (inp_path, tmp_path) + ) + inp_path = tmp_path + try: + if done == 0: + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0 + ) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: try: if done == 0: pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 + inp_path, save_root_ins, save_root_vocal, format0 ) infos.append("%s->Success" % 
(os.path.basename(inp_path))) yield "\n".join(infos) except: - try: - if done == 0: - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 - ) - infos.append("%s->Success" % (os.path.basename(inp_path))) - yield "\n".join(infos) - except: - infos.append( - "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) - ) - yield "\n".join(infos) - except: - infos.append("Oh my god. %s->%s"%(os.path.basename(inp_path), traceback.format_exc())) - yield "\n".join(infos) + infos.append( + "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) + ) + yield "\n".join(infos) except: infos.append(traceback.format_exc()) yield "\n".join(infos) @@ -148,7 +143,7 @@ with gr.Blocks(title="RVC WebUI") as app: minimum=0, maximum=20, step=1, - label=i18n("人声提取激进程度"), + label="人声提取激进程度", value=10, interactive=True, visible=False, # 先不开放调整 @@ -184,7 +179,6 @@ with gr.Blocks(title="RVC WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, - share=is_share, - server_port=webui_port_uvr5, + server_port=9873, quiet=True, -) +) \ No newline at end of file From 2c72a8fcd4320a1d4fdfa5d128e00d5d6e7d466b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 21:40:52 +0800 Subject: [PATCH 117/126] Update webui.py --- tools/uvr5/webui.py | 90 ++++++++++++++++++++++++--------------------- 1 file changed, 48 insertions(+), 42 deletions(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index 1d873ff..e09830d 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -5,7 +5,8 @@ from tools.i18n.i18n import I18nAuto i18n = I18nAuto() logger = logging.getLogger(__name__) -import ffmpeg +import librosa +import soundfile as sf import torch import sys from mdxnet import MDXNetDereverb @@ -19,7 +20,8 @@ for name in os.listdir(weight_uvr5_root): device=sys.argv[1] is_half=sys.argv[2] - +webui_port_uvr5=int(sys.argv[3]) +is_share=eval(sys.argv[4]) def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): infos = [] @@ -50,52 +52,55 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format paths = [path.name for path in paths] for path in paths: inp_path = os.path.join(inp_root, path) - need_reformat = 1 - done = 0 + if(os.path.isfile(inp_path)==False):continue try: - info = ffmpeg.probe(inp_path, cmd="ffprobe") - if ( - info["streams"][0]["channels"] == 2 - and info["streams"][0]["sample_rate"] == "44100" - ): - need_reformat = 0 - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 + done = 0 + try: + y, sr = librosa.load(inp_path, sr=None) + info = sf.info(inp_path) + channels = info.channels + if channels == 2 and sr == 44100: + need_reformat = 0 + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 + ) + done = 1 + else: + need_reformat = 1 + except: + need_reformat = 1 + traceback.print_exc() + if need_reformat == 1: + tmp_path = "%s/%s.reformatted.wav" % ( + os.path.join(os.environ["TEMP"]), + os.path.basename(inp_path), ) - done = 1 - except: - need_reformat = 1 - traceback.print_exc() - if need_reformat == 1: - tmp_path = "%s/%s.reformatted.wav" % ( - os.path.join(os.environ["TEMP"]), - os.path.basename(inp_path), - ) - os.system( - "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" - % (inp_path, tmp_path) - ) - inp_path = tmp_path - try: - if done == 0: - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, 
format0 - ) - infos.append("%s->Success" % (os.path.basename(inp_path))) - yield "\n".join(infos) - except: + y_resampled = librosa.resample(y, sr, 44100) + sf.write(tmp_path, y_resampled, 44100, "PCM_16") + inp_path = tmp_path try: if done == 0: pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 + inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 ) infos.append("%s->Success" % (os.path.basename(inp_path))) yield "\n".join(infos) except: - infos.append( - "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) - ) - yield "\n".join(infos) + try: + if done == 0: + pre_fun._path_audio_( + inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3 + ) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: + infos.append( + "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) + ) + yield "\n".join(infos) + except: + infos.append("Oh my god. %s->%s"%(os.path.basename(inp_path), traceback.format_exc())) + yield "\n".join(infos) except: infos.append(traceback.format_exc()) yield "\n".join(infos) @@ -143,7 +148,7 @@ with gr.Blocks(title="RVC WebUI") as app: minimum=0, maximum=20, step=1, - label="人声提取激进程度", + label=i18n("人声提取激进程度"), value=10, interactive=True, visible=False, # 先不开放调整 @@ -179,6 +184,7 @@ with gr.Blocks(title="RVC WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, - server_port=9873, + share=is_share, + server_port=webui_port_uvr5, quiet=True, -) \ No newline at end of file +) From 1a876935288a160bb996f9b9a7d9a607e7ae97ba Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 21:47:27 +0800 Subject: [PATCH 118/126] Update my_utils.py --- tools/my_utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/my_utils.py b/tools/my_utils.py index ce61984..a7755d6 100644 --- a/tools/my_utils.py +++ b/tools/my_utils.py @@ -1,4 +1,4 @@ -import platform,os +import platform,os,traceback import ffmpeg import numpy as np @@ -9,12 +9,17 @@ def load_audio(file, sr): # This launches a subprocess to decode audio while down-mixing and resampling as necessary. # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. file = clean_path(file) # 防止小白拷路径头尾带了空格和"和回车 + if os.path.exists(file) == False: + raise RuntimeError( + "You input a wrong audio path that does not exists, please fix it!" + ) out, _ = ( ffmpeg.input(file, threads=0) .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) ) except Exception as e: + traceback.print_exc() raise RuntimeError(f"Failed to load audio: {e}") return np.frombuffer(out, np.float32).flatten() From 0945de3b7f94e6b9058c5b54fbe7995408fde803 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 21:52:07 +0800 Subject: [PATCH 119/126] Update webui.py --- tools/uvr5/webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index e09830d..107f23d 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -120,7 +120,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format yield "\n".join(infos) -with gr.Blocks(title="RVC WebUI") as app: +with gr.Blocks(title="UVR5 WebUI") as app: gr.Markdown( value= i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") From 4cb974a6cd8c8054b354c61daef2d7873db04a45 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 28 Jan 2024 22:01:47 +0800 Subject: [PATCH 120/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index 80cc679..2983f87 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -46,7 +46,7 @@ 7-自动识别不支持半精度的卡强制单精度。cpu推理下强制单精度。 -### 20240126更新 +### 20240128更新 1-修复数字转汉字念法问题 @@ -58,7 +58,7 @@ 5-完善Dockerfile的下载模型流程 - +todolist:中文多音字推理优化 From ff977a5f5dc547e0ad82b9e0f1cd95fbc830b2b0 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 29 Jan 2024 11:12:23 +0800 Subject: [PATCH 121/126] Add files via upload --- webui.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index 6577705..9a53601 100644 --- a/webui.py +++ b/webui.py @@ -221,6 +221,9 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s data=json.loads(data) s2_dir="%s/%s"%(exp_root,exp_name) os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True) + if(is_half==False): + data["train"]["fp16_run"]=False + batch_size=max(1,batch_size//2) data["train"]["batch_size"]=batch_size data["train"]["epochs"]=total_epoch data["train"]["text_low_lr_rate"]=text_low_lr_rate @@ -233,7 +236,7 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir data["save_weight_dir"]=SoVITS_weight_root data["name"]=exp_name - tmp_config_path="TEMP/tmp_s2.json" + tmp_config_path="%s/tmp_s2.json"%tmp with open(tmp_config_path,"w")as f:f.write(json.dumps(data)) cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path) @@ -262,6 +265,9 @@ def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights data=yaml.load(data, Loader=yaml.FullLoader) s1_dir="%s/%s"%(exp_root,exp_name) os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True) + if(is_half==False): + data["train"]["precision"]="32" + batch_size = max(1, batch_size // 2) data["train"]["batch_size"]=batch_size data["train"]["epochs"]=total_epoch data["pretrained_s1"]=pretrained_s1 @@ -276,7 +282,7 @@ def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_numbers.replace("-",",") os.environ["hz"]="25hz" - tmp_config_path="TEMP/tmp_s1.yaml" + tmp_config_path="%s/tmp_s1.yaml"%tmp with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False)) # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir) cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path) From dd4296a7166a134a0d1192b6e9abec1ed940ac52 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 29 Jan 2024 11:32:13 +0800 Subject: [PATCH 122/126] Update Changelog_CN.md --- docs/cn/Changelog_CN.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md index 2983f87..5298c1b 100644 --- a/docs/cn/Changelog_CN.md +++ b/docs/cn/Changelog_CN.md @@ -60,5 +60,8 @@ todolist:中文多音字推理优化 +### 20240129更新 + +1-16系等半精度训练有问题的显卡把训练配置改为单精度训练 From 
172e139f45ac26723bc2cf7fac0112f69d6b46ec Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 29 Jan 2024 23:02:52 +0800 Subject: [PATCH 123/126] =?UTF-8?q?=E4=BD=BF=E7=94=A8=20Colaboratory=20?= =?UTF-8?q?=E5=88=9B=E5=BB=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- colab_webui.ipynb | 96 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 colab_webui.ipynb diff --git a/colab_webui.ipynb b/colab_webui.ipynb new file mode 100644 index 0000000..21722da --- /dev/null +++ b/colab_webui.ipynb @@ -0,0 +1,96 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "source": [ + "环境配置 environment" + ], + "metadata": { + "id": "_o6a8GS2lWQM" + } + }, + { + "cell_type": "code", + "metadata": { + "id": "e9b7iFV3dm1f" + }, + "source": [ + "!pip install -q condacolab\n", + "# Setting up condacolab and installing packages\n", + "import condacolab\n", + "condacolab.install_from_url(\"https://repo.anaconda.com/miniconda/Miniconda3-py39_23.11.0-2-Linux-x86_64.sh\")\n", + "%cd -q /content\n", + "!git clone https://github.com/RVC-Boss/GPT-SoVITS\n", + "!conda install -y -q -c pytorch -c nvidia cudatoolkit\n", + "%cd -q /content/GPT-SoVITS\n", + "!conda install -y -q -c conda-forge gcc gxx ffmpeg cmake -c pytorch -c nvidia\n", + "!/usr/local/bin/pip install -r requirements.txt" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# @title Download pretrained models 下载预训练模型\n", + "!mkdir -p /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n", + "!mkdir -p /content/GPT-SoVITS/tools/damo_asr/models\n", + "!mkdir -p /content/GPT-SoVITS/tools/uvr5\n", + "%cd /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n", + "!git clone https://huggingface.co/lj1995/GPT-SoVITS\n", + "%cd /content/GPT-SoVITS/tools/damo_asr/models\n", + "!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n", + "!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n", + "!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n", + "# @title UVR5 pretrains 安装uvr5模型\n", + "%cd /content/GPT-SoVITS/tools/uvr5\n", + "!git clone https://huggingface.co/Delik/uvr5_weights\n", + "!git config core.sparseCheckout true\n", + "!mv /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/" + ], + "metadata": { + "id": "0NgxXg5sjv7z" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# @title launch WebUI 启动WebUI\n", + "!/usr/local/bin/pip install ipykernel\n", + "!sed -i '9s/False/True/' /content/GPT-SoVITS/config.py\n", + "%cd /content/GPT-SoVITS/\n", + "!/usr/local/bin/python webui.py" + ], + "metadata": { + "id": "4oRGUzkrk8C7" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file From 25bbb8fd6cf5bfc0bc5b5e5d4efd723a16b15b21 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 29 Jan 2024 23:03:43 +0800 Subject: 
[PATCH 124/126] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 6d5dd22..199ae5a 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.


+[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb) [![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) [![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) From 5c2841f5bd6fc09c3867e21c58e5239019b6a9bc Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 29 Jan 2024 23:08:07 +0800 Subject: [PATCH 125/126] Add files via upload --- i18n/locale/en_US.json | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index b7ea99a..0a07679 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -2,14 +2,22 @@ "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.", "UVR5已开启": "UVR5 opened ", "UVR5已关闭": "UVR5 closed", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible.
If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory Agreement-LICENSE.txt for details.", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible.
If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory Agreement-LICENSE for details.", "0-前置数据集获取工具": "0-Fetch dataset", "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)", "是否开启UVR5-WebUI": "Open UVR5-WebUI", "UVR5进程输出信息": "UVR5 process output log", "0b-语音切分工具": "0b-Audio slicer", + ".list标注文件的路径": ".list annotation file path", + "GPT模型列表": "GPT weight list", + "SoVITS模型列表": "SoVITS weight list", + "填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。": "Fill in the directory of segmented audio. The complete path of the read audio file is equal to the directory concatenated with the waveform's corresponding filename from the list file (not the full path).", "音频自动切分输入路径,可文件可文件夹": "Audio slicer input (file or folder)", "切分后的子音频的输出根目录": "Audio slicer output folder", + "怎么切": "How to slice the sentence", + "不切": "No slice", + "凑四句一切": "Slice once every 4 sentences", + "按英文句号.切": "Slice by English punct", "threshold:音量小于这个值视作静音的备选切割点": "Noise gate threshold (loudness below this value will be treated as noise", "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "Minimum length", "min_interval:最短切割间隔": "Minumum interval for audio cutting", @@ -86,13 +94,15 @@ "TTS推理WebUI进程输出信息": "TTS inference webui output log", "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voice Changer", "施工中,请静候佳音": "In construction, please wait", + "参考音频在3~10秒范围外,请更换!": "Reference audio is outside the 3-10 second range, please choose another one!", + "请上传3~10秒内参考音频,超过会报错!": "Please upload a reference audio within the 3-10 second range; if it exceeds this duration, it will raise errors.", "TTS推理进程已开启": "TTS inference process is opened", "TTS推理进程已关闭": "TTS inference process closed", "打标工具WebUI已开启": "proofreading tool webui is opened", "打标工具WebUI已关闭": "proofreading tool webui is closed", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is under MIT licence. The author does not have any control for this software. Users are solely reponsible for all voices thats being converted and/or distributed. If you disagree with this Terms and Conditions, you cannot use or cite any files or code in this file. Please check LICENSE. for more info.", "*请上传并填写参考信息": "*Please upload and fill reference information", - "*请填写需要合成的目标文本": "*Please fill the text that needs inference", + "*请填写需要合成的目标文本。中英混合选中文,日英混合选日文,中日混合暂不支持,非目标语言文本自动遗弃。": "*Please fill the text that needs inference. Select Chinese for mixed Chinese and English text, choose Japanese for mixed Japanese and English text. Mixed Chinese and Japanese is currently not supported; non-target language text will be automatically discarded.", "ASR任务开启:%s": "ASR training started: %s", "GPT训练完成": "Finished GPT training", "GPT训练开始:%s": "GPT training started: %s", From 5afe04cf3a99900fc2c889bdec2d635af77cb20b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Mon, 29 Jan 2024 23:11:50 +0800 Subject: [PATCH 126/126] Update README.md --- README.md | 49 +++++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 199ae5a..2283e13 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,8 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

> Check out our [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) here! +Unseen speakers few-shot fine-tuning demo: + https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb For users in China region, you can use AutoDL Cloud Docker to experience the full functionality online: https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official @@ -48,27 +50,6 @@ If you are a Windows user (tested with win>=10) you can install directly via the _Note: numba==0.56.4 require py<3.11_ -### For Mac Users -If you are a Mac user, make sure you meet the following conditions for training and inferencing with GPU: -- Mac computers with Apple silicon or AMD GPUs -- macOS 12.3 or later -- Xcode command-line tools installed by running `xcode-select --install` - -_Other Macs can do inference with CPU only._ - -Then install by using the following commands: -#### Create Environment -```bash -conda create -n GPTSoVits python=3.9 -conda activate GPTSoVits -``` -#### Install Requirements -```bash -pip install -r requirements.txt -pip uninstall torch torchaudio -pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu -``` -_Note: For preprocessing with UVR5, it is recommended to [download the original project GUI](https://github.com/Anjok07/ultimatevocalremovergui) and select "GPU Conversion". Additionally, there might be memory leak issues, especially during inference. Restarting the inference webUI can help._ ### Quick Install with Conda ```bash @@ -119,6 +100,26 @@ For Chinese ASR (additionally), download models from [Damo ASR Model](https://mo For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`. +### For Mac Users +If you are a Mac user, make sure you meet the following conditions for training and inferencing with GPU: +- Mac computers with Apple silicon or AMD GPUs +- macOS 12.3 or later +- Xcode command-line tools installed by running `xcode-select --install` + +_Other Macs can do inference with CPU only._ + +Then install by using the following commands: +#### Create Environment +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +``` +#### Install Requirements +```bash +pip install -r requirements.txt +pip uninstall torch torchaudio +pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu +``` ### Using Docker @@ -168,9 +169,9 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. ## Todo List - [ ] **High Priority:** - - [ ] Localization in Japanese and English. + - [x] Localization in Japanese and English. - [ ] User guide. - - [ ] Japanese and English dataset fine tune training. + - [x] Japanese and English dataset fine tune training. - [ ] **Features:** - [ ] Zero-shot voice conversion (5s) / few-shot voice conversion (1min). @@ -179,7 +180,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. - [ ] Experiment with changing SoVITS token inputs to probability distribution of vocabs. - [ ] Improve English and Japanese text frontend. - [ ] Develop tiny and larger-sized TTS models. - - [ ] Colab scripts. + - [x] Colab scripts. - [ ] Try expand training dataset (2k hours -> 10k hours). - [ ] better sovits base model (enhanced audio quality) - [ ] model mix
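
The README hunk above keeps the `.list` annotation format, `vocal_path|speaker_name|language|text`, with the example line `D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.`. As a rough illustration of reading such a file, here is a minimal Python sketch; it is not part of the patches, and the `Utterance` type, the `read_list_file` name, the `demo.list` filename, and the single-`|` delimiter assumption are the editor's own, taken only from the format description quoted in the README.

```python
# Minimal sketch (assumption, not project code): parse a GPT-SoVITS-style
# .list annotation file where each line is "vocal_path|speaker_name|language|text".
from pathlib import Path
from typing import Iterator, NamedTuple


class Utterance(NamedTuple):
    wav_path: str
    speaker: str
    language: str  # e.g. "zh", "ja", "en"
    text: str


def read_list_file(path: str, encoding: str = "utf-8") -> Iterator[Utterance]:
    for raw in Path(path).read_text(encoding=encoding).splitlines():
        line = raw.strip()
        if not line:
            continue
        # maxsplit=3 keeps any "|" that might appear inside the transcript text
        fields = line.split("|", maxsplit=3)
        if len(fields) != 4:
            raise ValueError(f"expected 4 '|'-separated fields, got: {raw!r}")
        yield Utterance(*fields)


if __name__ == "__main__":
    # Hypothetical usage with an annotation file named demo.list
    for utt in read_list_file("demo.list"):
        print(utt.language, utt.wav_path, utt.text)
```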