From b1a60801e6ebde7695f78758cc8614e0c3d68a3e Mon Sep 17 00:00:00 2001
From: Ftps
Date: Wed, 17 Jan 2024 00:57:07 +0900
Subject: [PATCH 01/46] Replace init placeholders with .gitignore; remove
 full-width space in utils.py docstring

---
 tools/damo_asr/models/.gitignore | 2 ++
 tools/damo_asr/models/init       | 1 -
 tools/init                       | 1 -
 tools/uvr5/init                  | 1 -
 tools/uvr5/lib/utils.py          | 2 +-
 5 files changed, 3 insertions(+), 4 deletions(-)
 create mode 100644 tools/damo_asr/models/.gitignore
 delete mode 100644 tools/damo_asr/models/init
 delete mode 100644 tools/init
 delete mode 100644 tools/uvr5/init

diff --git a/tools/damo_asr/models/.gitignore b/tools/damo_asr/models/.gitignore
new file mode 100644
index 00000000..c96a04f0
--- /dev/null
+++ b/tools/damo_asr/models/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
\ No newline at end of file
diff --git a/tools/damo_asr/models/init b/tools/damo_asr/models/init
deleted file mode 100644
index 8b137891..00000000
--- a/tools/damo_asr/models/init
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/init b/tools/init
deleted file mode 100644
index 8b137891..00000000
--- a/tools/init
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/uvr5/init b/tools/uvr5/init
deleted file mode 100644
index 8b137891..00000000
--- a/tools/uvr5/init
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/uvr5/lib/utils.py b/tools/uvr5/lib/utils.py
index 946eb0cb..5e8cd22f 100644
--- a/tools/uvr5/lib/utils.py
+++ b/tools/uvr5/lib/utils.py
@@ -24,7 +24,7 @@ def make_padding(width, cropsize, offset):
 def inference(X_spec, device, model, aggressiveness, data):
     """
-    data : dic configs
+    data : dic configs
     """
 
     def _execute(

From c761de73f40155f3ed5048c688fe2a92807865a4 Mon Sep 17 00:00:00 2001
From: Ftps
Date: Wed, 17 Jan 2024 01:04:01 +0900
Subject: [PATCH 02/46] Add .gitignore for GPT_SoVITS/pretrained_models;
 remove init placeholder

---
 GPT_SoVITS/pretrained_models/.gitignore | 2 ++
 GPT_SoVITS/pretrained_models/init       | 1 -
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 GPT_SoVITS/pretrained_models/.gitignore
 delete mode 100644 GPT_SoVITS/pretrained_models/init

diff --git a/GPT_SoVITS/pretrained_models/.gitignore b/GPT_SoVITS/pretrained_models/.gitignore
new file mode 100644
index 00000000..c96a04f0
--- /dev/null
+++ b/GPT_SoVITS/pretrained_models/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
\ No newline at end of file
diff --git a/GPT_SoVITS/pretrained_models/init b/GPT_SoVITS/pretrained_models/init
deleted file mode 100644
index 8b137891..00000000
--- a/GPT_SoVITS/pretrained_models/init
+++ /dev/null
@@ -1 +0,0 @@
-

From ebe96e0ebe25d6ac96ad1728f1a5d34fb94d4898 Mon Sep 17 00:00:00 2001
From: Ftps
Date: Wed, 17 Jan 2024 01:05:50 +0900
Subject: [PATCH 03/46] Remove leftover GPT_SoVITS/init placeholder

---
 GPT_SoVITS/init | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 GPT_SoVITS/init

diff --git a/GPT_SoVITS/init b/GPT_SoVITS/init
deleted file mode 100644
index 8b137891..00000000
--- a/GPT_SoVITS/init
+++ /dev/null
@@ -1 +0,0 @@
-

From d9627691b208cecb5f472033050e3003edb8cb6b Mon Sep 17 00:00:00 2001
From: Ftps
Date: Wed, 17 Jan 2024 12:35:38 +0900
Subject: [PATCH 04/46] Add root .gitignore

---
 .gitignore | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..64eef948
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.DS_Store
+__pycache__
+*.pyc

From f2f3d1786773688cc98e1aaecc02fdf0d0bf192f Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Thu, 18 Jan 2024 00:30:37 +0800
Subject: [PATCH 05/46] Add files via upload

---
 webui.py | 1317 +++++++++++++++---------
 1 file changed, 354 
insertions(+), 963 deletions(-) diff --git a/webui.py b/webui.py index e51b9e61..dbccba79 100644 --- a/webui.py +++ b/webui.py @@ -1,48 +1,35 @@ -import json, yaml, warnings, torch +import json,yaml,warnings,torch import platform warnings.filterwarnings("ignore") torch.manual_seed(233333) -import os, sys - +import os,pdb,sys now_dir = os.getcwd() tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp import site - -site_packages_root = "%s/runtime/Lib/site-packages" % now_dir +site_packages_root="%s/runtime/Lib/site-packages"%now_dir for path in site.getsitepackages(): - if "site-packages" in path: - site_packages_root = path + if("site-packages"in path):site_packages_root=path os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" -with open("%s/users.pth" % (site_packages_root), "w") as f: - f.write( - "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" - % (now_dir, now_dir, now_dir, now_dir, now_dir) - ) +with open("%s/users.pth"%(site_packages_root),"w")as f: + f.write("%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"%(now_dir,now_dir,now_dir,now_dir,now_dir)) import traceback - sys.path.append(now_dir) +import shutil +import pdb import gradio as gr from subprocess import Popen -from config import ( - python_exec, - infer_device, - is_half, - exp_root, - webui_port_main, - webui_port_infer_tts, - webui_port_uvr5, - webui_port_subfix, -) -from tools.i18n.i18n import I18nAuto - +import signal +from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix +from i18n.i18n import I18nAuto i18n = I18nAuto() +from scipy.io import wavfile +from tools.my_utils import load_audio from multiprocessing import cpu_count - -n_cpu = cpu_count() +n_cpu=cpu_count() # 判断是否有能用来训练和加速推理的N卡 ngpu = torch.cuda.device_count() @@ -53,42 +40,11 @@ if_gpu_ok = False if torch.cuda.is_available() or ngpu != 0: for i in range(ngpu): gpu_name = torch.cuda.get_device_name(i) - if any( - value in gpu_name.upper() - for value in [ - "10", - "16", - "20", - "30", - "40", - "A2", - "A3", - "A4", - "P4", - "A50", - "500", - "A60", - "70", - "80", - "90", - "M4", - "T4", - "TITAN", - "L", - ] - ): + if any(value in gpu_name.upper()for value in ["10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L"]): # A10#A100#V100#A40#P40#M40#K80#A4500 if_gpu_ok = True # 至少有一张能用的N卡 gpu_infos.append("%s\t%s" % (i, gpu_name)) - mem.append( - int( - torch.cuda.get_device_properties(i).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - ) + mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4)) if if_gpu_ok and len(gpu_infos) > 0: gpu_info = "\n".join(gpu_infos) default_batch_size = min(mem) // 2 @@ -97,395 +53,230 @@ else: default_batch_size = 1 gpus = "-".join([i[0] for i in gpu_infos]) -pretrained_sovits_name = "GPT_SoVITS/pretrained_models/s2G488k.pth" -pretrained_gpt_name = ( - "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" -) - - +pretrained_sovits_name="GPT_SoVITS/pretrained_models/s2G488k.pth" +pretrained_gpt_name="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" def get_weights_names(): SoVITS_names = [pretrained_sovits_name] for name in os.listdir(SoVITS_weight_root): - if name.endswith(".pth"): - SoVITS_names.append(name) + if name.endswith(".pth"):SoVITS_names.append(name) GPT_names = [pretrained_gpt_name] for name in 
os.listdir(GPT_weight_root): - if name.endswith(".ckpt"): - GPT_names.append(name) - return SoVITS_names, GPT_names - - -SoVITS_weight_root = "SoVITS_weights" -GPT_weight_root = "GPT_weights" -os.makedirs(SoVITS_weight_root, exist_ok=True) -os.makedirs(GPT_weight_root, exist_ok=True) -SoVITS_names, GPT_names = get_weights_names() - + if name.endswith(".ckpt"): GPT_names.append(name) + return SoVITS_names,GPT_names +SoVITS_weight_root="SoVITS_weights" +GPT_weight_root="GPT_weights" +os.makedirs(SoVITS_weight_root,exist_ok=True) +os.makedirs(GPT_weight_root,exist_ok=True) +SoVITS_names,GPT_names = get_weights_names() def change_choices(): SoVITS_names, GPT_names = get_weights_names() - return {"choices": sorted(SoVITS_names), "__type__": "update"}, { - "choices": sorted(GPT_names), - "__type__": "update", - } - - -p_label = None -p_uvr5 = None -p_asr = None -p_tts_inference = None - -system = platform.system() + return {"choices": sorted(SoVITS_names), "__type__": "update"}, {"choices": sorted(GPT_names), "__type__": "update"} +p_label=None +p_uvr5=None +p_asr=None +p_tts_inference=None +system=platform.system() def kill_process(pid): - if system == "Windows": + if(system=="Windows"): cmd = "taskkill /t /f /pid %s" % pid else: - cmd = "kill -9 %s" % pid + cmd = "kill -9 %s"%pid print(cmd) - os.system(cmd) ###linux上杀了webui,可能还会没杀干净。。。 + os.system(cmd)###linux上杀了webui,可能还会没杀干净。。。 # os.kill(p_label.pid,19)#主进程#控制台进程#python子进程###不好使,连主进程的webui一起关了,辣鸡 - -def change_label(if_label, path_list): +def change_label(if_label,path_list): global p_label - if if_label == True and p_label == None: - cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s' % ( - python_exec, - path_list, - webui_port_subfix, - ) + if(if_label==True and p_label==None): + cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s'%(python_exec,path_list,webui_port_subfix) yield "打标工具WebUI已开启" print(cmd) p_label = Popen(cmd, shell=True) - elif if_label == False and p_label != None: + elif(if_label==False and p_label!=None): kill_process(p_label.pid) - p_label = None + p_label=None yield "打标工具WebUI已关闭" - def change_uvr5(if_uvr5): global p_uvr5 - if if_uvr5 == True and p_uvr5 == None: - cmd = '"%s" tools/uvr5/webui.py "%s" %s %s' % ( - python_exec, - infer_device, - is_half, - webui_port_uvr5, - ) + if(if_uvr5==True and p_uvr5==None): + cmd = '"%s" tools/uvr5/webui.py "%s" %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5) yield "UVR5已开启" print(cmd) p_uvr5 = Popen(cmd, shell=True) - elif if_uvr5 == False and p_uvr5 != None: + elif(if_uvr5==False and p_uvr5!=None): kill_process(p_uvr5.pid) - p_uvr5 = None + p_uvr5=None yield "UVR5已关闭" - -def change_tts_inference( - if_tts, bert_path, cnhubert_base_path, gpu_number, gpt_path, sovits_path -): +def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path,sovits_path): global p_tts_inference - if if_tts == True and p_tts_inference == None: - os.environ["gpt_path"] = ( - gpt_path if "/" in gpt_path else "%s/%s" % (GPT_weight_root, gpt_path) - ) - os.environ["sovits_path"] = ( - sovits_path - if "/" in sovits_path - else "%s/%s" % (SoVITS_weight_root, sovits_path) - ) - os.environ["cnhubert_base_path"] = cnhubert_base_path - os.environ["bert_path"] = bert_path - os.environ["_CUDA_VISIBLE_DEVICES"] = gpu_number - os.environ["is_half"] = str(is_half) - os.environ["infer_ttswebui"] = str(webui_port_infer_tts) - cmd = '"%s" GPT_SoVITS/inference_webui.py' % (python_exec) + if(if_tts==True and p_tts_inference==None): + 
os.environ["gpt_path"]=gpt_path if "/" in gpt_path else "%s/%s"%(GPT_weight_root,gpt_path) + os.environ["sovits_path"]=sovits_path if "/"in sovits_path else "%s/%s"%(SoVITS_weight_root,sovits_path) + os.environ["cnhubert_base_path"]=cnhubert_base_path + os.environ["bert_path"]=bert_path + os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_number + os.environ["is_half"]=str(is_half) + os.environ["infer_ttswebui"]=str(webui_port_infer_tts) + cmd = '"%s" GPT_SoVITS/inference_webui.py'%(python_exec) yield "TTS推理进程已开启" print(cmd) p_tts_inference = Popen(cmd, shell=True) - elif if_tts == False and p_tts_inference != None: + elif(if_tts==False and p_tts_inference!=None): kill_process(p_tts_inference.pid) - p_tts_inference = None + p_tts_inference=None yield "TTS推理进程已关闭" def open_asr(asr_inp_dir): global p_asr - if p_asr == None: - cmd = '"%s" tools/damo_asr/cmd-asr.py "%s"' % (python_exec, asr_inp_dir) - yield "ASR任务开启:%s" % cmd, {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + if(p_asr==None): + cmd = '"%s" tools/damo_asr/cmd-asr.py "%s"'%(python_exec,asr_inp_dir) + yield "ASR任务开启:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() - p_asr = None - yield "ASR任务完成", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + p_asr=None + yield "ASR任务完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的ASR任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - + yield "已有正在进行的ASR任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close_asr(): global p_asr - if p_asr != None: + if(p_asr!=None): kill_process(p_asr.pid) - p_asr = None - return ( - "已终止ASR进程", - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) + p_asr=None + return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False} - -""" +''' button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Bb,button1Ba_open,button1Ba_close]) button1Ba_close.click(close1Ba, [], [info1Bb,button1Ba_open,button1Ba_close]) -""" -p_train_SoVITS = None - - -def open1Ba( - batch_size, - total_epoch, - exp_name, - text_low_lr_rate, - if_save_latest, - if_save_every_weights, - save_every_epoch, - gpu_numbers1Ba, - pretrained_s2G, - pretrained_s2D, -): +''' +p_train_SoVITS=None +def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D): global p_train_SoVITS - if p_train_SoVITS == None: - with open("GPT_SoVITS/configs/s2.json") as f: - data = f.read() - data = json.loads(data) - s2_dir = "%s/%s" % (exp_root, exp_name) - os.makedirs("%s/logs_s2" % (s2_dir), exist_ok=True) - data["train"]["batch_size"] = batch_size - data["train"]["epochs"] = total_epoch - data["train"]["text_low_lr_rate"] = text_low_lr_rate - data["train"]["pretrained_s2G"] = pretrained_s2G - data["train"]["pretrained_s2D"] = pretrained_s2D - data["train"]["if_save_latest"] = if_save_latest - data["train"]["if_save_every_weights"] = if_save_every_weights - data["train"]["save_every_epoch"] = save_every_epoch - data["train"]["gpu_numbers"] = gpu_numbers1Ba - data["data"]["exp_dir"] = data["s2_ckpt_dir"] = s2_dir - 
data["save_weight_dir"] = SoVITS_weight_root - data["name"] = exp_name - tmp_config_path = "TEMP/tmp_s2.json" - with open(tmp_config_path, "w") as f: - f.write(json.dumps(data)) + if(p_train_SoVITS==None): + with open("GPT_SoVITS/configs/s2.json")as f: + data=f.read() + data=json.loads(data) + s2_dir="%s/%s"%(exp_root,exp_name) + os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True) + data["train"]["batch_size"]=batch_size + data["train"]["epochs"]=total_epoch + data["train"]["text_low_lr_rate"]=text_low_lr_rate + data["train"]["pretrained_s2G"]=pretrained_s2G + data["train"]["pretrained_s2D"]=pretrained_s2D + data["train"]["if_save_latest"]=if_save_latest + data["train"]["if_save_every_weights"]=if_save_every_weights + data["train"]["save_every_epoch"]=save_every_epoch + data["train"]["gpu_numbers"]=gpu_numbers1Ba + data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir + data["save_weight_dir"]=SoVITS_weight_root + data["name"]=exp_name + tmp_config_path="TEMP/tmp_s2.json" + with open(tmp_config_path,"w")as f:f.write(json.dumps(data)) - cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"' % ( - python_exec, - tmp_config_path, - ) - yield "SoVITS训练开始:%s" % cmd, {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path) + yield "SoVITS训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_SoVITS = Popen(cmd, shell=True) p_train_SoVITS.wait() - p_train_SoVITS = None - yield "SoVITS训练完成", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + p_train_SoVITS=None + yield "SoVITS训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务", { - "__type__": "update", - "visible": False, - }, {"__type__": "update", "visible": True} - + yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Ba(): global p_train_SoVITS - if p_train_SoVITS != None: + if(p_train_SoVITS!=None): kill_process(p_train_SoVITS.pid) - p_train_SoVITS = None - return ( - "已终止SoVITS训练", - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) + p_train_SoVITS=None + return "已终止SoVITS训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} - -p_train_GPT = None - - -def open1Bb( - batch_size, - total_epoch, - exp_name, - if_save_latest, - if_save_every_weights, - save_every_epoch, - gpu_numbers, - pretrained_s1, -): +p_train_GPT=None +def open1Bb(batch_size,total_epoch,exp_name,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1): global p_train_GPT - if p_train_GPT == None: - with open("GPT_SoVITS/configs/s1longer.yaml") as f: - data = f.read() - data = yaml.load(data, Loader=yaml.FullLoader) - s1_dir = "%s/%s" % (exp_root, exp_name) - os.makedirs("%s/logs_s1" % (s1_dir), exist_ok=True) - data["train"]["batch_size"] = batch_size - data["train"]["epochs"] = total_epoch - data["pretrained_s1"] = pretrained_s1 - data["train"]["save_every_n_epoch"] = save_every_epoch - data["train"]["if_save_every_weights"] = if_save_every_weights - data["train"]["if_save_latest"] = if_save_latest - data["train"]["half_weights_save_dir"] = GPT_weight_root - data["train"]["exp_name"] = exp_name - data["train_semantic_path"] = "%s/6-name2semantic.tsv" % s1_dir - data["train_phoneme_path"] = "%s/2-name2text.txt" % s1_dir - data["output_dir"] = 
"%s/logs_s1" % s1_dir + if(p_train_GPT==None): + with open("GPT_SoVITS/configs/s1longer.yaml")as f: + data=f.read() + data=yaml.load(data, Loader=yaml.FullLoader) + s1_dir="%s/%s"%(exp_root,exp_name) + os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True) + data["train"]["batch_size"]=batch_size + data["train"]["epochs"]=total_epoch + data["pretrained_s1"]=pretrained_s1 + data["train"]["save_every_n_epoch"]=save_every_epoch + data["train"]["if_save_every_weights"]=if_save_every_weights + data["train"]["if_save_latest"]=if_save_latest + data["train"]["half_weights_save_dir"]=GPT_weight_root + data["train"]["exp_name"]=exp_name + data["train_semantic_path"]="%s/6-name2semantic.tsv"%s1_dir + data["train_phoneme_path"]="%s/2-name2text.txt"%s1_dir + data["output_dir"]="%s/logs_s1"%s1_dir - os.environ["_CUDA_VISIBLE_DEVICES"] = gpu_numbers.replace("-", ",") - os.environ["hz"] = "25hz" - tmp_config_path = "TEMP/tmp_s1.yaml" - with open(tmp_config_path, "w") as f: - f.write(yaml.dump(data, default_flow_style=False)) + os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_numbers.replace("-",",") + os.environ["hz"]="25hz" + tmp_config_path="TEMP/tmp_s1.yaml" + with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False)) # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir) - cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" ' % ( - python_exec, - tmp_config_path, - ) - yield "GPT训练开始:%s" % cmd, {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path) + yield "GPT训练开始:%s"%cmd,{"__type__":"update","visible":False},{"__type__":"update","visible":True} print(cmd) p_train_GPT = Popen(cmd, shell=True) p_train_GPT.wait() - p_train_GPT = None - yield "GPT训练完成", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + p_train_GPT=None + yield "GPT训练完成",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务", { - "__type__": "update", - "visible": False, - }, {"__type__": "update", "visible": True} - + yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务",{"__type__":"update","visible":False},{"__type__":"update","visible":True} def close1Bb(): global p_train_GPT - if p_train_GPT != None: + if(p_train_GPT!=None): kill_process(p_train_GPT.pid) - p_train_GPT = None - return ( - "已终止GPT训练", - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) + p_train_GPT=None + return "已终止GPT训练",{"__type__":"update","visible":True},{"__type__":"update","visible":False} - -ps_slice = [] - - -def open_slice( - inp, - opt_root, - threshold, - min_length, - min_interval, - hop_size, - max_sil_kept, - _max, - alpha, - n_parts, -): +ps_slice=[] +def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts): global ps_slice - if os.path.exists(inp) == False: - yield "输入路径不存在", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + if(os.path.exists(inp)==False): + yield "输入路径不存在",{"__type__":"update","visible":True},{"__type__":"update","visible":False} return - if os.path.isfile(inp): - n_parts = 1 - elif os.path.isdir(inp): - pass + if os.path.isfile(inp):n_parts=1 + elif os.path.isdir(inp):pass else: - yield "输入路径存在但既不是文件也不是文件夹", 
{"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + yield "输入路径存在但既不是文件也不是文件夹",{"__type__":"update","visible":True},{"__type__":"update","visible":False} return - if ps_slice == []: + if (ps_slice == []): for i_part in range(n_parts): - cmd = ( - '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s' - "" - % ( - python_exec, - inp, - opt_root, - threshold, - min_length, - min_interval, - hop_size, - max_sil_kept, - _max, - alpha, - i_part, - n_parts, - ) - ) + cmd = '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s''' % (python_exec,inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts) print(cmd) p = Popen(cmd, shell=True) ps_slice.append(p) - yield "切割执行中", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps_slice: p.wait() - ps_slice = [] - yield "切割结束", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + ps_slice=[] + yield "切割结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - + yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close_slice(): global ps_slice - if ps_slice != []: + if (ps_slice != []): for p_slice in ps_slice: try: kill_process(p_slice.pid) except: traceback.print_exc() - ps_slice = [] - return ( - "已终止所有切割进程", - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) + ps_slice=[] + return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} - -""" +''' inp_text= os.environ.get("inp_text") inp_wav_dir= os.environ.get("inp_wav_dir") exp_name= os.environ.get("exp_name") @@ -494,71 +285,53 @@ all_parts= os.environ.get("all_parts") os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") opt_dir= os.environ.get("opt_dir")#"/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name bert_pretrained_dir= os.environ.get("bert_pretrained_dir")#"/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large" -""" -ps1a = [] - - -def open1a(inp_text, inp_wav_dir, exp_name, gpu_numbers, bert_pretrained_dir): +''' +ps1a=[] +def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): global ps1a - if ps1a == []: - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": "%s/%s" % (exp_root, exp_name), - "bert_pretrained_dir": bert_pretrained_dir, + if (ps1a == []): + config={ + "inp_text":inp_text, + "inp_wav_dir":inp_wav_dir, + "exp_name":exp_name, + "opt_dir":"%s/%s"%(exp_root,exp_name), + "bert_pretrained_dir":bert_pretrained_dir, } - gpu_names = gpu_numbers.split("-") - all_parts = len(gpu_names) + gpu_names=gpu_numbers.split("-") + all_parts=len(gpu_names) for i_part in range(all_parts): config.update( { "i_part": str(i_part), "all_parts": str(all_parts), "_CUDA_VISIBLE_DEVICES": gpu_names[i_part], - "is_half": str(is_half), + "is_half": str(is_half) } ) os.environ.update(config) - cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py' % python_exec + cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec print(cmd) p = Popen(cmd, shell=True) 
ps1a.append(p) - yield "文本进程执行中", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1a: p.wait() - ps1a = [] - yield "文本进程结束", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + ps1a=[] + yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - + yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1a(): global ps1a - if ps1a != []: + if (ps1a != []): for p1a in ps1a: try: kill_process(p1a.pid) except: traceback.print_exc() - ps1a = [] - return ( - "已终止所有1a进程", - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) - - -""" + ps1a=[] + return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} +''' inp_text= os.environ.get("inp_text") inp_wav_dir= os.environ.get("inp_wav_dir") exp_name= os.environ.get("exp_name") @@ -567,23 +340,21 @@ all_parts= os.environ.get("all_parts") os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") opt_dir= os.environ.get("opt_dir") cnhubert.cnhubert_base_path= os.environ.get("cnhubert_base_dir") -""" -ps1b = [] - - -def open1b(inp_text, inp_wav_dir, exp_name, gpu_numbers, ssl_pretrained_dir): +''' +ps1b=[] +def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): global ps1b - if ps1b == []: - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": "%s/%s" % (exp_root, exp_name), - "cnhubert_base_dir": ssl_pretrained_dir, - "is_half": str(is_half), + if (ps1b == []): + config={ + "inp_text":inp_text, + "inp_wav_dir":inp_wav_dir, + "exp_name":exp_name, + "opt_dir":"%s/%s"%(exp_root,exp_name), + "cnhubert_base_dir":ssl_pretrained_dir, + "is_half": str(is_half) } - gpu_names = gpu_numbers.split("-") - all_parts = len(gpu_names) + gpu_names=gpu_numbers.split("-") + all_parts=len(gpu_names) for i_part in range(all_parts): config.update( { @@ -593,47 +364,29 @@ def open1b(inp_text, inp_wav_dir, exp_name, gpu_numbers, ssl_pretrained_dir): } ) os.environ.update(config) - cmd = ( - '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py' % python_exec - ) + cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec print(cmd) p = Popen(cmd, shell=True) ps1b.append(p) - yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1b: p.wait() - ps1b = [] - yield "SSL提取进程结束", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + ps1b=[] + yield "SSL提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", { - "__type__": "update", - "visible": False, - }, {"__type__": "update", "visible": True} - + yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1b(): global ps1b - if ps1b != []: + if (ps1b != []): for p1b in ps1b: try: kill_process(p1b.pid) except: traceback.print_exc() - ps1b = [] - return ( - "已终止所有1b进程", - 
{"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) - - -""" + ps1b=[] + return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} +''' inp_text= os.environ.get("inp_text") exp_name= os.environ.get("exp_name") i_part= os.environ.get("i_part") @@ -641,23 +394,21 @@ all_parts= os.environ.get("all_parts") os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") opt_dir= os.environ.get("opt_dir") pretrained_s2G= os.environ.get("pretrained_s2G") -""" -ps1c = [] - - -def open1c(inp_text, exp_name, gpu_numbers, pretrained_s2G_path): +''' +ps1c=[] +def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): global ps1c - if ps1c == []: - config = { - "inp_text": inp_text, - "exp_name": exp_name, - "opt_dir": "%s/%s" % (exp_root, exp_name), - "pretrained_s2G": pretrained_s2G_path, - "s2config_path": "GPT_SoVITS/configs/s2.json", - "is_half": str(is_half), + if (ps1c == []): + config={ + "inp_text":inp_text, + "exp_name":exp_name, + "opt_dir":"%s/%s"%(exp_root,exp_name), + "pretrained_s2G":pretrained_s2G_path, + "s2config_path":"GPT_SoVITS/configs/s2.json", + "is_half": str(is_half) } - gpu_names = gpu_numbers.split("-") - all_parts = len(gpu_names) + gpu_names=gpu_numbers.split("-") + all_parts=len(gpu_names) for i_part in range(all_parts): config.update( { @@ -667,76 +418,48 @@ def open1c(inp_text, exp_name, gpu_numbers, pretrained_s2G_path): } ) os.environ.update(config) - cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py' % python_exec + cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec print(cmd) p = Popen(cmd, shell=True) ps1c.append(p) - yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1c: p.wait() - ps1c = [] - yield "语义token提取进程结束", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + ps1c=[] + yield "语义token提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: - yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", { - "__type__": "update", - "visible": False, - }, {"__type__": "update", "visible": True} - + yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1c(): global ps1c - if ps1c != []: + if (ps1c != []): for p1c in ps1c: try: kill_process(p1c.pid) except: traceback.print_exc() - ps1c = [] - return ( - "已终止所有语义token进程", - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) - - + ps1c=[] + return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} #####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G -ps1abc = [] - - -def open1abc( - inp_text, - inp_wav_dir, - exp_name, - gpu_numbers1a, - gpu_numbers1Ba, - gpu_numbers1c, - bert_pretrained_dir, - ssl_pretrained_dir, - pretrained_s2G_path, -): +ps1abc=[] +def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path): global ps1abc - if ps1abc == []: - opt_dir = "%s/%s" % (exp_root, exp_name) + if (ps1abc == []): + opt_dir="%s/%s"%(exp_root,exp_name) try: #############################1a - path_text = "%s/2-name2text.txt" % opt_dir - if 
os.path.exists(path_text) == False: - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": opt_dir, - "bert_pretrained_dir": bert_pretrained_dir, - "is_half": str(is_half), + path_text="%s/2-name2text.txt" % opt_dir + if(os.path.exists(path_text)==False): + config={ + "inp_text":inp_text, + "inp_wav_dir":inp_wav_dir, + "exp_name":exp_name, + "opt_dir":opt_dir, + "bert_pretrained_dir":bert_pretrained_dir, + "is_half": str(is_half) } - gpu_names = gpu_numbers1a.split("-") - all_parts = len(gpu_names) + gpu_names=gpu_numbers1a.split("-") + all_parts=len(gpu_names) for i_part in range(all_parts): config.update( { @@ -746,43 +469,34 @@ def open1abc( } ) os.environ.update(config) - cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py' % python_exec + cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a-ing", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - for p in ps1abc: - p.wait() + yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + for p in ps1abc:p.wait() opt = [] - for i_part in range( - all_parts - ): # txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part) + for i_part in range(all_parts):#txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part) txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) - with open(txt_path, "r", encoding="utf8") as f: + with open(txt_path, "r",encoding="utf8") as f: opt += f.read().strip("\n").split("\n") os.remove(txt_path) - with open(path_text, "w", encoding="utf8") as f: + with open(path_text, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield "进度:1a-done", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - ps1abc = [] + yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + ps1abc=[] #############################1b - config = { - "inp_text": inp_text, - "inp_wav_dir": inp_wav_dir, - "exp_name": exp_name, - "opt_dir": opt_dir, - "cnhubert_base_dir": ssl_pretrained_dir, + config={ + "inp_text":inp_text, + "inp_wav_dir":inp_wav_dir, + "exp_name":exp_name, + "opt_dir":opt_dir, + "cnhubert_base_dir":ssl_pretrained_dir, } - gpu_names = gpu_numbers1Ba.split("-") - all_parts = len(gpu_names) + gpu_names=gpu_numbers1Ba.split("-") + all_parts=len(gpu_names) for i_part in range(all_parts): config.update( { @@ -792,36 +506,26 @@ def open1abc( } ) os.environ.update(config) - cmd = ( - '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py' - % python_exec - ) + cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - for p in ps1abc: - p.wait() - yield "进度:1a1b-done", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - ps1abc = [] + yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + for p in ps1abc:p.wait() + yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + ps1abc=[] #############################1c path_semantic = "%s/6-name2semantic.tsv" % opt_dir - if os.path.exists(path_semantic) == False: - config = { - "inp_text": inp_text, - "exp_name": exp_name, - "opt_dir": opt_dir, - "pretrained_s2G": 
pretrained_s2G_path, - "s2config_path": "GPT_SoVITS/configs/s2.json", + if(os.path.exists(path_semantic)==False): + config={ + "inp_text":inp_text, + "exp_name":exp_name, + "opt_dir":opt_dir, + "pretrained_s2G":pretrained_s2G_path, + "s2config_path":"GPT_SoVITS/configs/s2.json", } - gpu_names = gpu_numbers1c.split("-") - all_parts = len(gpu_names) + gpu_names=gpu_numbers1c.split("-") + all_parts=len(gpu_names) for i_part in range(all_parts): config.update( { @@ -831,137 +535,74 @@ def open1abc( } ) os.environ.update(config) - cmd = ( - '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py' - % python_exec - ) + cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec print(cmd) p = Popen(cmd, shell=True) ps1abc.append(p) - yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - for p in ps1abc: - p.wait() + yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} + for p in ps1abc:p.wait() opt = ["item_name semantic_audio"] for i_part in range(all_parts): semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) - with open(semantic_path, "r", encoding="utf8") as f: + with open(semantic_path, "r",encoding="utf8") as f: opt += f.read().strip("\n").split("\n") os.remove(semantic_path) - with open(path_semantic, "w", encoding="utf8") as f: + with open(path_semantic, "w",encoding="utf8") as f: f.write("\n".join(opt) + "\n") - yield "进度:all-done", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } + yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} ps1abc = [] - yield "一键三连进程结束", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} except: traceback.print_exc() close1abc() - yield "一键三连中途报错", {"__type__": "update", "visible": True}, { - "__type__": "update", - "visible": False, - } + yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} else: - yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, { - "__type__": "update", - "visible": True, - } - + yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} def close1abc(): global ps1abc - if ps1abc != []: + if (ps1abc != []): for p1abc in ps1abc: try: kill_process(p1abc.pid) except: traceback.print_exc() - ps1abc = [] - return ( - "已终止所有一键三连进程", - {"__type__": "update", "visible": True}, - {"__type__": "update", "visible": False}, - ) - + ps1abc=[] + return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} with gr.Blocks(title="GPT-SoVITS WebUI") as app: gr.Markdown( - value="本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." + value= + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." ) with gr.Tabs(): - with gr.TabItem("0-前置数据集获取工具"): # 提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标 + with gr.TabItem("0-前置数据集获取工具"):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标 gr.Markdown(value="0a-UVR5人声伴奏分离&去混响去延迟工具") with gr.Row(): - if_uvr5 = gr.Checkbox(label="是否开启UVR5-WebUI", show_label=True) + if_uvr5 = gr.Checkbox(label="是否开启UVR5-WebUI",show_label=True) uvr5_info = gr.Textbox(label="UVR5进程输出信息") gr.Markdown(value="0b-语音切分工具") with gr.Row(): with gr.Row(): - slice_inp_path = gr.Textbox(label="音频自动切分输入路径,可文件可文件夹", value="") - slice_opt_root = gr.Textbox( - label="切分后的子音频的输出根目录", value="output/slicer_opt" - ) - threshold = gr.Textbox( - label="threshold:音量小于这个值视作静音的备选切割点", value="-34" - ) - min_length = gr.Textbox( - label="min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值", value="4000" - ) - min_interval = gr.Textbox(label="min_interval:最短切割间隔", value="300") - hop_size = gr.Textbox( - label="hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)", value="10" - ) - max_sil_kept = gr.Textbox( - label="max_sil_kept:切完后静音最多留多长", value="500" - ) + slice_inp_path=gr.Textbox(label="音频自动切分输入路径,可文件可文件夹",value="") + slice_opt_root=gr.Textbox(label="切分后的子音频的输出根目录",value="output/slicer_opt") + threshold=gr.Textbox(label="threshold:音量小于这个值视作静音的备选切割点",value="-34") + min_length=gr.Textbox(label="min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值",value="4000") + min_interval=gr.Textbox(label="min_interval:最短切割间隔",value="300") + hop_size=gr.Textbox(label="hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)",value="10") + max_sil_kept=gr.Textbox(label="max_sil_kept:切完后静音最多留多长",value="500") with gr.Row(): - open_slicer_button = gr.Button( - "开启语音切割", variant="primary", visible=True - ) - close_slicer_button = gr.Button( - "终止语音切割", variant="primary", visible=False - ) - _max = gr.Slider( - minimum=0, - maximum=1, - step=0.05, - label="max:归一化后最大值多少", - value=0.9, - interactive=True, - ) - alpha = gr.Slider( - minimum=0, - maximum=1, - step=0.05, - label="alpha_mix:混多少比例归一化后音频进来", - value=0.25, - interactive=True, - ) - n_process = gr.Slider( - minimum=1, - maximum=n_cpu, - step=1, - label="切割使用的进程数", - value=4, - interactive=True, - ) + open_slicer_button=gr.Button("开启语音切割", variant="primary",visible=True) + close_slicer_button=gr.Button("终止语音切割", variant="primary",visible=False) + _max=gr.Slider(minimum=0,maximum=1,step=0.05,label="max:归一化后最大值多少",value=0.9,interactive=True) + alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label="alpha_mix:混多少比例归一化后音频进来",value=0.25,interactive=True) + n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label="切割使用的进程数",value=4,interactive=True) slicer_info = gr.Textbox(label="语音切割进程输出信息") gr.Markdown(value="0c-中文批量离线ASR工具") with gr.Row(): - open_asr_button = gr.Button( - "开启离线批量ASR", variant="primary", visible=True - ) - close_asr_button = gr.Button( - "终止ASR进程", variant="primary", visible=False - ) + open_asr_button = gr.Button("开启离线批量ASR", variant="primary",visible=True) + close_asr_button = gr.Button("终止ASR进程", variant="primary",visible=False) asr_inp_dir = gr.Textbox( label="批量ASR(中文only)输入文件夹路径", value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx", @@ -970,365 +611,115 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: asr_info = gr.Textbox(label="ASR进程输出信息") gr.Markdown(value="0d-语音文本校对标注工具") with gr.Row(): - if_label = gr.Checkbox(label="是否开启打标WebUI", show_label=True) + if_label = gr.Checkbox(label="是否开启打标WebUI",show_label=True) path_list = gr.Textbox( label="打标数据标注文件路径", value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list", interactive=True, ) label_info = 
gr.Textbox(label="打标工具进程输出信息") - if_label.change(change_label, [if_label, path_list], [label_info]) + if_label.change(change_label, [if_label,path_list], [label_info]) if_uvr5.change(change_uvr5, [if_uvr5], [uvr5_info]) - open_asr_button.click( - open_asr, [asr_inp_dir], [asr_info, open_asr_button, close_asr_button] - ) - close_asr_button.click( - close_asr, [], [asr_info, open_asr_button, close_asr_button] - ) - open_slicer_button.click( - open_slice, - [ - slice_inp_path, - slice_opt_root, - threshold, - min_length, - min_interval, - hop_size, - max_sil_kept, - _max, - alpha, - n_process, - ], - [slicer_info, open_slicer_button, close_slicer_button], - ) - close_slicer_button.click( - close_slice, [], [slicer_info, open_slicer_button, close_slicer_button] - ) + open_asr_button.click(open_asr, [asr_inp_dir], [asr_info,open_asr_button,close_asr_button]) + close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button]) + open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button]) + close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button]) with gr.TabItem("1-GPT-SoVITS-TTS"): with gr.Row(): exp_name = gr.Textbox(label="*实验/模型名", value="xxx", interactive=True) - gpu_info = gr.Textbox( - label="显卡信息", value=gpu_info, visible=True, interactive=False - ) - pretrained_s2G = gr.Textbox( - label="预训练的SoVITS-G模型路径", - value="GPT_SoVITS/pretrained_models/s2G488k.pth", - interactive=True, - ) - pretrained_s2D = gr.Textbox( - label="预训练的SoVITS-D模型路径", - value="GPT_SoVITS/pretrained_models/s2D488k.pth", - interactive=True, - ) - pretrained_s1 = gr.Textbox( - label="预训练的GPT模型路径", - value="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt", - interactive=True, - ) + gpu_info = gr.Textbox(label="显卡信息", value=gpu_info, visible=True, interactive=False) + pretrained_s2G = gr.Textbox(label="预训练的SoVITS-G模型路径", value="GPT_SoVITS/pretrained_models/s2G488k.pth", interactive=True) + pretrained_s2D = gr.Textbox(label="预训练的SoVITS-D模型路径", value="GPT_SoVITS/pretrained_models/s2D488k.pth", interactive=True) + pretrained_s1 = gr.Textbox(label="预训练的GPT模型路径", value="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt", interactive=True) with gr.TabItem("1A-训练集格式化工具"): gr.Markdown(value="输出logs/实验名目录下应有23456开头的文件和文件夹") with gr.Row(): - inp_text = gr.Textbox( - label="*文本标注文件", - value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list", - interactive=True, - ) - inp_wav_dir = gr.Textbox( - label="*训练集音频文件目录", - value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", - interactive=True, - ) + inp_text = gr.Textbox(label="*文本标注文件",value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list",interactive=True) + inp_wav_dir = gr.Textbox(label="*训练集音频文件目录",value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",interactive=True) gr.Markdown(value="1Aa-文本内容") with gr.Row(): - gpu_numbers1a = gr.Textbox( - label="GPU卡号以-分割,每个卡号一个进程", - value="%s-%s" % (gpus, gpus), - interactive=True, - ) - bert_pretrained_dir = gr.Textbox( - label="预训练的中文BERT模型路径", - value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large", - interactive=False, - ) - button1a_open = gr.Button("开启文本获取", variant="primary", visible=True) - button1a_close = gr.Button( - "终止文本获取进程", variant="primary", visible=False - ) - info1a = gr.Textbox(label="文本进程输出信息") + gpu_numbers1a = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程",value="%s-%s"%(gpus,gpus),interactive=True) + 
bert_pretrained_dir = gr.Textbox(label="预训练的中文BERT模型路径",value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False) + button1a_open = gr.Button("开启文本获取", variant="primary",visible=True) + button1a_close = gr.Button("终止文本获取进程", variant="primary",visible=False) + info1a=gr.Textbox(label="文本进程输出信息") gr.Markdown(value="1Ab-SSL自监督特征提取") with gr.Row(): - gpu_numbers1Ba = gr.Textbox( - label="GPU卡号以-分割,每个卡号一个进程", - value="%s-%s" % (gpus, gpus), - interactive=True, - ) - cnhubert_base_dir = gr.Textbox( - label="预训练的SSL模型路径", - value="GPT_SoVITS/pretrained_models/chinese-hubert-base", - interactive=False, - ) - button1b_open = gr.Button( - "开启SSL提取", variant="primary", visible=True - ) - button1b_close = gr.Button( - "终止SSL提取进程", variant="primary", visible=False - ) - info1b = gr.Textbox(label="SSL进程输出信息") + gpu_numbers1Ba = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程",value="%s-%s"%(gpus,gpus),interactive=True) + cnhubert_base_dir = gr.Textbox(label="预训练的SSL模型路径",value="GPT_SoVITS/pretrained_models/chinese-hubert-base",interactive=False) + button1b_open = gr.Button("开启SSL提取", variant="primary",visible=True) + button1b_close = gr.Button("终止SSL提取进程", variant="primary",visible=False) + info1b=gr.Textbox(label="SSL进程输出信息") gr.Markdown(value="1Ac-语义token提取") with gr.Row(): - gpu_numbers1c = gr.Textbox( - label="GPU卡号以-分割,每个卡号一个进程", - value="%s-%s" % (gpus, gpus), - interactive=True, - ) - button1c_open = gr.Button( - "开启语义token提取", variant="primary", visible=True - ) - button1c_close = gr.Button( - "终止语义token提取进程", variant="primary", visible=False - ) - info1c = gr.Textbox(label="语义token提取进程输出信息") + gpu_numbers1c = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程",value="%s-%s"%(gpus,gpus),interactive=True) + button1c_open = gr.Button("开启语义token提取", variant="primary",visible=True) + button1c_close = gr.Button("终止语义token提取进程", variant="primary",visible=False) + info1c=gr.Textbox(label="语义token提取进程输出信息") gr.Markdown(value="1Aabc-训练集格式化一键三连") with gr.Row(): - button1abc_open = gr.Button( - "开启一键三连", variant="primary", visible=True - ) - button1abc_close = gr.Button( - "终止一键三连", variant="primary", visible=False - ) - info1abc = gr.Textbox(label="一键三连进程输出信息") - button1a_open.click( - open1a, - [inp_text, inp_wav_dir, exp_name, gpu_numbers1a, bert_pretrained_dir], - [info1a, button1a_open, button1a_close], - ) - button1a_close.click(close1a, [], [info1a, button1a_open, button1a_close]) - button1b_open.click( - open1b, - [inp_text, inp_wav_dir, exp_name, gpu_numbers1Ba, cnhubert_base_dir], - [info1b, button1b_open, button1b_close], - ) - button1b_close.click(close1b, [], [info1b, button1b_open, button1b_close]) - button1c_open.click( - open1c, - [inp_text, exp_name, gpu_numbers1c, pretrained_s2G], - [info1c, button1c_open, button1c_close], - ) - button1c_close.click(close1c, [], [info1c, button1c_open, button1c_close]) - button1abc_open.click( - open1abc, - [ - inp_text, - inp_wav_dir, - exp_name, - gpu_numbers1a, - gpu_numbers1Ba, - gpu_numbers1c, - bert_pretrained_dir, - cnhubert_base_dir, - pretrained_s2G, - ], - [info1abc, button1abc_open, button1abc_close], - ) - button1abc_close.click( - close1abc, [], [info1abc, button1abc_open, button1abc_close] - ) + button1abc_open = gr.Button("开启一键三连", variant="primary",visible=True) + button1abc_close = gr.Button("终止一键三连", variant="primary",visible=False) + info1abc=gr.Textbox(label="一键三连进程输出信息") + button1a_open.click(open1a, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,bert_pretrained_dir], [info1a,button1a_open,button1a_close]) + 
button1a_close.click(close1a, [], [info1a,button1a_open,button1a_close]) + button1b_open.click(open1b, [inp_text,inp_wav_dir,exp_name,gpu_numbers1Ba,cnhubert_base_dir], [info1b,button1b_open,button1b_close]) + button1b_close.click(close1b, [], [info1b,button1b_open,button1b_close]) + button1c_open.click(open1c, [inp_text,exp_name,gpu_numbers1c,pretrained_s2G], [info1c,button1c_open,button1c_close]) + button1c_close.click(close1c, [], [info1c,button1c_open,button1c_close]) + button1abc_open.click(open1abc, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G], [info1abc,button1abc_open,button1abc_close]) + button1abc_close.click(close1abc, [], [info1abc,button1abc_open,button1abc_close]) with gr.TabItem("1B-微调训练"): gr.Markdown(value="1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。") with gr.Row(): - batch_size = gr.Slider( - minimum=1, - maximum=40, - step=1, - label=i18n("每张显卡的batch_size"), - value=default_batch_size, - interactive=True, - ) - total_epoch = gr.Slider( - minimum=1, - maximum=20, - step=1, - label=i18n("总训练轮数total_epoch,不建议太高"), - value=8, - interactive=True, - ) - text_low_lr_rate = gr.Slider( - minimum=0.2, - maximum=0.6, - step=0.05, - label="文本模块学习率权重", - value=0.4, - interactive=True, - ) - save_every_epoch = gr.Slider( - minimum=1, - maximum=50, - step=1, - label=i18n("保存频率save_every_epoch"), - value=4, - interactive=True, - ) - if_save_latest = gr.Checkbox( - label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), - value=True, - interactive=True, - show_label=True, - ) - if_save_every_weights = gr.Checkbox( - label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), - value=True, - interactive=True, - show_label=True, - ) - gpu_numbers1Ba = gr.Textbox( - label="GPU卡号以-分割,每个卡号一个进程", - value="%s" % (gpus), - interactive=True, - ) + batch_size = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) + total_epoch = gr.Slider(minimum=1,maximum=20,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True) + text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label="文本模块学习率权重",value=0.4,interactive=True) + save_every_epoch = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True) + if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) + if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) + gpu_numbers1Ba = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程", value="%s" % (gpus), interactive=True) with gr.Row(): - button1Ba_open = gr.Button( - "开启SoVITS训练", variant="primary", visible=True - ) - button1Ba_close = gr.Button( - "终止SoVITS训练", variant="primary", visible=False - ) - info1Ba = gr.Textbox(label="SoVITS训练进程输出信息") + button1Ba_open = gr.Button("开启SoVITS训练", variant="primary",visible=True) + button1Ba_close = gr.Button("终止SoVITS训练", variant="primary",visible=False) + info1Ba=gr.Textbox(label="SoVITS训练进程输出信息") gr.Markdown(value="1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。") with gr.Row(): - batch_size1Bb = gr.Slider( - minimum=1, - maximum=40, - step=1, - label=i18n("每张显卡的batch_size"), - value=default_batch_size, - interactive=True, - ) - total_epoch1Bb = gr.Slider( - minimum=2, - maximum=100, - step=1, - label=i18n("总训练轮数total_epoch"), - value=15, - interactive=True, - ) - if_save_latest1Bb = gr.Checkbox( - label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), - value=True, - interactive=True, - show_label=True, - ) - 
if_save_every_weights1Bb = gr.Checkbox( - label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), - value=True, - interactive=True, - show_label=True, - ) - save_every_epoch1Bb = gr.Slider( - minimum=1, - maximum=50, - step=1, - label=i18n("保存频率save_every_epoch"), - value=5, - interactive=True, - ) - gpu_numbers1Bb = gr.Textbox( - label="GPU卡号以-分割,每个卡号一个进程", - value="%s" % (gpus), - interactive=True, - ) + batch_size1Bb = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) + total_epoch1Bb = gr.Slider(minimum=2,maximum=100,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True) + if_save_latest1Bb = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) + if_save_every_weights1Bb = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) + save_every_epoch1Bb = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=5,interactive=True) + gpu_numbers1Bb = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程", value="%s" % (gpus), interactive=True) with gr.Row(): - button1Bb_open = gr.Button( - "开启GPT训练", variant="primary", visible=True - ) - button1Bb_close = gr.Button( - "终止GPT训练", variant="primary", visible=False - ) - info1Bb = gr.Textbox(label="GPT训练进程输出信息") - button1Ba_open.click( - open1Ba, - [ - batch_size, - total_epoch, - exp_name, - text_low_lr_rate, - if_save_latest, - if_save_every_weights, - save_every_epoch, - gpu_numbers1Ba, - pretrained_s2G, - pretrained_s2D, - ], - [info1Ba, button1Ba_open, button1Ba_close], - ) - button1Ba_close.click( - close1Ba, [], [info1Ba, button1Ba_open, button1Ba_close] - ) - button1Bb_open.click( - open1Bb, - [ - batch_size1Bb, - total_epoch1Bb, - exp_name, - if_save_latest1Bb, - if_save_every_weights1Bb, - save_every_epoch1Bb, - gpu_numbers1Bb, - pretrained_s1, - ], - [info1Bb, button1Bb_open, button1Bb_close], - ) - button1Bb_close.click( - close1Bb, [], [info1Bb, button1Bb_open, button1Bb_close] - ) + button1Bb_open = gr.Button("开启GPT训练", variant="primary",visible=True) + button1Bb_close = gr.Button("终止GPT训练", variant="primary",visible=False) + info1Bb=gr.Textbox(label="GPT训练进程输出信息") + button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Ba,button1Ba_open,button1Ba_close]) + button1Ba_close.click(close1Ba, [], [info1Ba,button1Ba_open,button1Ba_close]) + button1Bb_open.click(open1Bb, [batch_size1Bb,total_epoch1Bb,exp_name,if_save_latest1Bb,if_save_every_weights1Bb,save_every_epoch1Bb,gpu_numbers1Bb,pretrained_s1], [info1Bb,button1Bb_open,button1Bb_close]) + button1Bb_close.click(close1Bb, [], [info1Bb,button1Bb_open,button1Bb_close]) with gr.TabItem("1C-推理"): - gr.Markdown( - value="选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。" - ) + gr.Markdown(value="选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。") with gr.Row(): - GPT_dropdown = gr.Dropdown( - label="*GPT模型列表", - choices=sorted(GPT_names), - value=pretrained_gpt_name, - ) - SoVITS_dropdown = gr.Dropdown( - label="*SoVITS模型列表", - choices=sorted(SoVITS_names), - value=pretrained_sovits_name, - ) - gpu_number_1C = gr.Textbox( - label="GPU卡号,只能填1个整数", value=gpus, interactive=True - ) + GPT_dropdown = gr.Dropdown(label="*GPT模型列表", choices=sorted(GPT_names),value=pretrained_gpt_name) + SoVITS_dropdown = gr.Dropdown(label="*SoVITS模型列表", 
choices=sorted(SoVITS_names),value=pretrained_sovits_name) + gpu_number_1C=gr.Textbox(label="GPU卡号,只能填1个整数", value=gpus, interactive=True) refresh_button = gr.Button("刷新模型路径", variant="primary") - refresh_button.click( - fn=change_choices, - inputs=[], - outputs=[SoVITS_dropdown, GPT_dropdown], - ) + refresh_button.click(fn=change_choices,inputs=[],outputs=[SoVITS_dropdown,GPT_dropdown]) with gr.Row(): if_tts = gr.Checkbox(label="是否开启TTS推理WebUI", show_label=True) tts_info = gr.Textbox(label="TTS推理WebUI进程输出信息") - if_tts.change( - change_tts_inference, - [ - if_tts, - bert_pretrained_dir, - cnhubert_base_dir, - gpu_number_1C, - GPT_dropdown, - SoVITS_dropdown, - ], - [tts_info], - ) - with gr.TabItem("2-GPT-SoVITS-变声"): - gr.Markdown(value="施工中,请静候佳音") + if_tts.change(change_tts_inference, [if_tts,bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown], [tts_info]) + with gr.TabItem("2-GPT-SoVITS-变声"):gr.Markdown(value="施工中,请静候佳音") - """ + ''' os.environ["gpt_path"]=gpt_path os.environ["sovits_path"]=sovits_path#bert_pretrained_dir os.environ["cnhubert_base_path"]=cnhubert_base_path#cnhubert_base_dir os.environ["bert_path"]=bert_path os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_number - """ + ''' app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", From d2d43437a8e95343055088b0a32ad04b23119d18 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 00:31:02 +0800 Subject: [PATCH 06/46] Add files via upload --- .../prepare_datasets/2-get-hubert-wav32k.py | 110 ++++++++---------- 1 file changed, 48 insertions(+), 62 deletions(-) diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py index 25cb4a83..1a5de8c6 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py @@ -1,23 +1,20 @@ # -*- coding: utf-8 -*- -import sys, os - -inp_text = os.environ.get("inp_text") -inp_wav_dir = os.environ.get("inp_wav_dir") -exp_name = os.environ.get("exp_name") -i_part = os.environ.get("i_part") -all_parts = os.environ.get("all_parts") -os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES") +import sys,os +inp_text= os.environ.get("inp_text") +inp_wav_dir= os.environ.get("inp_wav_dir") +exp_name= os.environ.get("exp_name") +i_part= os.environ.get("i_part") +all_parts= os.environ.get("all_parts") +os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") from feature_extractor import cnhubert +opt_dir= os.environ.get("opt_dir") +cnhubert.cnhubert_base_path= os.environ.get("cnhubert_base_dir") +is_half=eval(os.environ.get("is_half","True")) -opt_dir = os.environ.get("opt_dir") -cnhubert.cnhubert_base_path = os.environ.get("cnhubert_base_dir") -is_half = eval(os.environ.get("is_half", "True")) - -import pdb, traceback, numpy as np, logging +import pdb,traceback,numpy as np,logging from scipy.io import wavfile -import librosa, torch - +import librosa,torch now_dir = os.getcwd() sys.path.append(now_dir) from my_utils import load_audio @@ -35,75 +32,64 @@ from my_utils import load_audio from time import time as ttime import shutil +def my_save(fea,path):#####fix issue: torch.save doesn't support chinese path + dir=os.path.dirname(path) + name=os.path.basename(path) + tmp_path="%s/%s%s.pth"%(dir,ttime(),i_part) + torch.save(fea,tmp_path) + shutil.move(tmp_path,"%s/%s"%(dir,name)) +hubert_dir="%s/4-cnhubert"%(opt_dir) +wav32dir="%s/5-wav32k"%(opt_dir) 
+os.makedirs(opt_dir,exist_ok=True) +os.makedirs(hubert_dir,exist_ok=True) +os.makedirs(wav32dir,exist_ok=True) -def my_save(fea, path): #####fix issue: torch.save doesn't support chinese path - dir = os.path.dirname(path) - name = os.path.basename(path) - tmp_path = "%s/%s%s.pth" % (dir, ttime(), i_part) - torch.save(fea, tmp_path) - shutil.move(tmp_path, "%s/%s" % (dir, name)) - - -hubert_dir = "%s/4-cnhubert" % (opt_dir) -wav32dir = "%s/5-wav32k" % (opt_dir) -os.makedirs(opt_dir, exist_ok=True) -os.makedirs(hubert_dir, exist_ok=True) -os.makedirs(wav32dir, exist_ok=True) - -maxx = 0.95 -alpha = 0.5 -device = "cuda:0" -model = cnhubert.get_model() -if is_half == True: - model = model.half().to(device) +maxx=0.95 +alpha=0.5 +device="cuda:0" +model=cnhubert.get_model() +if(is_half==True): + model=model.half().to(device) else: model = model.to(device) - - def name2go(wav_name): - hubert_path = "%s/%s.pt" % (hubert_dir, wav_name) - if os.path.exists(hubert_path): - return - wav_path = "%s/%s" % (inp_wav_dir, wav_name) + hubert_path="%s/%s.pt"%(hubert_dir,wav_name) + if(os.path.exists(hubert_path)):return + if(inp_wav_dir!=""): + wav_path="%s/%s"%(inp_wav_dir,wav_name) tmp_audio = load_audio(wav_path, 32000) tmp_max = np.abs(tmp_audio).max() if tmp_max > 2.2: print("%s-%s-%s-filtered" % (idx0, idx1, tmp_max)) return - tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha * 32768)) + ( - (1 - alpha) * 32768 - ) * tmp_audio - tmp_audio = librosa.resample(tmp_audio32, orig_sr=32000, target_sr=16000) + tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha*32768)) + ((1 - alpha)*32768) * tmp_audio + tmp_audio = librosa.resample( + tmp_audio32, orig_sr=32000, target_sr=16000 + ) tensor_wav16 = torch.from_numpy(tmp_audio) - if is_half == True: - tensor_wav16 = tensor_wav16.half().to(device) + if (is_half == True): + tensor_wav16=tensor_wav16.half().to(device) else: tensor_wav16 = tensor_wav16.to(device) - ssl = ( - model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"] - .transpose(1, 2) - .cpu() - ) # torch.Size([1, 768, 215]) - if np.isnan(ssl.detach().numpy()).sum() != 0: - return + ssl=model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"].transpose(1,2).cpu()#torch.Size([1, 768, 215]) + if np.isnan(ssl.detach().numpy()).sum()!= 0:return wavfile.write( - "%s/%s" % (wav32dir, wav_name), + "%s/%s"%(wav32dir,wav_name), 32000, tmp_audio32.astype("int16"), ) # torch.save(ssl,hubert_path ) - my_save(ssl, hubert_path) + my_save(ssl,hubert_path ) +with open(inp_text,"r",encoding="utf8")as f: + lines=f.read().strip("\n").split("\n") -with open(inp_text, "r", encoding="utf8") as f: - lines = f.read().strip("\n").split("\n") - -for line in lines[int(i_part) :: int(all_parts)]: +for line in lines[int(i_part)::int(all_parts)]: try: # wav_name,text=line.split("\t") wav_name, spk_name, language, text = line.split("|") - wav_name = os.path.basename(wav_name) + wav_name=os.path.basename(wav_name) name2go(wav_name) except: - print(line, traceback.format_exc()) + print(line,traceback.format_exc()) From 9619223bc38f31933eaa9eb50d6ca055a6317ee3 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 00:58:08 +0800 Subject: [PATCH 07/46] Update README.md --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 821dda57..2adecf06 100644 --- a/README.md +++ b/README.md @@ -121,6 +121,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. 
- [ ] **High Priority:**
   - [ ] Localization in Japanese and English.
   - [ ] User guide.
+   - [ ] Japanese and English dataset fine-tune training.

- [ ] **Features:**
   - [ ] Zero-shot voice conversion (5s) / few-shot voice conversion (1min).
@@ -131,7 +132,9 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
   - [ ] Develop tiny and larger-sized TTS models.
   - [ ] Colab scripts.
   - [ ] Expand training dataset (2k -> 10k).
-
+   - [ ] Better SoVITS base model (enhanced audio quality).
+   - [ ] Model mixing.
+
 ## Credits

 Special thanks to the following projects and contributors:

From ee1d99ab3caeda97ba2a3a2d4d913fcd2d3c0ecc Mon Sep 17 00:00:00 2001
From: Ilaria <108286953+TheStingerX@users.noreply.github.com>
Date: Thu, 18 Jan 2024 01:36:11 +0100
Subject: [PATCH 08/46] Fixed i18n error

Line 27 of webui.py contains an import from i18n.i18n, but the module was
not listed in the requirements, so I added the i18n module.

Line for context: "from i18n.i18n import I18nAuto"
---
 requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements.txt b/requirements.txt
index 7c613b35..d4b6303f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,4 @@ sentencepiece
 transformers
 chardet
 PyYAML
+i18n

From 0d9a04cf60f55c2d0b79d43e7e9916ce54a343ee Mon Sep 17 00:00:00 2001
From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com>
Date: Thu, 18 Jan 2024 10:51:54 +0800
Subject: [PATCH 09/46] Change i18n folder

---
 webui.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/webui.py b/webui.py
index dbccba79..3dd1bfdb 100644
--- a/webui.py
+++ b/webui.py
@@ -24,7 +24,7 @@ import gradio as gr
 from subprocess import Popen
 import signal
 from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix
-from i18n.i18n import I18nAuto
+from tools.i18n.i18n import I18nAuto
 i18n = I18nAuto()
 from scipy.io import wavfile
 from tools.my_utils import load_audio

From 678616c0edfde97b0c249bb75c4c4b1ef84de206 Mon Sep 17 00:00:00 2001
From: Erythrocyte3803 <2544390577@qq.com>
Date: Thu, 18 Jan 2024 14:04:43 +0900
Subject: [PATCH 10/46] fixed i18n.i18n not found error

---
 webui.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/webui.py b/webui.py
index dbccba79..3dd1bfdb 100644
--- a/webui.py
+++ b/webui.py
@@ -24,7 +24,7 @@ import gradio as gr
 from subprocess import Popen
 import signal
 from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix
-from i18n.i18n import I18nAuto
+from tools.i18n.i18n import I18nAuto
 i18n = I18nAuto()
 from scipy.io import wavfile
 from tools.my_utils import load_audio

From 3a167888e2f6199f8089208e60d2bb80f797a355 Mon Sep 17 00:00:00 2001
From: Ke
Date: Thu, 18 Jan 2024 14:55:38 +0800
Subject: [PATCH 11/46] Kill process in Linux platform

Add a function to kill a process and its children recursively on Linux.
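The motivation, in short: a bare `kill -9 <pid>` only signals the parent, so
subprocess trees started from the WebUI could leave orphaned children behind.
A minimal sketch of the psutil-based approach (illustrative only; it mirrors
the helper added in the diff below and assumes `psutil` is installed):

```python
import os
import signal

import psutil

def kill_proc_tree(pid, including_parent=True):
    # Resolve the parent process; it may already have exited.
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return
    # children(recursive=True) enumerates the whole descendant tree,
    # so grandchildren spawned via shell=True are terminated as well.
    for child in parent.children(recursive=True):
        try:
            os.kill(child.pid, signal.SIGTERM)  # or signal.SIGKILL
        except OSError:
            pass
    if including_parent:
        try:
            os.kill(parent.pid, signal.SIGTERM)
        except OSError:
            pass
```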
--- webui.py | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index dbccba79..838e98bd 100644 --- a/webui.py +++ b/webui.py @@ -1,5 +1,8 @@ import json,yaml,warnings,torch import platform +import psutil +import os +import signal warnings.filterwarnings("ignore") torch.manual_seed(233333) @@ -30,7 +33,7 @@ from scipy.io import wavfile from tools.my_utils import load_audio from multiprocessing import cpu_count n_cpu=cpu_count() - + # 判断是否有能用来训练和加速推理的N卡 ngpu = torch.cuda.device_count() gpu_infos = [] @@ -78,15 +81,33 @@ p_uvr5=None p_asr=None p_tts_inference=None +def kill_proc_tree(pid, including_parent=True): + try: + parent = psutil.Process(pid) + except psutil.NoSuchProcess: + # Process already terminated + return + + children = parent.children(recursive=True) + for child in children: + try: + os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL + except OSError: + pass + if including_parent: + try: + os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL + except OSError: + pass + system=platform.system() def kill_process(pid): if(system=="Windows"): cmd = "taskkill /t /f /pid %s" % pid + os.system(cmd) else: - cmd = "kill -9 %s"%pid - print(cmd) - os.system(cmd)###linux上杀了webui,可能还会没杀干净。。。 - # os.kill(p_label.pid,19)#主进程#控制台进程#python子进程###不好使,连主进程的webui一起关了,辣鸡 + kill_proc_tree(pid) + def change_label(if_label,path_list): global p_label From 47c7c45e78a1ba140e09e9e500811bc0f706dbea Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 15:02:24 +0800 Subject: [PATCH 12/46] Update requirements.txt --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d4b6303f..7c613b35 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,4 +19,3 @@ sentencepiece transformers chardet PyYAML -i18n From dbbf616762350ed043ca1e699fd8b9abee22e709 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 17:58:10 +0800 Subject: [PATCH 13/46] Update webui.py --- webui.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index 94a83cd3..d799dcfa 100644 --- a/webui.py +++ b/webui.py @@ -12,13 +12,19 @@ tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp import site -site_packages_root="%s/runtime/Lib/site-packages"%now_dir +site_packages_roots = [] for path in site.getsitepackages(): - if("site-packages"in path):site_packages_root=path -os.environ["OPENBLAS_NUM_THREADS"] = "4" + if "packages" in path: + site_packages_roots.append(path) +if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir] +#os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" -with open("%s/users.pth"%(site_packages_root),"w")as f: - f.write("%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"%(now_dir,now_dir,now_dir,now_dir,now_dir)) +for site_packages_root in site_packages_roots: + with open("%s/users.pth" % (site_packages_root), "w") as f: + f.write( + "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" + % (now_dir, now_dir, now_dir, now_dir, now_dir) + ) import traceback sys.path.append(now_dir) import shutil From e2ddf97c313f672f14a83562e8fed8f7934bff4b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 18:46:11 +0800 Subject: [PATCH 14/46] Add files 
via upload --- webui.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index d799dcfa..8f238544 100644 --- a/webui.py +++ b/webui.py @@ -317,11 +317,12 @@ ps1a=[] def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): global ps1a if (ps1a == []): + opt_dir="%s/%s"%(exp_root,exp_name) config={ "inp_text":inp_text, "inp_wav_dir":inp_wav_dir, "exp_name":exp_name, - "opt_dir":"%s/%s"%(exp_root,exp_name), + "opt_dir":opt_dir, "bert_pretrained_dir":bert_pretrained_dir, } gpu_names=gpu_numbers.split("-") @@ -335,7 +336,7 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): "is_half": str(is_half) } ) - os.environ.update(config) + os.environ.update(config)# cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec print(cmd) p = Popen(cmd, shell=True) @@ -343,6 +344,15 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1a: p.wait() + opt = [] + for i_part in range(all_parts): + txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) + with open(txt_path, "r", encoding="utf8") as f: + opt += f.read().strip("\n").split("\n") + os.remove(txt_path) + path_text = "%s/2-name2text.txt" % opt_dir + with open(path_text, "w", encoding="utf8") as f: + f.write("\n".join(opt) + "\n") ps1a=[] yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: @@ -426,10 +436,11 @@ ps1c=[] def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): global ps1c if (ps1c == []): + opt_dir="%s/%s"%(exp_root,exp_name) config={ "inp_text":inp_text, "exp_name":exp_name, - "opt_dir":"%s/%s"%(exp_root,exp_name), + "opt_dir":opt_dir, "pretrained_s2G":pretrained_s2G_path, "s2config_path":"GPT_SoVITS/configs/s2.json", "is_half": str(is_half) @@ -452,6 +463,15 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1c: p.wait() + opt = ["item_name semantic_audio"] + path_semantic = "%s/6-name2semantic.tsv" % opt_dir + for i_part in range(all_parts): + semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) + with open(semantic_path, "r", encoding="utf8") as f: + opt += f.read().strip("\n").split("\n") + os.remove(semantic_path) + with open(path_semantic, "w", encoding="utf8") as f: + f.write("\n".join(opt) + "\n") ps1c=[] yield "语义token提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: @@ -476,7 +496,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb try: #############################1a path_text="%s/2-name2text.txt" % opt_dir - if(os.path.exists(path_text)==False): + if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and os.path.getsize(path_text)<10)): config={ "inp_text":inp_text, "inp_wav_dir":inp_wav_dir, @@ -543,7 +563,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb ps1abc=[] #############################1c path_semantic = "%s/6-name2semantic.tsv" % opt_dir - if(os.path.exists(path_semantic)==False): + if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and os.path.getsize(path_semantic)<28)): config={ "inp_text":inp_text, "exp_name":exp_name, From 54bd2b796163732bc2b31fa51ee98dc4e75025c8 Mon Sep 17 00:00:00 2001 From: RVC-Boss 
<129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 20:03:17 +0800 Subject: [PATCH 15/46] Update requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 7c613b35..2e640334 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,3 +19,4 @@ sentencepiece transformers chardet PyYAML +psutil From 230bf5bae6f85ec1b235b48fefe6ea6dba87324b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 20:22:42 +0800 Subject: [PATCH 16/46] Add files via upload --- webui.py | 51 ++++++++------------------------------------------- 1 file changed, 8 insertions(+), 43 deletions(-) diff --git a/webui.py b/webui.py index 8f238544..cdc87b7f 100644 --- a/webui.py +++ b/webui.py @@ -179,10 +179,6 @@ def close_asr(): p_asr=None return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False} -''' - button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Bb,button1Ba_open,button1Ba_close]) - button1Ba_close.click(close1Ba, [], [info1Bb,button1Ba_open,button1Ba_close]) -''' p_train_SoVITS=None def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D): global p_train_SoVITS @@ -303,16 +299,6 @@ def close_slice(): ps_slice=[] return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -''' -inp_text= os.environ.get("inp_text") -inp_wav_dir= os.environ.get("inp_wav_dir") -exp_name= os.environ.get("exp_name") -i_part= os.environ.get("i_part") -all_parts= os.environ.get("all_parts") -os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") -opt_dir= os.environ.get("opt_dir")#"/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name -bert_pretrained_dir= os.environ.get("bert_pretrained_dir")#"/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large" -''' ps1a=[] def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): global ps1a @@ -368,16 +354,7 @@ def close1a(): traceback.print_exc() ps1a=[] return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -''' -inp_text= os.environ.get("inp_text") -inp_wav_dir= os.environ.get("inp_wav_dir") -exp_name= os.environ.get("exp_name") -i_part= os.environ.get("i_part") -all_parts= os.environ.get("all_parts") -os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") -opt_dir= os.environ.get("opt_dir") -cnhubert.cnhubert_base_path= os.environ.get("cnhubert_base_dir") -''' + ps1b=[] def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): global ps1b @@ -423,15 +400,7 @@ def close1b(): traceback.print_exc() ps1b=[] return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -''' -inp_text= os.environ.get("inp_text") -exp_name= os.environ.get("exp_name") -i_part= os.environ.get("i_part") -all_parts= os.environ.get("all_parts") -os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") -opt_dir= os.environ.get("opt_dir") -pretrained_s2G= os.environ.get("pretrained_s2G") -''' + ps1c=[] def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): global ps1c @@ -682,7 +651,12 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: 
gr.Markdown(value="输出logs/实验名目录下应有23456开头的文件和文件夹") with gr.Row(): inp_text = gr.Textbox(label="*文本标注文件",value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list",interactive=True) - inp_wav_dir = gr.Textbox(label="*训练集音频文件目录",value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",interactive=True) + inp_wav_dir = gr.Textbox( + label="*训练集音频文件目录", + # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", + interactive=True, + placeholder="训练集音频文件目录拼list文件的目录。如果list文件已经是绝对路径,这里应该为空。" + ) gr.Markdown(value="1Aa-文本内容") with gr.Row(): gpu_numbers1a = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程",value="%s-%s"%(gpus,gpus),interactive=True) @@ -759,15 +733,6 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: tts_info = gr.Textbox(label="TTS推理WebUI进程输出信息") if_tts.change(change_tts_inference, [if_tts,bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown], [tts_info]) with gr.TabItem("2-GPT-SoVITS-变声"):gr.Markdown(value="施工中,请静候佳音") - - ''' - os.environ["gpt_path"]=gpt_path - os.environ["sovits_path"]=sovits_path#bert_pretrained_dir - os.environ["cnhubert_base_path"]=cnhubert_base_path#cnhubert_base_dir - os.environ["bert_path"]=bert_path - os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_number - ''' - app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, From 48509304992f541fda734e3671942366a87f9f8f Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 20:23:04 +0800 Subject: [PATCH 17/46] Update 2-get-hubert-wav32k.py --- GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py index 1a5de8c6..a5075ff4 100644 --- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py +++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py @@ -56,8 +56,7 @@ else: def name2go(wav_name): hubert_path="%s/%s.pt"%(hubert_dir,wav_name) if(os.path.exists(hubert_path)):return - if(inp_wav_dir!=""): - wav_path="%s/%s"%(inp_wav_dir,wav_name) + wav_path="%s/%s"%(inp_wav_dir,wav_name) tmp_audio = load_audio(wav_path, 32000) tmp_max = np.abs(tmp_audio).max() if tmp_max > 2.2: From cc33a767ebc7f96c91f8222037b662b3b138fe2a Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 20:24:19 +0800 Subject: [PATCH 18/46] Update webui.py --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index cdc87b7f..58821ced 100644 --- a/webui.py +++ b/webui.py @@ -655,7 +655,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: label="*训练集音频文件目录", # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", interactive=True, - placeholder="训练集音频文件目录拼list文件的目录。如果list文件已经是绝对路径,这里应该为空。" + placeholder="训练集音频文件目录 拼接 list文件里波形对应的文件名。" ) gr.Markdown(value="1Aa-文本内容") with gr.Row(): From 1afdb42295fc40de1328564f49755ceb8692f65b Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Thu, 18 Jan 2024 22:12:16 +0800 Subject: [PATCH 19/46] Update webui.py --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 58821ced..f958a074 100644 --- a/webui.py +++ b/webui.py @@ -532,7 +532,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb ps1abc=[] #############################1c path_semantic = "%s/6-name2semantic.tsv" % opt_dir - if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and 
os.path.getsize(path_semantic)<28)): + if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and os.path.getsize(path_semantic)<31)): config={ "inp_text":inp_text, "exp_name":exp_name, From 6dcaf262addba7c2309192152e9794574b59bef3 Mon Sep 17 00:00:00 2001 From: DW <147780325+D3lik@users.noreply.github.com> Date: Fri, 19 Jan 2024 07:34:10 +1100 Subject: [PATCH 20/46] Update webui.py --- webui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/webui.py b/webui.py index f958a074..6f3391cc 100644 --- a/webui.py +++ b/webui.py @@ -736,6 +736,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, + share=True, server_port=webui_port_main, quiet=True, ) From 79708faed48458c7d045a3682f3d3c92ff6a2bfe Mon Sep 17 00:00:00 2001 From: Ke Date: Fri, 19 Jan 2024 10:13:17 +0800 Subject: [PATCH 21/46] Disable debug level logging When using `inference_webui.py`, it produces debug level info for http requests, for example: ``` DEBUG:httpcore.http11:response_closed.started ``` Here I changed it to warning level. --- GPT_SoVITS/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/GPT_SoVITS/utils.py b/GPT_SoVITS/utils.py index 0ce03b33..e1a66ea1 100644 --- a/GPT_SoVITS/utils.py +++ b/GPT_SoVITS/utils.py @@ -18,7 +18,7 @@ logging.getLogger("matplotlib").setLevel(logging.ERROR) MATPLOTLIB_FLAG = False -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) +logging.basicConfig(stream=sys.stdout, level=logging.WARNING) logger = logging @@ -310,13 +310,13 @@ def check_git_hash(model_dir): def get_logger(model_dir, filename="train.log"): global logger logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) + logger.setLevel(logging.WARNING) formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") if not os.path.exists(model_dir): os.makedirs(model_dir) h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) + h.setLevel(logging.WARNING) h.setFormatter(formatter) logger.addHandler(h) return logger From 76164a07749538bb251b3279992b77b4c1ae4fc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=AC=AC=E7=B4=97=E7=89=B9?= <66856838+Miuzarte@users.noreply.github.com> Date: Fri, 19 Jan 2024 14:08:31 +0800 Subject: [PATCH 22/46] Add api.py --- api.py | 324 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 324 insertions(+) create mode 100644 api.py diff --git a/api.py b/api.py new file mode 100644 index 00000000..41ddbf66 --- /dev/null +++ b/api.py @@ -0,0 +1,324 @@ +import argparse +import os +import signal +import sys +from time import time as ttime +import torch +import librosa +import soundfile as sf +from fastapi import FastAPI, Request, HTTPException +from fastapi.responses import StreamingResponse +import uvicorn +from transformers import AutoModelForMaskedLM, AutoTokenizer +import numpy as np +from feature_extractor import cnhubert +from io import BytesIO +from module.models import SynthesizerTrn +from AR.models.t2s_lightning_module import Text2SemanticLightningModule +from text import cleaned_text_to_sequence +from text.cleaner import clean_text +from module.mel_processing import spectrogram_torch +from my_utils import load_audio + +DEFAULT_PORT = 9880 +DEFAULT_CNHUBERT = "GPT_SoVITS/pretrained_models/chinese-hubert-base" +DEFAULT_BERT = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" +DEFAULT_HALF = True + +DEFAULT_GPT = 
"GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" +DEFAULT_SOVITS = "GPT_SoVITS/pretrained_models/s2G488k.pth" + +AVAILABLE_COMPUTE = "cuda" if torch.cuda.is_available() else "cpu" + +parser = argparse.ArgumentParser(description="GPT-SoVITS api") + +parser.add_argument("-g", "--gpt_path", type=str, default="", help="GPT模型路径") +parser.add_argument("-s", "--sovits_path", type=str, default="", help="SoVITS模型路径") + +parser.add_argument("-dr", "--default_refer_path", type=str, default="", + help="默认参考音频路径, 请求缺少参考音频时调用") +parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") +parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") + +parser.add_argument("-d", "--device", type=str, default=AVAILABLE_COMPUTE, help="cuda / cpu") +parser.add_argument("-p", "--port", type=int, default=DEFAULT_PORT, help="default: 9880") +parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1") +parser.add_argument("-hp", "--half_precision", action='store_true', default=False) + +parser.add_argument("-hb", "--hubert_path", type=str, default=DEFAULT_CNHUBERT) +parser.add_argument("-b", "--bert_path", type=str, default=DEFAULT_BERT) + +args = parser.parse_args() + +gpt_path = args.gpt_path +sovits_path = args.sovits_path + +default_refer_path = args.default_refer_path +default_refer_text = args.default_refer_text +default_refer_language = args.default_refer_language +has_preset = False + +device = args.device +port = args.port +host = args.bind_addr +is_half = args.half_precision + +cnhubert_base_path = args.hubert_path +bert_path = args.bert_path + +if gpt_path == "": + gpt_path = DEFAULT_GPT + print("[WARN] 未指定GPT模型路径") +if sovits_path == "": + sovits_path = DEFAULT_SOVITS + print("[WARN] 未指定SoVITS模型路径") + +if default_refer_path == "" or default_refer_text == "" or default_refer_language == "": + default_refer_path, default_refer_text, default_refer_language = "", "", "" + print("[INFO] 未指定默认参考音频") + has_preset = False +else: + print(f"[INFO] 默认参考音频路径: {default_refer_path}") + print(f"[INFO] 默认参考音频文本: {default_refer_text}") + print(f"[INFO] 默认参考音频语种: {default_refer_language}") + has_preset = True + +cnhubert.cnhubert_base_path = cnhubert_base_path +tokenizer = AutoTokenizer.from_pretrained(bert_path) +bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) +# bert_model = AutoModelForSequenceClassification.from_pretrained(bert_path, config=bert_path+"/config.json") +if (is_half == True): + bert_model = bert_model.half().to(device) +else: + bert_model = bert_model.to(device) + + +# bert_model=bert_model.to(device) +def get_bert_feature(text, word2ph): + with torch.no_grad(): + inputs = tokenizer(text, return_tensors="pt") + for i in inputs: + inputs[i] = inputs[i].to(device) #####输入是long不用管精度问题,精度随bert_model + res = bert_model(**inputs, output_hidden_states=True) + res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1] + assert len(word2ph) == len(text) + phone_level_feature = [] + for i in range(len(word2ph)): + repeat_feature = res[i].repeat(word2ph[i], 1) + phone_level_feature.append(repeat_feature) + phone_level_feature = torch.cat(phone_level_feature, dim=0) + # if(is_half==True):phone_level_feature=phone_level_feature.half() + return phone_level_feature.T + + +n_semantic = 1024 +dict_s2 = torch.load(sovits_path, map_location="cpu") +hps = dict_s2["config"] + + +class DictToAttrRecursive: + def __init__(self, input_dict): + for key, value in 
input_dict.items(): + if isinstance(value, dict): + # 如果值是字典,递归调用构造函数 + setattr(self, key, DictToAttrRecursive(value)) + else: + setattr(self, key, value) + + +hps = DictToAttrRecursive(hps) +hps.model.semantic_frame_rate = "25hz" +dict_s1 = torch.load(gpt_path, map_location="cpu") +config = dict_s1["config"] +ssl_model = cnhubert.get_model() +if is_half: + ssl_model = ssl_model.half().to(device) +else: + ssl_model = ssl_model.to(device) + +vq_model = SynthesizerTrn( + hps.data.filter_length // 2 + 1, + hps.train.segment_size // hps.data.hop_length, + n_speakers=hps.data.n_speakers, + **hps.model) +if is_half: + vq_model = vq_model.half().to(device) +else: + vq_model = vq_model.to(device) +vq_model.eval() +print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) +hz = 50 +max_sec = config['data']['max_sec'] +t2s_model = Text2SemanticLightningModule(config, "ojbk", is_train=False) +t2s_model.load_state_dict(dict_s1["weight"]) +if is_half: + t2s_model = t2s_model.half() +t2s_model = t2s_model.to(device) +t2s_model.eval() +total = sum([param.nelement() for param in t2s_model.parameters()]) +print("Number of parameter: %.2fM" % (total / 1e6)) + + +def get_spepc(hps, filename): + audio = load_audio(filename, int(hps.data.sampling_rate)) + audio = torch.FloatTensor(audio) + audio_norm = audio + audio_norm = audio_norm.unsqueeze(0) + spec = spectrogram_torch(audio_norm, hps.data.filter_length, hps.data.sampling_rate, hps.data.hop_length, + hps.data.win_length, center=False) + return spec + + +dict_language = { + "中文": "zh", + "英文": "en", + "日文": "ja", + "ZH": "zh", + "EN": "en", + "JA": "ja", + "zh": "zh", + "en": "en", + "ja": "ja" +} + + +def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language): + t0 = ttime() + prompt_text = prompt_text.strip("\n") + prompt_language, text = prompt_language, text.strip("\n") + with torch.no_grad(): + wav16k, sr = librosa.load(ref_wav_path, sr=16000) # 派蒙 + wav16k = torch.from_numpy(wav16k) + if (is_half == True): + wav16k = wav16k.half().to(device) + else: + wav16k = wav16k.to(device) + ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2) # .float() + codes = vq_model.extract_latent(ssl_content) + prompt_semantic = codes[0, 0] + t1 = ttime() + prompt_language = dict_language[prompt_language] + text_language = dict_language[text_language] + phones1, word2ph1, norm_text1 = clean_text(prompt_text, prompt_language) + phones1 = cleaned_text_to_sequence(phones1) + texts = text.split("\n") + audio_opt = [] + zero_wav = np.zeros(int(hps.data.sampling_rate * 0.3), dtype=np.float16 if is_half == True else np.float32) + for text in texts: + phones2, word2ph2, norm_text2 = clean_text(text, text_language) + phones2 = cleaned_text_to_sequence(phones2) + if (prompt_language == "zh"): + bert1 = get_bert_feature(norm_text1, word2ph1).to(device) + else: + bert1 = torch.zeros((1024, len(phones1)), dtype=torch.float16 if is_half == True else torch.float32).to( + device) + if (text_language == "zh"): + bert2 = get_bert_feature(norm_text2, word2ph2).to(device) + else: + bert2 = torch.zeros((1024, len(phones2))).to(bert1) + bert = torch.cat([bert1, bert2], 1) + + all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0) + bert = bert.to(device).unsqueeze(0) + all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device) + prompt = prompt_semantic.unsqueeze(0).to(device) + t2 = ttime() + with torch.no_grad(): + # pred_semantic = t2s_model.model.infer( + pred_semantic, idx = 
t2s_model.model.infer_panel( + all_phoneme_ids, + all_phoneme_len, + prompt, + bert, + # prompt_phone_len=ph_offset, + top_k=config['inference']['top_k'], + early_stop_num=hz * max_sec) + t3 = ttime() + # print(pred_semantic.shape,idx) + pred_semantic = pred_semantic[:, -idx:].unsqueeze(0) # .unsqueeze(0)#mq要多unsqueeze一次 + refer = get_spepc(hps, ref_wav_path) # .to(device) + if (is_half == True): + refer = refer.half().to(device) + else: + refer = refer.to(device) + # audio = vq_model.decode(pred_semantic, all_phoneme_ids, refer).detach().cpu().numpy()[0, 0] + audio = \ + vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), + refer).detach().cpu().numpy()[ + 0, 0] ###试试重建不带上prompt部分 + audio_opt.append(audio) + audio_opt.append(zero_wav) + t4 = ttime() + print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) + yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16) + + +def restart(): + python = sys.executable + os.execl(python, python, *sys.argv) + + +def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language): + if command == "/restart": + restart() + elif command == "/exit": + os.kill(os.getpid(), signal.SIGTERM) + exit(0) + + if ( + refer_wav_path == "" or refer_wav_path is None + or prompt_text == "" or prompt_text is None + or prompt_language == "" or prompt_language is None + ): + refer_wav_path, prompt_text, prompt_language = ( + default_refer_path, + default_refer_text, + default_refer_language, + ) + if not has_preset: + raise HTTPException(status_code=400, detail="未指定参考音频且接口无预设") + + with torch.no_grad(): + gen = get_tts_wav( + refer_wav_path, prompt_text, prompt_language, text, text_language + ) + sampling_rate, audio_data = next(gen) + + wav = BytesIO() + sf.write(wav, audio_data, sampling_rate, format="wav") + wav.seek(0) + + torch.cuda.empty_cache() + return StreamingResponse(wav, media_type="audio/wav") + + +app = FastAPI() + + +@app.post("/") +async def tts_endpoint(request: Request): + json_post_raw = await request.json() + return handle( + json_post_raw.get("command"), + json_post_raw.get("refer_wav_path"), + json_post_raw.get("prompt_text"), + json_post_raw.get("prompt_language"), + json_post_raw.get("text"), + json_post_raw.get("text_language"), + ) + + +@app.get("/") +async def tts_endpoint( + command: str = None, + refer_wav_path: str = None, + prompt_text: str = None, + prompt_language: str = None, + text: str = None, + text_language: str = None, +): + return handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language) + + +if __name__ == "__main__": + uvicorn.run(app, host=host, port=port, workers=1) From 192668435b36502bef3bce259e5543266bf5f45e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=AC=AC=E7=B4=97=E7=89=B9?= <66856838+Miuzarte@users.noreply.github.com> Date: Fri, 19 Jan 2024 14:15:35 +0800 Subject: [PATCH 23/46] Match config.py --- api.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/api.py b/api.py index 41ddbf66..8a476cd6 100644 --- a/api.py +++ b/api.py @@ -19,16 +19,17 @@ from text import cleaned_text_to_sequence from text.cleaner import clean_text from module.mel_processing import spectrogram_torch from my_utils import load_audio +from config import python_exec, infer_device, is_half, api_port -DEFAULT_PORT = 9880 +DEFAULT_PORT = api_port DEFAULT_CNHUBERT = "GPT_SoVITS/pretrained_models/chinese-hubert-base" DEFAULT_BERT = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" -DEFAULT_HALF 
= True +DEFAULT_HALF = is_half DEFAULT_GPT = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" DEFAULT_SOVITS = "GPT_SoVITS/pretrained_models/s2G488k.pth" -AVAILABLE_COMPUTE = "cuda" if torch.cuda.is_available() else "cpu" +# AVAILABLE_COMPUTE = "cuda" if torch.cuda.is_available() else "cpu" parser = argparse.ArgumentParser(description="GPT-SoVITS api") @@ -40,7 +41,7 @@ parser.add_argument("-dr", "--default_refer_path", type=str, default="", parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") -parser.add_argument("-d", "--device", type=str, default=AVAILABLE_COMPUTE, help="cuda / cpu") +parser.add_argument("-d", "--device", type=str, default=infer_device, help="cuda / cpu") parser.add_argument("-p", "--port", type=int, default=DEFAULT_PORT, help="default: 9880") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1") parser.add_argument("-hp", "--half_precision", action='store_true', default=False) @@ -253,14 +254,9 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16) -def restart(): - python = sys.executable - os.execl(python, python, *sys.argv) - - def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language): if command == "/restart": - restart() + os.execl(python_exec, python_exec, *sys.argv) elif command == "/exit": os.kill(os.getpid(), signal.SIGTERM) exit(0) From 18c390768298d5950c6f1679e391a32f372b0d9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=AC=AC=E7=B4=97=E7=89=B9?= <66856838+Miuzarte@users.noreply.github.com> Date: Fri, 19 Jan 2024 14:25:15 +0800 Subject: [PATCH 24/46] Update config.py --- config.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/config.py b/config.py index 8e9721ac..aeeffe54 100644 --- a/config.py +++ b/config.py @@ -1,10 +1,13 @@ import sys -is_half=True -exp_root="logs" -python_exec=sys.executable or "python" -infer_device="cuda" -webui_port_main=9874 -webui_port_uvr5=9873 -webui_port_infer_tts=9872 -webui_port_subfix=9871 +is_half = True +exp_root = "logs" +python_exec = sys.executable or "python" +infer_device = "cuda" + +webui_port_main = 9874 +webui_port_uvr5 = 9873 +webui_port_infer_tts = 9872 +webui_port_subfix = 9871 + +api_port = 9880 From e0590b9c2659b6aa1ffd468babad52b7b6fdaba2 Mon Sep 17 00:00:00 2001 From: Yongzheng Lai Date: Fri, 19 Jan 2024 07:15:11 +0000 Subject: [PATCH 25/46] fix: users.pth path check --- webui.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index 6f3391cc..3b93e15e 100644 --- a/webui.py +++ b/webui.py @@ -20,11 +20,12 @@ if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" #os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" for site_packages_root in site_packages_roots: - with open("%s/users.pth" % (site_packages_root), "w") as f: - f.write( - "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" - % (now_dir, now_dir, now_dir, now_dir, now_dir) - ) + if os.path.exists("%s/users.pth" % (site_packages_root)): + with open("%s/users.pth" % (site_packages_root), "w") as f: + f.write( + "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" + % (now_dir, now_dir, now_dir, now_dir, now_dir) + ) import 
traceback
sys.path.append(now_dir)
import shutil

From d2c2d4eb34a6dcbd8f0127b212ad4cedd434a2a0 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Fri, 19 Jan 2024 15:23:14 +0800
Subject: [PATCH 26/46] Update webui.py

---
 webui.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/webui.py b/webui.py
index 3b93e15e..02ba03d1 100644
--- a/webui.py
+++ b/webui.py
@@ -20,7 +20,7 @@ if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages"
 #os.environ["OPENBLAS_NUM_THREADS"] = "4"
 os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
 for site_packages_root in site_packages_roots:
-    if os.path.exists("%s/users.pth" % (site_packages_root)):
+    if os.path.exists(site_packages_root):
         with open("%s/users.pth" % (site_packages_root), "w") as f:
             f.write(
                 "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"

From bd2c770ed93760d8dd4cc86ac1421d870cb8b96b Mon Sep 17 00:00:00 2001
From: Xaiat
Date: Fri, 19 Jan 2024 18:42:25 +0800
Subject: [PATCH 27/46] Update README and README_ZH

---
 README.md    |   2 +-
 README_ZH.md | 160 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 161 insertions(+), 1 deletion(-)
 create mode 100644 README_ZH.md

diff --git a/README.md b/README.md
index 2adecf06..47b29c55 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)

-[**English**](./README.md) | [**中文简体**](./docs/cn/README.md)
+[**English**](./README.md) | [**中文简体**](./README_ZH.md)

diff --git a/README_ZH.md b/README_ZH.md
new file mode 100644
index 00000000..db2f6bb8
--- /dev/null
+++ b/README_ZH.md
@@ -0,0 +1,160 @@
+<div align="center">
+
+<h1>GPT-SoVITS-WebUI</h1>
+少样本强大的声音转换与文本到语音网络界面。<br><br>
+
+[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange
+)](https://github.com/RVC-Boss/GPT-SoVITS)
+
+<div align="center">
+
+[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
+[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
+
+[**English**](./README.md) | [**中文简体**](./README_ZH.md)
+
+</div>
+ +------ + + + +> 查看我们的介绍视频 [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) + +https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb + +## 功能: +1. **零样本文本到语音(TTS):** 输入5秒的声音样本,即刻体验文本到语音转换。 + +2. **少样本TTS:** 仅需1分钟的训练数据即可微调模型,提升声音相似度和真实感。 + +3. **跨语言支持:** 支持与训练数据集不同语言的推理,目前支持英语、日语和中文。 + +4. **WebUI工具:** 集成工具包括声音伴奏分离、自动训练集分割、中文自动语音识别(ASR)和文本标注,协助初学者创建训练数据集和GPT/SoVITS模型。 + +## 环境准备 + +如果你是Windows用户(已在win>=10上测试),可以直接通过预打包文件安装。只需下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true),解压后双击go-webui.bat即可启动GPT-SoVITS-WebUI。 + +### Python和PyTorch版本 + +已在Python 3.9、PyTorch 2.0.1和CUDA 11上测试。 + +### 使用Conda快速安装 + +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +bash install.sh +``` +### 手动安装包 +#### Pip包 + +```bash +pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet +``` + +#### 额外要求 + +如果你需要中文自动语音识别(由FunASR支持),请安装: + +```bash +pip install modelscope torchaudio sentencepiece funasr +``` + +#### FFmpeg + +##### Conda 使用者 +```bash +conda install ffmpeg +``` + +##### Ubuntu/Debian 使用者 + +```bash +sudo apt install ffmpeg +sudo apt install libsox-dev +conda install -c conda-forge 'ffmpeg<7' +``` + +##### MacOS 使用者 + +```bash +brew install ffmpeg +``` + +##### Windows 使用者 + +下载并将 [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) 和 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) 放置在 GPT-SoVITS 根目录下。 + +### 预训练模型 + + +从 [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) 下载预训练模型,并将它们放置在 `GPT_SoVITS\pretrained_models` 中。 + +对于中文自动语音识别(另外),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/damo_asr/models` 中。 + +对于UVR5(人声/伴奏分离和混响移除,另外),从 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) 下载模型,并将它们放置在 `tools/uvr5/uvr5_weights` 中。 + + +## 数据集格式 + +文本到语音(TTS)注释 .list 文件格式: + +``` +vocal_path|speaker_name|language|text +``` + +语言字典: + +- 'zh': Chinese +- 'ja': Japanese +- 'en': English + +示例: + +``` +D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. 
+``` +## 待办事项清单 + +- [ ] **高优先级:** + - [ ] 日语和英语的本地化。 + - [ ] 用户指南。 + - [ ] 日语和英语数据集微调训练。 + +- [ ] **Features:** + - [ ] 零样本声音转换(5秒)/ 少样本声音转换(1分钟)。 + - [ ] TTS语速控制。 + - [ ] 增强的TTS情感控制。 + - [ ] 尝试将SoVITS令牌输入更改为词汇的概率分布。 + - [ ] 改进英语和日语文本前端。 + - [ ] 开发体积小和更大的TTS模型。 + - [ ] Colab脚本。 + - [ ] 扩展训练数据集(从2k到10k)。 + - [ ] 更好的sovits基础模型(增强的音频质量)。 + - [ ] 模型混合。 + +## 致谢 + +特别感谢以下项目和贡献者: + +- [ar-vits](https://github.com/innnky/ar-vits) +- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR) +- [vits](https://github.com/jaywalnut310/vits) +- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556) +- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain) +- [contentvec](https://github.com/auspicious3000/contentvec/) +- [hifi-gan](https://github.com/jik876/hifi-gan) +- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large) +- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41) +- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui) +- [audio-slicer](https://github.com/openvpi/audio-slicer) +- [SubFix](https://github.com/cronrpc/SubFix) +- [FFmpeg](https://github.com/FFmpeg/FFmpeg) +- [gradio](https://github.com/gradio-app/gradio) + +## 感谢所有贡献者的努力 + + + From 95bb2c921e857971d72b0118fd4f65691fd594a6 Mon Sep 17 00:00:00 2001 From: c4fun Date: Fri, 19 Jan 2024 20:49:03 +0800 Subject: [PATCH 28/46] =?UTF-8?q?=E8=A7=A3=E5=86=B3=E8=BE=93=E5=85=A5?= =?UTF-8?q?=E7=9B=AE=E6=A0=87=E6=96=87=E6=9C=AC=E7=9A=84=E7=A9=BA=E8=A1=8C?= =?UTF-8?q?=E5=AF=BC=E8=87=B4=E6=8A=A5=E9=94=99=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- GPT_SoVITS/inference_webui.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 7920d60a..e5e604f5 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -175,6 +175,9 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) dtype=np.float16 if is_half == True else np.float32, ) for text in texts: + # 解决输入目标文本的空行导致报错的问题 + if (len(text.strip()) == 0): + continue phones2, word2ph2, norm_text2 = clean_text(text, text_language) phones2 = cleaned_text_to_sequence(phones2) if prompt_language == "zh": From 426cc32258fb094e50097c3833c2b1da316e0a8e Mon Sep 17 00:00:00 2001 From: http-404-usernotfound <107795857+http-404-usernotfound@users.noreply.github.com> Date: Fri, 19 Jan 2024 21:47:42 +0530 Subject: [PATCH 29/46] Update README.md Some users were facing problems while installing the PIP packages because distutils.cmd module was missing in their Python environment. 
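For anyone hitting this, a quick way to confirm the cause (an illustrative
check, not part of the patch; exact error text varies by environment):

```python
# If this import raises ModuleNotFoundError, pip cannot build packages either;
# on Debian/Ubuntu with Python 3.9 the fix is `sudo apt-get install python3.9-distutils`.
try:
    import distutils.cmd
    print("distutils is available:", distutils.cmd.__file__)
except ModuleNotFoundError:
    print("distutils is missing; install the python3.9-distutils package")
```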
--- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 2adecf06..2c88c3bf 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,12 @@ conda activate GPTSoVits bash install.sh ``` ### Install Manually +#### Make sure you have the distutils for python3.9 installed + +```bash +sudo apt-get install python3.9-distutils +``` + #### Pip Packages ```bash From f2a1466995f74c60ceff4b4feb0076dcaee8e4c6 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Sat, 20 Jan 2024 15:11:11 +0900 Subject: [PATCH 30/46] Add Japanese README --- README.md | 2 +- docs/ja/README.md | 166 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 167 insertions(+), 1 deletion(-) create mode 100644 docs/ja/README.md diff --git a/README.md b/README.md index 2c88c3bf..3d7c9983 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.

[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)

-[**English**](./README.md) | [**中文简体**](./docs/cn/README.md)
+[**English**](./README.md) | [**中文简体**](./docs/cn/README.md) | [**日本語**](./docs/ja/README.md)

diff --git a/docs/ja/README.md b/docs/ja/README.md
new file mode 100644
index 00000000..1e7eebf7
--- /dev/null
+++ b/docs/ja/README.md
@@ -0,0 +1,166 @@
+<div align="center">
+
+<h1>GPT-SoVITS-WebUI</h1>
+パワフルな数発音声変換・音声合成 WebUI。<br><br>
+
+[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange
+)](https://github.com/RVC-Boss/GPT-SoVITS)
+
+<div align="center">
+
+[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
+[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)

+[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](./README.md)
+
+</div>
+ +------ + + + +> [デモ動画](https://www.bilibili.com/video/BV12g4y1m7Uw)をチェック! + +https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb + +## 機能: +1. **セロショット TTS:** 5秒間のボーカルサンプルを入力すると、即座にテキストから音声に変換されます。 + +2. **数ショット TTS:** わずか1分間のトレーニングデータでモデルを微調整し、音声の類似性とリアリズムを向上。 + +3. **多言語サポート:** 現在、英語、日本語、中国語をサポートしています。 + +4. **WebUI ツール:** 統合されたツールには、音声伴奏の分離、トレーニングセットの自動セグメンテーション、中国語 ASR、テキストラベリングが含まれ、初心者がトレーニングデータセットと GPT/SoVITS モデルを作成するのを支援します。 + +## 環境の準備 + +Windows ユーザーであれば(win>=10 にてテスト済み)、prezip 経由で直接インストールできます。[prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true) をダウンロードして解凍し、go-webui.bat をダブルクリックするだけで GPT-SoVITS-WebUI が起動します。 + +### Python と PyTorch のバージョン + +Python 3.9、PyTorch 2.0.1、CUDA 11でテスト済。 + +### Conda によるクイックインストール + +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +bash install.sh +``` +### 手動インストール +#### python3.9 用の distutils がインストールされていることを確認する + +```bash +sudo apt-get install python3.9-distutils +``` + +#### Pip パッケージ + +```bash +pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet +``` + +#### 追加要件 + +中国語の ASR(FunASR がサポート)が必要な場合は、以下をインストールしてください: + +```bash +pip install modelscope torchaudio sentencepiece funasr +``` + +#### FFmpeg + +##### Conda ユーザー +```bash +conda install ffmpeg +``` + +##### Ubuntu/Debian ユーザー + +```bash +sudo apt install ffmpeg +sudo apt install libsox-dev +conda install -c conda-forge 'ffmpeg<7' +``` + +##### MacOS ユーザー + +```bash +brew install ffmpeg +``` + +##### Windows ユーザー + +[ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) と [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) をダウンロードし、GPT-SoVITS のルートディレクトリに置きます。 + +### 事前訓練済みモデル + + +[GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) から事前訓練済みモデルをダウンロードし、`GPT_SoVITSpretrained_models` に置きます。 + +中国語 ASR(追加)については、[Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files)、[Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files)、[Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) からモデルをダウンロードし、`tools/damo_asr/models` に置いてください。 + +UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally) の場合は、[UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) からモデルをダウンロードして `tools/uvr5/uvr5_weights` に置きます。 + + +## データセット形式 + +TTS アノテーション .list ファイル形式: + +``` +vocal_path|speaker_name|language|text +``` + +言語辞書: + +- 'zh': 中国語 +- 'ja': 日本語 +- 'en': 英語 + +例: + +``` +D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. 
+``` +## Todo リスト + +- [ ] **優先度 高:** + - [ ] 日本語と英語でのローカライズ。 + - [ ] ユーザーガイド。 + - [ ] 日本語データセットと英語データセットのファインチューニングトレーニング。 + +- [ ] **機能:** + - [ ] ゼロショット音声変換(5秒)/数ショット音声変換(1分)。 + - [ ] TTS スピーキングスピードコントロール。 + - [ ] TTS の感情コントロールの強化。 + - [ ] SoVITS トークン入力を語彙の確率分布に変更する実験。 + - [ ] 英語と日本語のテキストフロントエンドを改善。 + - [ ] 小型と大型の TTS モデルを開発する。 + - [ ] Colab のスクリプト。 + - [ ] トレーニングデータセットを拡張する(2k→10k)。 + - [ ] より良い sovits ベースモデル(音質向上) + - [ ] モデルミックス + +## クレジット + +以下のプロジェクトとコントリビューターに感謝します: + +- [ar-vits](https://github.com/innnky/ar-vits) +- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR) +- [vits](https://github.com/jaywalnut310/vits) +- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556) +- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain) +- [contentvec](https://github.com/auspicious3000/contentvec/) +- [hifi-gan](https://github.com/jik876/hifi-gan) +- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large) +- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41) +- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui) +- [audio-slicer](https://github.com/openvpi/audio-slicer) +- [SubFix](https://github.com/cronrpc/SubFix) +- [FFmpeg](https://github.com/FFmpeg/FFmpeg) +- [gradio](https://github.com/gradio-app/gradio) + +## すべてのコントリビューターに感謝します + + + From 21b9c20fc8aae3be70bb209cbca9823ba7d7ac55 Mon Sep 17 00:00:00 2001 From: Miuzarte <982809597@qq.com> Date: Sat, 20 Jan 2024 14:15:02 +0800 Subject: [PATCH 31/46] =?UTF-8?q?config.py=E5=BC=95=E5=85=A5class,=20requi?= =?UTF-8?q?rement=E8=A1=A5=E5=85=A8,=20=E8=87=AA=E8=BF=B0=E8=BF=BD?= =?UTF-8?q?=E5=8A=A0=E5=B7=B2=E6=B5=8B=E8=AF=95=E7=8E=AF=E5=A2=83?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 3 ++- README.md | 9 +++++--- api.py | 63 +++++++++++++++++++++++++++++------------------------- config.py | 33 ++++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 33 deletions(-) diff --git a/.gitignore b/.gitignore index 3e82a980..ad5ee115 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ env -runtime \ No newline at end of file +runtime +.idea diff --git a/README.md b/README.md index 2adecf06..e6c71bfe 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,12 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350- If you are a Windows user (tested with win>=10) you can install directly via the prezip. Just download the [prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true), unzip it and double-click go-webui.bat to start GPT-SoVITS-WebUI. -### Python and PyTorch Version +### Tested Environments -Tested with Python 3.9, PyTorch 2.0.1, and CUDA 11. +- Python 3.9, PyTorch 2.0.1, CUDA 11 +- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3 + +_Note: numba==0.56.4 require py<3.11_ ### Quick Install with Conda @@ -90,7 +93,7 @@ Download and place [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWeb ### Pretrained Models -Download pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS\pretrained_models`. +Download pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS/pretrained_models`. 
For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/damo_asr/models`. diff --git a/api.py b/api.py index 8a476cd6..376b0bcf 100644 --- a/api.py +++ b/api.py @@ -19,40 +19,37 @@ from text import cleaned_text_to_sequence from text.cleaner import clean_text from module.mel_processing import spectrogram_torch from my_utils import load_audio -from config import python_exec, infer_device, is_half, api_port +import config as global_config -DEFAULT_PORT = api_port -DEFAULT_CNHUBERT = "GPT_SoVITS/pretrained_models/chinese-hubert-base" -DEFAULT_BERT = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" -DEFAULT_HALF = is_half - -DEFAULT_GPT = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" -DEFAULT_SOVITS = "GPT_SoVITS/pretrained_models/s2G488k.pth" +g_config = global_config.Config() # AVAILABLE_COMPUTE = "cuda" if torch.cuda.is_available() else "cpu" parser = argparse.ArgumentParser(description="GPT-SoVITS api") -parser.add_argument("-g", "--gpt_path", type=str, default="", help="GPT模型路径") -parser.add_argument("-s", "--sovits_path", type=str, default="", help="SoVITS模型路径") +parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径") +parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径") parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径, 请求缺少参考音频时调用") parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") -parser.add_argument("-d", "--device", type=str, default=infer_device, help="cuda / cpu") -parser.add_argument("-p", "--port", type=int, default=DEFAULT_PORT, help="default: 9880") +parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu") +parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1") -parser.add_argument("-hp", "--half_precision", action='store_true', default=False) +parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") +parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度") +# bool值的用法为 `python ./api.py -fp ...` +# 此时 full_precision==True, half_precision==False -parser.add_argument("-hb", "--hubert_path", type=str, default=DEFAULT_CNHUBERT) -parser.add_argument("-b", "--bert_path", type=str, default=DEFAULT_BERT) +parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="覆盖config.cnhubert_path") +parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="覆盖config.bert_path") args = parser.parse_args() -gpt_path = args.gpt_path sovits_path = args.sovits_path +gpt_path = args.gpt_path default_refer_path = args.default_refer_path default_refer_text = args.default_refer_text @@ -62,18 +59,15 @@ has_preset = False device = args.device port = args.port host = args.bind_addr -is_half = 
args.half_precision -cnhubert_base_path = args.hubert_path -bert_path = args.bert_path - -if gpt_path == "": - gpt_path = DEFAULT_GPT - print("[WARN] 未指定GPT模型路径") if sovits_path == "": - sovits_path = DEFAULT_SOVITS - print("[WARN] 未指定SoVITS模型路径") + sovits_path = g_config.pretrained_sovits_path + print(f"[WARN] 未指定SoVITS模型路径, fallback后当前值: {sovits_path}") +if gpt_path == "": + gpt_path = g_config.pretrained_gpt_path + print(f"[WARN] 未指定GPT模型路径, fallback后当前值: {gpt_path}") +# 指定默认参考音频, 调用方 未提供/未给全 参考音频参数时使用 if default_refer_path == "" or default_refer_text == "" or default_refer_language == "": default_refer_path, default_refer_text, default_refer_language = "", "", "" print("[INFO] 未指定默认参考音频") @@ -84,17 +78,28 @@ else: print(f"[INFO] 默认参考音频语种: {default_refer_language}") has_preset = True +is_half = g_config.is_half +if args.full_precision: + is_half = False +if args.half_precision: + is_half = True +if args.full_precision and args.half_precision: + is_half = g_config.is_half # 炒饭fallback + +print(f"[INFO] 半精: {is_half}") + +cnhubert_base_path = args.hubert_path +bert_path = args.bert_path + cnhubert.cnhubert_base_path = cnhubert_base_path tokenizer = AutoTokenizer.from_pretrained(bert_path) bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) -# bert_model = AutoModelForSequenceClassification.from_pretrained(bert_path, config=bert_path+"/config.json") -if (is_half == True): +if is_half: bert_model = bert_model.half().to(device) else: bert_model = bert_model.to(device) -# bert_model=bert_model.to(device) def get_bert_feature(text, word2ph): with torch.no_grad(): inputs = tokenizer(text, return_tensors="pt") @@ -256,7 +261,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) def handle(command, refer_wav_path, prompt_text, prompt_language, text, text_language): if command == "/restart": - os.execl(python_exec, python_exec, *sys.argv) + os.execl(g_config.python_exec, g_config.python_exec, *sys.argv) elif command == "/exit": os.kill(os.getpid(), signal.SIGTERM) exit(0) diff --git a/config.py b/config.py index aeeffe54..504ca62d 100644 --- a/config.py +++ b/config.py @@ -1,6 +1,16 @@ import sys + +# 推理用的指定模型 +sovits_path = "" +gpt_path = "" is_half = True + +cnhubert_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base" +bert_path = "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" +pretrained_sovits_path = "GPT_SoVITS/pretrained_models/s2G488k.pth" +pretrained_gpt_path = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt" + exp_root = "logs" python_exec = sys.executable or "python" infer_device = "cuda" @@ -11,3 +21,26 @@ webui_port_infer_tts = 9872 webui_port_subfix = 9871 api_port = 9880 + + +class Config: + def __init__(self): + self.sovits_path = sovits_path + self.gpt_path = gpt_path + self.is_half = is_half + + self.cnhubert_path = cnhubert_path + self.bert_path = bert_path + self.pretrained_sovits_path = pretrained_sovits_path + self.pretrained_gpt_path = pretrained_gpt_path + + self.exp_root = exp_root + self.python_exec = python_exec + self.infer_device = infer_device + + self.webui_port_main = webui_port_main + self.webui_port_uvr5 = webui_port_uvr5 + self.webui_port_infer_tts = webui_port_infer_tts + self.webui_port_subfix = webui_port_subfix + + self.api_port = api_port From eb7e8641189e223ee4262ed83cb33312ce7b0d19 Mon Sep 17 00:00:00 2001 From: Rafael Godoy <78083427+RafaelGodoyEbert@users.noreply.github.com> Date: Sat, 20 Jan 2024 20:40:11 -0300 Subject: [PATCH 32/46] Fix of the i18n implementation 
+ pt_BR In the webui.py file, several strings with i18n code were missing to enable translation as soon as they are identified. --- i18n/locale/en_US.json | 228 +++++++++++++------------------ i18n/locale/pt_BR.json | 93 +++++++++++++ webui.py | 302 +++++++++++++++++++++-------------------- 3 files changed, 344 insertions(+), 279 deletions(-) create mode 100644 i18n/locale/pt_BR.json diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index d5855052..51775333 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -1,135 +1,93 @@ -{ - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.", - "A模型权重": "Weight (w) for Model A:", - "A模型路径": "Path to Model A:", - "B模型路径": "Path to Model B:", - "E:\\语音音频+标注\\米津玄师\\src": "C:\\Users\\Desktop\\src", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:", - "Index Rate": "Index Rate", - "Onnx导出": "Export Onnx", - "Onnx输出路径": "Onnx Export Path:", - "RVC模型路径": "RVC Model Path:", - "ckpt处理": "ckpt Processing", - "harvest进程数": "Number of CPU processes used for harvest pitch algorithm", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "pth文件路径不可包含中文": "pth文件路径不可包含中文", - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.", - "step1:正在处理数据": "Step 1: Processing data", - "step2:正在提取音高&正在提取特征": "step2:Pitch extraction & feature extraction", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):", - "step3: 填写训练设置, 开始训练模型和索引": "Step 3: Fill in the training settings and start training the model and index", - "step3a:正在训练模型": "Step 3a: Model training started", - "一键训练": "One-click training", - "也可批量输入音频文件, 二选一, 优先读文件夹": "Multiple audio files can also be imported. If a folder path exists, this input is ignored.", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch processing for vocal accompaniment separation using the UVR5 model.
Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).
The model is divided into three categories:
1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.
2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.
3. De-reverb and de-delay models (by FoxJoy):
  (1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;
 (234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.
De-reverb/de-delay notes:
1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.
2. The MDX-Net-Dereverb model is quite slow.
3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive.", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:", - "伴奏人声分离&去混响&去回声": "Vocals/Accompaniment Separation & Reverberation Removal", - "使用模型采样率": "使用模型采样率", - "使用设备采样率": "使用设备采样率", - "保存名": "Save name:", - "保存的文件名, 默认空为和源文件同名": "Save file name (default: same as the source file):", - "保存的模型名不带后缀": "Saved model name (without extension):", - "保存频率save_every_epoch": "Save frequency (save_every_epoch):", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:", - "修改": "Modify", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modify model information (only supported for small model files extracted from the 'weights' folder)", - "停止音频转换": "Stop audio conversion", - "全流程结束!": "All processes have been completed!", - "刷新音色列表和索引路径": "Refresh voice list and index path", - "加载模型": "Load model", - "加载预训练底模D路径": "Load pre-trained base model D path:", - "加载预训练底模G路径": "Load pre-trained base model G path:", - "单次推理": "Single Inference", - "卸载音色省显存": "Unload voice to save GPU memory:", - "变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):", - "后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:", - "否": "No", - "启用相位声码器": "启用相位声码器", - "响应阈值": "Response threshold", - "响度因子": "loudness factor", - "处理数据": "Process data", - "导出Onnx模型": "Export Onnx Model", - "导出文件格式": "Export file format", - "常见问题解答": "FAQ (Frequently Asked Questions)", - "常规设置": "General settings", - "开始音频转换": "Start audio conversion", - "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.", - "性能设置": "Performance settings", - "总训练轮数total_epoch": "Total training epochs (total_epoch):", - "批量推理": "Batch Inference", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').", - "指定输出主人声文件夹": "Specify the output folder for vocals:", - "指定输出文件夹": "Specify output folder:", - "指定输出非主人声文件夹": "Specify the output folder for accompaniment:", - "推理时间(ms):": "Inference time (ms):", - "推理音色": "Inferencing voice:", - "提取": "Extract", - "提取音高和处理数据使用的CPU进程数": "Number of CPU processes used for pitch extraction and data processing:", - "是": "Yes", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:", - "显卡信息": "GPU Information", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible.
If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory Agreement-LICENSE.txt for details.", - "查看": "View", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "View model information (only supported for small model files extracted from the 'weights' folder)", - "检索特征占比": "Search feature ratio (controls accent strength, too high has artifacting):", - "模型": "Model", - "模型推理": "Model Inference", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:", - "模型是否带音高指导": "Whether the model has pitch guidance:", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Whether the model has pitch guidance (required for singing, optional for speech):", - "模型是否带音高指导,1是0否": "Whether the model has pitch guidance (1: yes, 0: no):", - "模型版本型号": "Model architecture version:", - "模型融合, 可用于测试音色融合": "Model fusion, can be used to test timbre fusion", - "模型路径": "Path to Model:", - "每张显卡的batch_size": "Batch size per GPU:", - "淡入淡出长度": "Fade length", - "版本": "Version", - "特征提取": "Feature extraction", - "特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.", - "目标采样率": "Target sample rate:", - "算法延迟(ms):": "Algorithmic delays(ms):", - "自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown:", - "融合": "Fusion", - "要改的模型信息": "Model information to be modified:", - "要置入的模型信息": "Model information to be placed:", - "训练": "Train", - "训练模型": "Train model", - "训练特征索引": "Train feature index", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.", - "请指定说话人id": "Please specify the speaker/singer ID:", - "请选择index文件": "Please choose the .index file", - "请选择pth文件": "Please choose the .pth file", - "请选择说话人id": "Select Speaker/Singer ID:", - "转换": "Convert", - "输入实验名": "Enter the experiment name:", - "输入待处理音频文件夹路径": "Enter the path of the audio folder to be processed:", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):", - "输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. 
Closer to 1 will be more of a consistently loud volume:", - "输入监听": "Input voice monitor", - "输入训练文件夹路径": "Enter the path of the training folder:", - "输入设备": "Input device", - "输入降噪": "Input noise reduction", - "输出信息": "Output information", - "输出变声": "Output converted voice", - "输出设备": "Output device", - "输出降噪": "Output noise reduction", - "输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)", - "选择.index文件": "Select the .index file", - "选择.pth文件": "Select the .pth file", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive), 'rmvpe': best quality, and little GPU requirement", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU", - "采样率:": "采样率:", - "采样长度": "Sample length", - "重载设备列表": "Reload device list", - "音调设置": "Pitch settings", - "音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)", - "音高算法": "pitch detection algorithm", - "额外推理时长": "Extra inference time" -} +{ + "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately you do not have a working graphics card to support your training", + "UVR5已开启": "UVR5 is on", + "UVR5已关闭": "UVR5 is turned off", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is open source under the MIT license. The author does not have any control over the software. Those who use the software and disseminate the sounds exported by the software are fully responsible.
If you do not agree with this clause, you cannot use or quote any code and code in the software package. File. See root directory LICENSE for details.", + "0-前置数据集获取工具": "0- Pre-front dataset acquisition tools", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0A-UVR5 voice accompaniment separation & to the sound of reverberation and delay tools", + "是否开启UVR5-WebUI": "Whether to turn on UVR5-WEBUI", + "UVR5进程输出信息": "UVR5 process output information", + "0b-语音切分工具": "0b-voice cutting tool", + "音频自动切分输入路径,可文件可文件夹": "Audio automatic cutting into the input path, the file and the folder", + "切分后的子音频的输出根目录": "The output root directory of the sub -audio output", + "threshold:音量小于这个值视作静音的备选切割点": "Threshold: The volume is less than this value as a mute alternative cutting point", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: The minimum length of each paragraph, if the first paragraph is too short to connect with the latter section until this value exceeds this value", + "min_interval:最短切割间隔": "min_interval: the shortest cutting interval", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "HOP_SIZE: How to calculate the volume curve, the smaller the accuracy, the higher the calculation amount (not the more accuracy, the better the effect, the better)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept: After cutting, how long is the mute at most", + "开启语音切割": "Open voice cutting", + "终止语音切割": "Termination of voice cutting", + "max:归一化后最大值多少": "MAX: How much is the maximum value after a normalization?", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: How many percentages are mixed back, and the audio comes in", + "切割使用的进程数": "Number of processes for cutting", + "语音切割进程输出信息": "Voice cutting process output information", + "0c-中文批量离线ASR工具": "0c- Chinese batch offline ASR tool", + "开启离线批量ASR": "Open offline batch ASR", + "终止ASR进程": "Terminate the ASR process", + "批量ASR(中文only)输入文件夹路径": "Batch ASR (Chinese only) input folder path", + "ASR进程输出信息": "ASR process output information", + "0d-语音文本校对标注工具": "0d-voice text school pairing tool", + "是否开启打标WebUI": "Whether to open the marking webui", + "打标数据标注文件路径": "Playing data label file path", + "打标工具进程输出信息": "Playing tool process output information", + "1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS", + "*实验/模型名": "*Experiment/model name", + "显卡信息": "Graphics card information", + "预训练的SoVITS-G模型路径": "Pre-trained SOVITS-G model path", + "预训练的SoVITS-D模型路径": "Pre-trained SOVITS-D model path", + "预训练的GPT模型路径": "Pre -training GPT model path", + "1A-训练集格式化工具": "1A-training collection tool", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "Output LOGS/experimental name directory should have files and folders starting with 23456", + "*文本标注文件": "*Text label file", + "*训练集音频文件目录": "*Training set audio file directory", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Training the file name corresponding to the waveform of the waveform in the List file of the audio file", + "1Aa-文本内容": "1AA-text content", + "GPU卡号以-分割,每个卡号一个进程": "GPU card number is divided by-division, each card number is one process", + "预训练的中文BERT模型路径": "Pre -training Chinese bert model path", + "开启文本获取": "Get the text to get", + "终止文本获取进程": "Termination text acquisition process", + "文本进程输出信息": "Text process output information", + "1Ab-SSL自监督特征提取": "1AB-SSL self-supervision feature extraction", + "预训练的SSL模型路径": "Pre -training SSL model path", + "开启SSL提取": "Open SSL extraction", + "终止SSL提取进程": "Termid SSL extraction process", + "SSL进程输出信息": "SSL process output information", + "1Ac-语义token提取": "1AC-semantic token extraction", + "开启语义token提取": "Open semantic token 
extraction", + "终止语义token提取进程": "Terminate semantics token extraction process", + "语义token提取进程输出信息": "Semantic token extraction process output information", + "1Aabc-训练集格式化一键三连": "1AABC-Training Collection Formulate One-button Three Companies", + "开启一键三连": "Open one button and three consecutive", + "终止一键三连": "Termine one button and three companies", + "一键三连进程输出信息": "One -click three -line process output information", + "1B-微调训练": "1B-fine-tuning training", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1ba-sovits training. The model file for sharing is output under SOVITS_WEIGHTS", + "每张显卡的batch_size": "Batch_size of each graphics card", + "总训练轮数total_epoch,不建议太高": "Total_epoch, the total training wheel is not recommended to be too high", + "文本模块学习率权重": "The weight of the text module learning rate", + "保存频率save_every_epoch": "Save the frequency save_every_epoch", + "是否仅保存最新的ckpt文件以节省硬盘空间": "Whether to save the latest CKPT file to save hard disk space", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "Whether to save the final small model to the Weights folder at each time of saving time", + "开启SoVITS训练": "Open SOVITS training", + "终止SoVITS训练": "Terminate SOVITS training", + "SoVITS训练进程输出信息": "Sovits training process output information", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1BB-GPT training. The model file for sharing is output under GPT_WEIGHTS", + "总训练轮数total_epoch": "Total_epoch, total training wheel", + "开启GPT训练": "Turn on GPT training", + "终止GPT训练": "Termid GPT training", + "GPT训练进程输出信息": "GPT training process output information", + "1C-推理": "1C-reasoning", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Select the models stored in Sovits_weights and GPT_WEIGHTS. The default is the bottom mold, experience for 5 seconds Zero Shot TTS", + "*GPT模型列表": "*GPT model list", + "*SoVITS模型列表": "*Sovits model list", + "GPU卡号,只能填1个整数": "GPU card number, can only fill in one integer", + "刷新模型路径": "Refresh the model path", + "是否开启TTS推理WebUI": "Whether to turn on the TTS reasoning webui", + "TTS推理WebUI进程输出信息": "TTS reasoning webui process output information", + "2-GPT-SoVITS-变声": "2-gpt-sovits-sound change", + "施工中,请静候佳音": "During the construction, please wait for good sound", + "TTS推理进程已开启": "TTS inference process has been started", + "TTS推理进程已关闭": "TTS inference process has been closed", + "打标工具WebUI已开启": "The marking tool WebUI is turned on", + "打标工具WebUI已关闭": "The marking tool WebUI has been closed" +} diff --git a/i18n/locale/pt_BR.json b/i18n/locale/pt_BR.json new file mode 100644 index 00000000..9a7cc935 --- /dev/null +++ b/i18n/locale/pt_BR.json @@ -0,0 +1,93 @@ +{ + "很遗憾您这没有能用的显卡来支持您训练": "Infelizmente, você não possui uma placa de vídeo funcional para suportar seu treinamento", + "UVR5已开启": "UVR5 está ativado", + "UVR5已关闭": "UVR5 está desativado", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Este software é de código aberto sob a licença MIT. O autor não tem controle sobre o software. Aqueles que usam o software e difundem os sons exportados pelo software são totalmente responsáveis.
Se você não concorda com esta cláusula, não pode usar ou citar nenhum código e arquivo dentro do pacote de software. Consulte o diretório raiz LICENSE para mais detalhes.

Traduzido por Rafael Godoy Ebert", + "0-前置数据集获取工具": "0- Ferramenta de aquisição de conjunto de dados pré-frontal", + "0a-UVR5人声伴奏分离&去混响去延迟工具": "0A-UVR5 separação de voz e acompanhamento instrumental & ferramenta para remover reverberação e atraso", + "是否开启UVR5-WebUI": "Se deseja ativar a UVR5-WEBUI", + "UVR5进程输出信息": "Informações de saída do processo UVR5", + "0b-语音切分工具": "0b- Ferramenta de corte de voz", + "音频自动切分输入路径,可文件可文件夹": "Caminho de entrada automático de corte de áudio, pode ser um arquivo ou uma pasta", + "切分后的子音频的输出根目录": "Diretório raiz de saída do sub-áudio após o corte", + "threshold:音量小于这个值视作静音的备选切割点": "Limiar: O volume menor que este valor é considerado como um ponto de corte mudo alternativo", + "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: O comprimento mínimo de cada parágrafo, se o primeiro for muito curto, conecte-o continuamente aos próximos até ultrapassar este valor", + "min_interval:最短切割间隔": "min_interval: O intervalo de corte mínimo", + "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "HOP_SIZE: Como calcular a curva de volume, quanto menor a precisão, maior a quantidade de cálculos (não significa que quanto maior a precisão, melhor o efeito)", + "max_sil_kept:切完后静音最多留多长": "max_sil_kept: Depois de cortar, por quanto tempo no máximo o silêncio é mantido", + "开启语音切割": "Ativar corte de voz", + "终止语音切割": "Encerrar corte de voz", + "max:归一化后最大值多少": "MAX: Qual é o valor máximo após a normalização?", + "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: Em que proporção o áudio normalizado é misturado de volta", + "切割使用的进程数": "Número de processos para corte", + "语音切割进程输出信息": "Informações de saída do processo de corte de voz", + "0c-中文批量离线ASR工具": "0c- Ferramenta chinesa de ASR offline em lote", + "开启离线批量ASR": "Ativar ASR offline em lote", + "终止ASR进程": "Encerrar processo ASR", + "批量ASR(中文only)输入文件夹路径": "Caminho da pasta de entrada para ASR em lote (apenas chinês)", + "ASR进程输出信息": "Informações de saída do processo ASR", + "0d-语音文本校对标注工具": "0d- Ferramenta de correção e marcação de texto de voz", + "是否开启打标WebUI": "Se deseja abrir o webui de marcação", + "打标数据标注文件路径": "Caminho do arquivo de marcação de dados de marcação", + "打标工具进程输出信息": "Informações de saída do processo da ferramenta de marcação", + "1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS", + "*实验/模型名": "*Nome do experimento/modelo", + "显卡信息": "Informações da placa de vídeo", + "预训练的SoVITS-G模型路径": "Caminho do modelo SoVITS-G pre-train", + "预训练的SoVITS-D模型路径": "Caminho do modelo SoVITS-D pre-train", + "预训练的GPT模型路径": "Caminho do modelo GPT pre-train", + "1A-训练集格式化工具": "1A-Ferramenta de formatação de conjunto de dados de treinamento", + "输出logs/实验名目录下应有23456开头的文件和文件夹": "Logs de saída/deve haver arquivos e pastas começando com 23456 no diretório do nome do experimento", + "*文本标注文件": "*Arquivo de marcação de texto", + "*训练集音频文件目录": "*Diretório de arquivos de áudio do conjunto de treinamento", + "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Diretório de arquivos de áudio do conjunto de treinamento. 
Concatene o nome do arquivo correspondente à forma de onda no arquivo de lista", + "1Aa-文本内容": "1AA-Conteúdo do texto", + "GPU卡号以-分割,每个卡号一个进程": "Número da placa de vídeo dividido por-, cada número de placa é um processo", + "预训练的中文BERT模型路径": "Caminho do modelo BERT chinês pre-train", + "开启文本获取": "Ativar obtenção de texto", + "终止文本获取进程": "Encerrar processo de obtenção de texto", + "文本进程输出信息": "Informações de saída do processo de texto", + "1Ab-SSL自监督特征提取": "1AB-Extração de características auto-supervisionadas SSL", + "预训练的SSL模型路径": "Caminho do modelo SSL pre-train", + "开启SSL提取": "Ativar extração SSL", + "终止SSL提取进程": "Encerrar processo de extração SSL", + "SSL进程输出信息": "Informações de saída do processo SSL", + "1Ac-语义token提取": "1AC-Extração de token semântico", + "开启语义token提取": "Ativar extração de token semântico", + "终止语义token提取进程": "Encerrar processo de extração de token semântico", + "语义token提取进程输出信息": "Informações de saída do processo de extração de token semântico", + "1Aabc-训练集格式化一键三连": "1AABC-Formatação de conjunto de treinamento em um clique", + "开启一键三连": "Ativar um clique", + "终止一键三连": "Encerrar um clique", + "一键三连进程输出信息": "Informações de saída do processo de um clique", + "1B-微调训练": "1B-Treinamento de ajuste fino", + "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1ba-Treinamento SoVITS. O arquivo de modelo para compartilhamento é gerado em SOVITS_WEIGHTS", + "每张显卡的batch_size": "Tamanho do lote de cada placa de vídeo", + "总训练轮数total_epoch,不建议太高": "Total de epoch de treinamento, não é recomendável um valor muito alto", + "文本模块学习率权重": "Weight da taxa de aprendizado do módulo de texto", + "保存频率save_every_epoch": "Frequência de salvamento save_every_epoch", + "是否仅保存最新的ckpt文件以节省硬盘空间": "Se deve salvar apenas o último arquivo CKPT para economizar espaço em disco", + "是否在每次保存时间点将最终小模型保存至weights文件夹": "Se deve salvar o modelo pequeno final na pasta Weights em cada ponto de salvamento de tempo", + "开启SoVITS训练": "Ativar treinamento SoVITS", + "终止SoVITS训练": "Encerrar treinamento SoVITS", + "SoVITS训练进程输出信息": "Informações de saída do processo de treinamento SoVITS", + "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1BB-Treinamento GPT. O arquivo de modelo para compartilhamento é gerado em GPT_WEIGHTS", + "总训练轮数total_epoch": "Total de epoch de treinamento", + "开启GPT训练": "Ativar treinamento GPT", + "终止GPT训练": "Encerrar treinamento GPT", + "GPT训练进程输出信息": "Informações de saída do processo de treinamento GPT", + "1C-推理": "1C-raciocínio", + "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Selecione os modelos armazenados em Sovits_weights e GPT_WEIGHTS. 
O padrão é o modelo inferior, experiência para 5 segundos de Zero Shot TTS", + "*GPT模型列表": "*Lista de modelos GPT", + "*SoVITS模型列表": "*Lista de modelos Sovits", + "GPU卡号,只能填1个整数": "Número da placa de vídeo, só é possível preencher com um número inteiro", + "刷新模型路径": "Atualizar caminho do modelo", + "是否开启TTS推理WebUI": "Se deseja ativar o webui de raciocínio TTS", + "TTS推理WebUI进程输出信息": "Informações de saída do processo webui de raciocínio TTS", + "2-GPT-SoVITS-变声": "2-gpt-sovits-mudança de voz", + "施工中,请静候佳音": "Em construção, por favor, aguarde por um bom som", + "TTS推理进程已开启": "O processo de inferência TTS foi iniciado", + "TTS推理进程已关闭": "O processo de inferência TTS foi desativado", + "打标工具WebUI已开启": "A ferramenta de marcação WebUI está ativada", + "打标工具WebUI已关闭": "A ferramenta de marcação WebUI foi desativado" +} diff --git a/webui.py b/webui.py index dbccba79..2c559411 100644 --- a/webui.py +++ b/webui.py @@ -1,5 +1,8 @@ import json,yaml,warnings,torch import platform +import psutil +import os +import signal warnings.filterwarnings("ignore") torch.manual_seed(233333) @@ -9,13 +12,20 @@ tmp = os.path.join(now_dir, "TEMP") os.makedirs(tmp, exist_ok=True) os.environ["TEMP"] = tmp import site -site_packages_root="%s/runtime/Lib/site-packages"%now_dir +site_packages_roots = [] for path in site.getsitepackages(): - if("site-packages"in path):site_packages_root=path -os.environ["OPENBLAS_NUM_THREADS"] = "4" + if "packages" in path: + site_packages_roots.append(path) +if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir] +#os.environ["OPENBLAS_NUM_THREADS"] = "4" os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" -with open("%s/users.pth"%(site_packages_root),"w")as f: - f.write("%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5"%(now_dir,now_dir,now_dir,now_dir,now_dir)) +for site_packages_root in site_packages_roots: + if os.path.exists(site_packages_root): + with open("%s/users.pth" % (site_packages_root), "w") as f: + f.write( + "%s\n%s/tools\n%s/tools/damo_asr\n%s/GPT_SoVITS\n%s/tools/uvr5" + % (now_dir, now_dir, now_dir, now_dir, now_dir) + ) import traceback sys.path.append(now_dir) import shutil @@ -24,13 +34,13 @@ import gradio as gr from subprocess import Popen import signal from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix -from i18n.i18n import I18nAuto +from tools.i18n.i18n import I18nAuto i18n = I18nAuto() from scipy.io import wavfile from tools.my_utils import load_audio from multiprocessing import cpu_count n_cpu=cpu_count() - + # 判断是否有能用来训练和加速推理的N卡 ngpu = torch.cuda.device_count() gpu_infos = [] @@ -78,39 +88,57 @@ p_uvr5=None p_asr=None p_tts_inference=None +def kill_proc_tree(pid, including_parent=True): + try: + parent = psutil.Process(pid) + except psutil.NoSuchProcess: + # Process already terminated + return + + children = parent.children(recursive=True) + for child in children: + try: + os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL + except OSError: + pass + if including_parent: + try: + os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL + except OSError: + pass + system=platform.system() def kill_process(pid): if(system=="Windows"): cmd = "taskkill /t /f /pid %s" % pid + os.system(cmd) else: - cmd = "kill -9 %s"%pid - print(cmd) - os.system(cmd)###linux上杀了webui,可能还会没杀干净。。。 - # os.kill(p_label.pid,19)#主进程#控制台进程#python子进程###不好使,连主进程的webui一起关了,辣鸡 + kill_proc_tree(pid) + def change_label(if_label,path_list): global p_label 
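     # Toggle pattern shared by the WebUI switches in this file: ticking the
     # checkbox spawns the sub-tool (here tools/subfix_webui.py) as a child
     # process via Popen, and unticking it tears the process down again.
     # kill_process() shells out to `taskkill /t /f` on Windows and otherwise
     # calls the psutil-based kill_proc_tree() above, which sends SIGTERM to
     # every child returned by psutil.Process(pid).children(recursive=True)
     # before signalling the parent, so Gradio worker children do not linger.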
if(if_label==True and p_label==None): cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s'%(python_exec,path_list,webui_port_subfix) - yield "打标工具WebUI已开启" + yield i18n("打标工具WebUI已开启") print(cmd) p_label = Popen(cmd, shell=True) elif(if_label==False and p_label!=None): kill_process(p_label.pid) p_label=None - yield "打标工具WebUI已关闭" + yield i18n("打标工具WebUI已关闭") def change_uvr5(if_uvr5): global p_uvr5 if(if_uvr5==True and p_uvr5==None): cmd = '"%s" tools/uvr5/webui.py "%s" %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5) - yield "UVR5已开启" + yield i18n("UVR5已开启") print(cmd) p_uvr5 = Popen(cmd, shell=True) elif(if_uvr5==False and p_uvr5!=None): kill_process(p_uvr5.pid) p_uvr5=None - yield "UVR5已关闭" + yield i18n("UVR5已关闭") def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path,sovits_path): global p_tts_inference @@ -123,13 +151,13 @@ def change_tts_inference(if_tts,bert_path,cnhubert_base_path,gpu_number,gpt_path os.environ["is_half"]=str(is_half) os.environ["infer_ttswebui"]=str(webui_port_infer_tts) cmd = '"%s" GPT_SoVITS/inference_webui.py'%(python_exec) - yield "TTS推理进程已开启" + yield i18n("TTS推理进程已开启") print(cmd) p_tts_inference = Popen(cmd, shell=True) elif(if_tts==False and p_tts_inference!=None): kill_process(p_tts_inference.pid) p_tts_inference=None - yield "TTS推理进程已关闭" + yield i18n("TTS推理进程已关闭") def open_asr(asr_inp_dir): @@ -152,10 +180,6 @@ def close_asr(): p_asr=None return "已终止ASR进程",{"__type__":"update","visible":True},{"__type__":"update","visible":False} -''' - button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Bb,button1Ba_open,button1Ba_close]) - button1Ba_close.click(close1Ba, [], [info1Bb,button1Ba_open,button1Ba_close]) -''' p_train_SoVITS=None def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D): global p_train_SoVITS @@ -276,25 +300,16 @@ def close_slice(): ps_slice=[] return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -''' -inp_text= os.environ.get("inp_text") -inp_wav_dir= os.environ.get("inp_wav_dir") -exp_name= os.environ.get("exp_name") -i_part= os.environ.get("i_part") -all_parts= os.environ.get("all_parts") -os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") -opt_dir= os.environ.get("opt_dir")#"/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name -bert_pretrained_dir= os.environ.get("bert_pretrained_dir")#"/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large" -''' ps1a=[] def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): global ps1a if (ps1a == []): + opt_dir="%s/%s"%(exp_root,exp_name) config={ "inp_text":inp_text, "inp_wav_dir":inp_wav_dir, "exp_name":exp_name, - "opt_dir":"%s/%s"%(exp_root,exp_name), + "opt_dir":opt_dir, "bert_pretrained_dir":bert_pretrained_dir, } gpu_names=gpu_numbers.split("-") @@ -308,7 +323,7 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): "is_half": str(is_half) } ) - os.environ.update(config) + os.environ.update(config)# cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec print(cmd) p = Popen(cmd, shell=True) @@ -316,6 +331,15 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir): yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": 
"update", "visible": True} for p in ps1a: p.wait() + opt = [] + for i_part in range(all_parts): + txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part) + with open(txt_path, "r", encoding="utf8") as f: + opt += f.read().strip("\n").split("\n") + os.remove(txt_path) + path_text = "%s/2-name2text.txt" % opt_dir + with open(path_text, "w", encoding="utf8") as f: + f.write("\n".join(opt) + "\n") ps1a=[] yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: @@ -331,16 +355,7 @@ def close1a(): traceback.print_exc() ps1a=[] return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -''' -inp_text= os.environ.get("inp_text") -inp_wav_dir= os.environ.get("inp_wav_dir") -exp_name= os.environ.get("exp_name") -i_part= os.environ.get("i_part") -all_parts= os.environ.get("all_parts") -os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") -opt_dir= os.environ.get("opt_dir") -cnhubert.cnhubert_base_path= os.environ.get("cnhubert_base_dir") -''' + ps1b=[] def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir): global ps1b @@ -386,23 +401,16 @@ def close1b(): traceback.print_exc() ps1b=[] return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False} -''' -inp_text= os.environ.get("inp_text") -exp_name= os.environ.get("exp_name") -i_part= os.environ.get("i_part") -all_parts= os.environ.get("all_parts") -os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES") -opt_dir= os.environ.get("opt_dir") -pretrained_s2G= os.environ.get("pretrained_s2G") -''' + ps1c=[] def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): global ps1c if (ps1c == []): + opt_dir="%s/%s"%(exp_root,exp_name) config={ "inp_text":inp_text, "exp_name":exp_name, - "opt_dir":"%s/%s"%(exp_root,exp_name), + "opt_dir":opt_dir, "pretrained_s2G":pretrained_s2G_path, "s2config_path":"GPT_SoVITS/configs/s2.json", "is_half": str(is_half) @@ -425,6 +433,15 @@ def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path): yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True} for p in ps1c: p.wait() + opt = ["item_name semantic_audio"] + path_semantic = "%s/6-name2semantic.tsv" % opt_dir + for i_part in range(all_parts): + semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part) + with open(semantic_path, "r", encoding="utf8") as f: + opt += f.read().strip("\n").split("\n") + os.remove(semantic_path) + with open(path_semantic, "w", encoding="utf8") as f: + f.write("\n".join(opt) + "\n") ps1c=[] yield "语义token提取进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False} else: @@ -449,7 +466,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb try: #############################1a path_text="%s/2-name2text.txt" % opt_dir - if(os.path.exists(path_text)==False): + if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and os.path.getsize(path_text)<10)): config={ "inp_text":inp_text, "inp_wav_dir":inp_wav_dir, @@ -516,7 +533,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb ps1abc=[] #############################1c path_semantic = "%s/6-name2semantic.tsv" % opt_dir - if(os.path.exists(path_semantic)==False): + if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and os.path.getsize(path_semantic)<31)): config={ "inp_text":inp_text, "exp_name":exp_name, @@ -574,93 +591,98 @@ def 
close1abc(): with gr.Blocks(title="GPT-SoVITS WebUI") as app: gr.Markdown( value= - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE." + i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") ) with gr.Tabs(): - with gr.TabItem("0-前置数据集获取工具"):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标 - gr.Markdown(value="0a-UVR5人声伴奏分离&去混响去延迟工具") + with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标 + gr.Markdown(value=i18n("0a-UVR5人声伴奏分离&去混响去延迟工具")) with gr.Row(): - if_uvr5 = gr.Checkbox(label="是否开启UVR5-WebUI",show_label=True) - uvr5_info = gr.Textbox(label="UVR5进程输出信息") - gr.Markdown(value="0b-语音切分工具") + if_uvr5 = gr.Checkbox(label=i18n("是否开启UVR5-WebUI"),show_label=True) + uvr5_info = gr.Textbox(label=i18n("UVR5进程输出信息")) + gr.Markdown(value=i18n("0b-语音切分工具")) with gr.Row(): with gr.Row(): - slice_inp_path=gr.Textbox(label="音频自动切分输入路径,可文件可文件夹",value="") - slice_opt_root=gr.Textbox(label="切分后的子音频的输出根目录",value="output/slicer_opt") - threshold=gr.Textbox(label="threshold:音量小于这个值视作静音的备选切割点",value="-34") - min_length=gr.Textbox(label="min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值",value="4000") - min_interval=gr.Textbox(label="min_interval:最短切割间隔",value="300") - hop_size=gr.Textbox(label="hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)",value="10") - max_sil_kept=gr.Textbox(label="max_sil_kept:切完后静音最多留多长",value="500") + slice_inp_path=gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"),value="") + slice_opt_root=gr.Textbox(label=i18n("切分后的子音频的输出根目录"),value="output/slicer_opt") + threshold=gr.Textbox(label=i18n("threshold:音量小于这个值视作静音的备选切割点"),value="-34") + min_length=gr.Textbox(label=i18n("min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值"),value="4000") + min_interval=gr.Textbox(label=i18n("min_interval:最短切割间隔"),value="300") + hop_size=gr.Textbox(label=i18n("hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)"),value="10") + max_sil_kept=gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"),value="500") with gr.Row(): - open_slicer_button=gr.Button("开启语音切割", variant="primary",visible=True) - close_slicer_button=gr.Button("终止语音切割", variant="primary",visible=False) - _max=gr.Slider(minimum=0,maximum=1,step=0.05,label="max:归一化后最大值多少",value=0.9,interactive=True) - alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label="alpha_mix:混多少比例归一化后音频进来",value=0.25,interactive=True) - n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label="切割使用的进程数",value=4,interactive=True) - slicer_info = gr.Textbox(label="语音切割进程输出信息") - gr.Markdown(value="0c-中文批量离线ASR工具") + open_slicer_button=gr.Button(i18n("开启语音切割"), variant="primary",visible=True) + close_slicer_button=gr.Button(i18n("终止语音切割"), variant="primary",visible=False) + _max=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("max:归一化后最大值多少"),value=0.9,interactive=True) + alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("alpha_mix:混多少比例归一化后音频进来"),value=0.25,interactive=True) + n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label=i18n("切割使用的进程数"),value=4,interactive=True) + slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息")) + gr.Markdown(value=i18n("0c-中文批量离线ASR工具")) with gr.Row(): - open_asr_button = gr.Button("开启离线批量ASR", variant="primary",visible=True) - close_asr_button = gr.Button("终止ASR进程", variant="primary",visible=False) + open_asr_button = gr.Button(i18n("开启离线批量ASR"), variant="primary",visible=True) + close_asr_button = gr.Button(i18n("终止ASR进程"), variant="primary",visible=False) asr_inp_dir = gr.Textbox( - label="批量ASR(中文only)输入文件夹路径", + label=i18n("批量ASR(中文only)输入文件夹路径"), value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx", interactive=True, ) - asr_info = gr.Textbox(label="ASR进程输出信息") - gr.Markdown(value="0d-语音文本校对标注工具") + asr_info = gr.Textbox(label=i18n("ASR进程输出信息")) + gr.Markdown(value=i18n("0d-语音文本校对标注工具")) with gr.Row(): - 
if_label = gr.Checkbox(label="是否开启打标WebUI",show_label=True) + if_label = gr.Checkbox(label=i18n("是否开启打标WebUI"),show_label=True) path_list = gr.Textbox( - label="打标数据标注文件路径", + label=i18n("打标数据标注文件路径"), value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list", interactive=True, ) - label_info = gr.Textbox(label="打标工具进程输出信息") + label_info = gr.Textbox(label=i18n("打标工具进程输出信息")) if_label.change(change_label, [if_label,path_list], [label_info]) if_uvr5.change(change_uvr5, [if_uvr5], [uvr5_info]) open_asr_button.click(open_asr, [asr_inp_dir], [asr_info,open_asr_button,close_asr_button]) close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button]) open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button]) close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button]) - with gr.TabItem("1-GPT-SoVITS-TTS"): + with gr.TabItem(i18n("1-GPT-SoVITS-TTS")): with gr.Row(): - exp_name = gr.Textbox(label="*实验/模型名", value="xxx", interactive=True) - gpu_info = gr.Textbox(label="显卡信息", value=gpu_info, visible=True, interactive=False) - pretrained_s2G = gr.Textbox(label="预训练的SoVITS-G模型路径", value="GPT_SoVITS/pretrained_models/s2G488k.pth", interactive=True) - pretrained_s2D = gr.Textbox(label="预训练的SoVITS-D模型路径", value="GPT_SoVITS/pretrained_models/s2D488k.pth", interactive=True) - pretrained_s1 = gr.Textbox(label="预训练的GPT模型路径", value="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt", interactive=True) - with gr.TabItem("1A-训练集格式化工具"): - gr.Markdown(value="输出logs/实验名目录下应有23456开头的文件和文件夹") + exp_name = gr.Textbox(label=i18n("*实验/模型名"), value="xxx", interactive=True) + gpu_info = gr.Textbox(label=i18n("显卡信息"), value=gpu_info, visible=True, interactive=False) + pretrained_s2G = gr.Textbox(label=i18n("预训练的SoVITS-G模型路径"), value="GPT_SoVITS/pretrained_models/s2G488k.pth", interactive=True) + pretrained_s2D = gr.Textbox(label=i18n("预训练的SoVITS-D模型路径"), value="GPT_SoVITS/pretrained_models/s2D488k.pth", interactive=True) + pretrained_s1 = gr.Textbox(label=i18n("预训练的GPT模型路径"), value="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt", interactive=True) + with gr.TabItem(i18n("1A-训练集格式化工具")): + gr.Markdown(value=i18n("输出logs/实验名目录下应有23456开头的文件和文件夹")) with gr.Row(): - inp_text = gr.Textbox(label="*文本标注文件",value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list",interactive=True) - inp_wav_dir = gr.Textbox(label="*训练集音频文件目录",value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",interactive=True) - gr.Markdown(value="1Aa-文本内容") + inp_text = gr.Textbox(label=i18n("*文本标注文件"),value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list",interactive=True) + inp_wav_dir = gr.Textbox( + label=i18n("*训练集音频文件目录"), + # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", + interactive=True, + placeholder=i18n("训练集音频文件目录 拼接 list文件里波形对应的文件名。") + ) + gr.Markdown(value=i18n("1Aa-文本内容")) with gr.Row(): - gpu_numbers1a = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程",value="%s-%s"%(gpus,gpus),interactive=True) - bert_pretrained_dir = gr.Textbox(label="预训练的中文BERT模型路径",value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False) - button1a_open = gr.Button("开启文本获取", variant="primary",visible=True) - button1a_close = gr.Button("终止文本获取进程", variant="primary",visible=False) - info1a=gr.Textbox(label="文本进程输出信息") - gr.Markdown(value="1Ab-SSL自监督特征提取") + gpu_numbers1a = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True) 
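+            # A value such as "0-0" fans the extraction step out into one worker
+            # per entry: open1a() splits this string on "-" and gives each worker
+            # its own i_part / all_parts / _CUDA_VISIBLE_DEVICES, then merges the
+            # per-part 2-name2text-*.txt outputs into a single 2-name2text.txt.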
+ bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False) + button1a_open = gr.Button(i18n("开启文本获取"), variant="primary",visible=True) + button1a_close = gr.Button(i18n("终止文本获取进程"), variant="primary",visible=False) + info1a=gr.Textbox(label=i18n("文本进程输出信息")) + gr.Markdown(value=i18n("1Ab-SSL自监督特征提取")) with gr.Row(): - gpu_numbers1Ba = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程",value="%s-%s"%(gpus,gpus),interactive=True) - cnhubert_base_dir = gr.Textbox(label="预训练的SSL模型路径",value="GPT_SoVITS/pretrained_models/chinese-hubert-base",interactive=False) - button1b_open = gr.Button("开启SSL提取", variant="primary",visible=True) - button1b_close = gr.Button("终止SSL提取进程", variant="primary",visible=False) - info1b=gr.Textbox(label="SSL进程输出信息") - gr.Markdown(value="1Ac-语义token提取") + gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True) + cnhubert_base_dir = gr.Textbox(label=i18n("预训练的SSL模型路径"),value="GPT_SoVITS/pretrained_models/chinese-hubert-base",interactive=False) + button1b_open = gr.Button(i18n("开启SSL提取"), variant="primary",visible=True) + button1b_close = gr.Button(i18n("终止SSL提取进程"), variant="primary",visible=False) + info1b=gr.Textbox(label=i18n("SSL进程输出信息")) + gr.Markdown(value=i18n("1Ac-语义token提取")) with gr.Row(): - gpu_numbers1c = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程",value="%s-%s"%(gpus,gpus),interactive=True) - button1c_open = gr.Button("开启语义token提取", variant="primary",visible=True) - button1c_close = gr.Button("终止语义token提取进程", variant="primary",visible=False) - info1c=gr.Textbox(label="语义token提取进程输出信息") - gr.Markdown(value="1Aabc-训练集格式化一键三连") + gpu_numbers1c = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True) + button1c_open = gr.Button(i18n("开启语义token提取"), variant="primary",visible=True) + button1c_close = gr.Button(i18n("终止语义token提取进程"), variant="primary",visible=False) + info1c=gr.Textbox(label=i18n("语义token提取进程输出信息")) + gr.Markdown(value=i18n("1Aabc-训练集格式化一键三连")) with gr.Row(): - button1abc_open = gr.Button("开启一键三连", variant="primary",visible=True) - button1abc_close = gr.Button("终止一键三连", variant="primary",visible=False) - info1abc=gr.Textbox(label="一键三连进程输出信息") + button1abc_open = gr.Button(i18n("开启一键三连"), variant="primary",visible=True) + button1abc_close = gr.Button(i18n("终止一键三连"), variant="primary",visible=False) + info1abc=gr.Textbox(label=i18n("一键三连进程输出信息")) button1a_open.click(open1a, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,bert_pretrained_dir], [info1a,button1a_open,button1a_close]) button1a_close.click(close1a, [], [info1a,button1a_open,button1a_close]) button1b_open.click(open1b, [inp_text,inp_wav_dir,exp_name,gpu_numbers1Ba,cnhubert_base_dir], [info1b,button1b_open,button1b_close]) @@ -669,61 +691,53 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: button1c_close.click(close1c, [], [info1c,button1c_open,button1c_close]) button1abc_open.click(open1abc, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G], [info1abc,button1abc_open,button1abc_close]) button1abc_close.click(close1abc, [], [info1abc,button1abc_open,button1abc_close]) - with gr.TabItem("1B-微调训练"): - gr.Markdown(value="1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。") + with gr.TabItem(i18n("1B-微调训练")): + gr.Markdown(value=i18n("1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。")) with gr.Row(): batch_size = 
gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) total_epoch = gr.Slider(minimum=1,maximum=20,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True) - text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label="文本模块学习率权重",value=0.4,interactive=True) + text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True) save_every_epoch = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True) if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) - gpu_numbers1Ba = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程", value="%s" % (gpus), interactive=True) + gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True) with gr.Row(): - button1Ba_open = gr.Button("开启SoVITS训练", variant="primary",visible=True) - button1Ba_close = gr.Button("终止SoVITS训练", variant="primary",visible=False) - info1Ba=gr.Textbox(label="SoVITS训练进程输出信息") - gr.Markdown(value="1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。") + button1Ba_open = gr.Button(i18n("开启SoVITS训练"), variant="primary",visible=True) + button1Ba_close = gr.Button(i18n("终止SoVITS训练"), variant="primary",visible=False) + info1Ba=gr.Textbox(label=i18n("SoVITS训练进程输出信息")) + gr.Markdown(value=i18n("1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。")) with gr.Row(): batch_size1Bb = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True) total_epoch1Bb = gr.Slider(minimum=2,maximum=100,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True) if_save_latest1Bb = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True) if_save_every_weights1Bb = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True) save_every_epoch1Bb = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=5,interactive=True) - gpu_numbers1Bb = gr.Textbox(label="GPU卡号以-分割,每个卡号一个进程", value="%s" % (gpus), interactive=True) + gpu_numbers1Bb = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True) with gr.Row(): - button1Bb_open = gr.Button("开启GPT训练", variant="primary",visible=True) - button1Bb_close = gr.Button("终止GPT训练", variant="primary",visible=False) - info1Bb=gr.Textbox(label="GPT训练进程输出信息") + button1Bb_open = gr.Button(i18n("开启GPT训练"), variant="primary",visible=True) + button1Bb_close = gr.Button(i18n("终止GPT训练"), variant="primary",visible=False) + info1Bb=gr.Textbox(label=i18n("GPT训练进程输出信息")) button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Ba,button1Ba_open,button1Ba_close]) button1Ba_close.click(close1Ba, [], [info1Ba,button1Ba_open,button1Ba_close]) button1Bb_open.click(open1Bb, [batch_size1Bb,total_epoch1Bb,exp_name,if_save_latest1Bb,if_save_every_weights1Bb,save_every_epoch1Bb,gpu_numbers1Bb,pretrained_s1], [info1Bb,button1Bb_open,button1Bb_close]) button1Bb_close.click(close1Bb, [], [info1Bb,button1Bb_open,button1Bb_close]) - with gr.TabItem("1C-推理"): - gr.Markdown(value="选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。") + with gr.TabItem(i18n("1C-推理")): + 
gr.Markdown(value=i18n("选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。")) with gr.Row(): - GPT_dropdown = gr.Dropdown(label="*GPT模型列表", choices=sorted(GPT_names),value=pretrained_gpt_name) - SoVITS_dropdown = gr.Dropdown(label="*SoVITS模型列表", choices=sorted(SoVITS_names),value=pretrained_sovits_name) - gpu_number_1C=gr.Textbox(label="GPU卡号,只能填1个整数", value=gpus, interactive=True) - refresh_button = gr.Button("刷新模型路径", variant="primary") + GPT_dropdown = gr.Dropdown(label=i18n("*GPT模型列表"), choices=sorted(GPT_names),value=pretrained_gpt_name) + SoVITS_dropdown = gr.Dropdown(label=i18n("*SoVITS模型列表"), choices=sorted(SoVITS_names),value=pretrained_sovits_name) + gpu_number_1C=gr.Textbox(label=i18n("GPU卡号,只能填1个整数"), value=gpus, interactive=True) + refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary") refresh_button.click(fn=change_choices,inputs=[],outputs=[SoVITS_dropdown,GPT_dropdown]) with gr.Row(): - if_tts = gr.Checkbox(label="是否开启TTS推理WebUI", show_label=True) - tts_info = gr.Textbox(label="TTS推理WebUI进程输出信息") + if_tts = gr.Checkbox(label=i18n("是否开启TTS推理WebUI"), show_label=True) + tts_info = gr.Textbox(label=i18n("TTS推理WebUI进程输出信息")) if_tts.change(change_tts_inference, [if_tts,bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown], [tts_info]) - with gr.TabItem("2-GPT-SoVITS-变声"):gr.Markdown(value="施工中,请静候佳音") - - ''' - os.environ["gpt_path"]=gpt_path - os.environ["sovits_path"]=sovits_path#bert_pretrained_dir - os.environ["cnhubert_base_path"]=cnhubert_base_path#cnhubert_base_dir - os.environ["bert_path"]=bert_path - os.environ["_CUDA_VISIBLE_DEVICES"]=gpu_number - ''' - + with gr.TabItem(i18n("2-GPT-SoVITS-变声")):gr.Markdown(value=i18n("施工中,请静候佳音")) app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, + share=True, server_port=webui_port_main, quiet=True, ) From 5addea5695bb8ab9aa351d28b127895ff12822f7 Mon Sep 17 00:00:00 2001 From: Rafael Godoy <78083427+RafaelGodoyEbert@users.noreply.github.com> Date: Sat, 20 Jan 2024 21:36:19 -0300 Subject: [PATCH 33/46] Add files via upload --- tools/i18n/i18n.py | 27 ++++++++++++++ tools/i18n/locale_diff.py | 47 ++++++++++++++++++++++++ tools/i18n/scan_i18n.py | 75 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+) create mode 100644 tools/i18n/i18n.py create mode 100644 tools/i18n/locale_diff.py create mode 100644 tools/i18n/scan_i18n.py diff --git a/tools/i18n/i18n.py b/tools/i18n/i18n.py new file mode 100644 index 00000000..00e91bf3 --- /dev/null +++ b/tools/i18n/i18n.py @@ -0,0 +1,27 @@ +import json +import locale +import os + + +def load_language_list(language): + with open(f"./i18n/locale/{language}.json", "r", encoding="utf-8") as f: + language_list = json.load(f) + return language_list + + +class I18nAuto: + def __init__(self, language=None): + if language in ["Auto", None]: + language = locale.getdefaultlocale()[ + 0 + ] # getlocale can't identify the system's language ((None, None)) + if not os.path.exists(f"./i18n/locale/{language}.json"): + language = "en_US" + self.language = language + self.language_map = load_language_list(language) + + def __call__(self, key): + return self.language_map.get(key, key) + + def __repr__(self): + return "Use Language: " + self.language diff --git a/tools/i18n/locale_diff.py b/tools/i18n/locale_diff.py new file mode 100644 index 00000000..674f7dd2 --- /dev/null +++ b/tools/i18n/locale_diff.py @@ -0,0 +1,47 @@ +import json +import os +from collections import OrderedDict + +# Define the 
standard file name +standard_file = "locale/zh_CN.json" + +# Find all JSON files in the directory +dir_path = "locale/" +languages = [ + os.path.join(dir_path, f) + for f in os.listdir(dir_path) + if f.endswith(".json") and f != standard_file +] + +# Load the standard file +with open(standard_file, "r", encoding="utf-8") as f: + standard_data = json.load(f, object_pairs_hook=OrderedDict) + +# Loop through each language file +for lang_file in languages: + # Load the language file + with open(lang_file, "r", encoding="utf-8") as f: + lang_data = json.load(f, object_pairs_hook=OrderedDict) + + # Find the difference between the language file and the standard file + diff = set(standard_data.keys()) - set(lang_data.keys()) + + miss = set(lang_data.keys()) - set(standard_data.keys()) + + # Add any missing keys to the language file + for key in diff: + lang_data[key] = key + + # Del any extra keys to the language file + for key in miss: + del lang_data[key] + + # Sort the keys of the language file to match the order of the standard file + lang_data = OrderedDict( + sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0])) + ) + + # Save the updated language file + with open(lang_file, "w", encoding="utf-8") as f: + json.dump(lang_data, f, ensure_ascii=False, indent=4, sort_keys=True) + f.write("\n") diff --git a/tools/i18n/scan_i18n.py b/tools/i18n/scan_i18n.py new file mode 100644 index 00000000..f3e52cf4 --- /dev/null +++ b/tools/i18n/scan_i18n.py @@ -0,0 +1,75 @@ +import ast +import glob +import json +from collections import OrderedDict + + +def extract_i18n_strings(node): + i18n_strings = [] + + if ( + isinstance(node, ast.Call) + and isinstance(node.func, ast.Name) + and node.func.id == "i18n" + ): + for arg in node.args: + if isinstance(arg, ast.Str): + i18n_strings.append(arg.s) + + for child_node in ast.iter_child_nodes(node): + i18n_strings.extend(extract_i18n_strings(child_node)) + + return i18n_strings + + +# scan the directory for all .py files (recursively) +# for each file, parse the code into an AST +# for each AST, extract the i18n strings + +strings = [] +for filename in glob.iglob("**/*.py", recursive=True): + with open(filename, "r") as f: + code = f.read() + if "I18nAuto" in code: + tree = ast.parse(code) + i18n_strings = extract_i18n_strings(tree) + print(filename, len(i18n_strings)) + strings.extend(i18n_strings) +code_keys = set(strings) +""" +n_i18n.py +gui_v1.py 26 +app.py 16 +infer-web.py 147 +scan_i18n.py 0 +i18n.py 0 +lib/train/process_ckpt.py 1 +""" +print() +print("Total unique:", len(code_keys)) + + +standard_file = "i18n/locale/zh_CN.json" +with open(standard_file, "r", encoding="utf-8") as f: + standard_data = json.load(f, object_pairs_hook=OrderedDict) +standard_keys = set(standard_data.keys()) + +# Define the standard file name +unused_keys = standard_keys - code_keys +print("Unused keys:", len(unused_keys)) +for unused_key in unused_keys: + print("\t", unused_key) + +missing_keys = code_keys - standard_keys +print("Missing keys:", len(missing_keys)) +for missing_key in missing_keys: + print("\t", missing_key) + +code_keys_dict = OrderedDict() +for s in strings: + code_keys_dict[s] = s + +# write back +with open(standard_file, "w", encoding="utf-8") as f: + json.dump(code_keys_dict, f, ensure_ascii=False, indent=4, sort_keys=True) + f.write("\n") From fd849ff42e17d41f1dc26ac2d8bc438dabba475c Mon Sep 17 00:00:00 2001 From: DW <147780325+D3lik@users.noreply.github.com> Date: Sun, 21 Jan 2024 13:14:09 +1100 Subject: [PATCH 34/46] Translated 
en_US.json

---
 i18n/locale/en_US.json | 147 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 147 insertions(+)

diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json
index d5855052..895f15ba 100644
--- a/i18n/locale/en_US.json
+++ b/i18n/locale/en_US.json
@@ -1,4 +1,151 @@
 {
+ "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is under the MIT licence. The author does not have any control over this software. Users are solely responsible for all voices that are being converted and/or distributed. If you disagree with these Terms and Conditions, you cannot use or cite any files or code in this package. Please check LICENSE for more info.",
+ "*GPT模型列表": "*GPT models list",
+ "*SoVITS模型列表": "*SoVITS models list",
+ "*实验/模型名": "*Experiment/model name",
+ "*文本标注文件": "*Text labelling file",
+ "*训练集音频文件目录": "*Audio dataset folder",
+ "*请上传并填写参考信息": "*Please upload and fill in reference information",
+ "*请填写需要合成的目标文本": "*Please fill in the text that needs inference",
+ "0-前置数据集获取工具": "0-Fetch dataset",
+ "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)",
+ "0b-语音切分工具": "0b-Audio slicer",
+ "0c-中文批量离线ASR工具": "0c-Chinese ASR tool",
+ "0d-语音文本校对标注工具": "0d-Speech to text proofreading tool",
+ "1A-训练集格式化工具": "1A-Dataset formatting",
+ "1Aa-文本内容": "1Aa-Text",
+ "1Aabc-训练集格式化一键三连": "1Aabc-One-click formatting",
+ "1Ab-SSL自监督特征提取": "1Ab-SSL self-supervised feature extraction",
+ "1Ac-语义token提取": "1Ac-semantics token extraction",
+ "1B-微调训练": "1B-Fine-tuned training",
+ "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS training. The model is located in SoVITS_weights.",
+ "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT training. The model is located in GPT_weights.",
+ "1C-推理": "1C-inference",
+ "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voice Changer",
+ "ASR任务开启:%s": "ASR task started: %s",
+ "ASR进程输出信息": "ASR output log",
+ "GPT训练完成": "Finished GPT training",
+ "GPT训练开始:%s": "GPT training started: %s",
+ "GPT训练进程输出信息": "GPT training output log",
+ "GPU卡号,只能填1个整数": "GPU number, can only input ONE integer",
+ "GPU卡号以-分割,每个卡号一个进程": "GPU number is separated by -, each GPU will run one process ",
+ "SSL提取进程执行中": "SSL extracting",
+ "SSL提取进程结束": "SSL extraction finished",
+ "SSL进程输出信息": "SSL output log",
+ "SoVITS训练完成": "SoVITS training finished",
+ "SoVITS训练开始:%s": "SoVITS training started: %s",
+ "SoVITS训练进程输出信息": "SoVITS training output log",
+ "TTS推理WebUI进程输出信息": "TTS inference webui output log",
+ "TTS推理进程已关闭": "TTS inference process closed",
+ "TTS推理进程已开启": "TTS inference process is opened",
+ "UVR5已关闭": "UVR5 closed",
+ "UVR5已开启": "UVR5 opened ",
+ "UVR5进程输出信息": "UVR5 process output log",
+ "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion of normalized audio merged into dataset",
+ "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: F0 hop size, the smaller the value, the higher the accuracy",
+ "max:归一化后最大值多少": "Loudness multiplier after normalization",
+ "max_sil_kept:切完后静音最多留多长": "Maximum length for silence to be kept",
+ "min_interval:最短切割间隔": "Minimum interval for audio cutting",
+ "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "Minimum length",
+ "threshold:音量小于这个值视作静音的备选切割点": "Noise gate threshold (loudness below this value will be treated as noise)",
+ "一键三连中途报错": "An error has occurred during One-click formatting",
+ "一键三连进程结束": "Finished one-click formatting",
+ "一键三连进程输出信息": "One-click formatting output",
+ "中文": "Chinese",
+ "保存频率save_every_epoch": "Save frequency (save_every_epoch):",
+ "凑50字一切": "Cut per 50 characters",
+ "凑五句一切": "Cut per 5 sentences",
+ "切分后文本": "Text after slicing",
+ "切分后的子音频的输出根目录": "Audio slicer output folder",
+ "切割使用的进程数": "CPU threads used for audio slicing",
+ "切割执行中": "Slicing audio",
+ "切割结束": "Finished audio slicing",
+ "刷新模型路径": "refreshing model paths",
+ "参考音频的文本": "Text for reference audio",
+ "参考音频的语种": "Language for reference audio",
+ "合成语音": "Start inference",
+ "后续将支持混合语种编码文本输入。": "Mixed languages input will be supported soon.",
+ "已有正在进行的ASR任务,需先终止才能开启下一次任务": " An ASR task is already in progress, please stop before starting the next task",
+ "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "A GPT training task is already in progress, please stop before starting the next task",
+ "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "An SSL extraction task is already in progress, please stop before starting the next task",
+ "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "A SoVITS training task is already in progress, please stop before starting the next task",
+ "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "A one-click formatting task is already in progress, please stop before starting the next task",
+ "已有正在进行的切割任务,需先终止才能开启下一次任务": "An audio slicing task is already in progress, please stop before starting the next task",
+ "已有正在进行的文本任务,需先终止才能开启下一次任务": "A TTS proofreading task is already in progress, please stop before starting the next task",
+ "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "A semantics token extraction task is already in progress, please stop before starting the next task",
+ "已终止ASR进程": "ASR task has been stopped",
+ "已终止GPT训练": "GPT training has been stopped",
+ "已终止SoVITS训练": "SoVITS training has been stopped",
+ "已终止所有1a进程": "All 1a tasks have been stopped",
+ "已终止所有1b进程": "All 1b tasks have been stopped",
+ "已终止所有一键三连进程": "All one-click formatting tasks have been stopped",
+ "已终止所有切割进程": "All audio slicing tasks have been stopped",
+ "已终止所有语义token进程": "All semantics token tasks have been stopped",
+ "开启GPT训练": "Start GPT training",
+ "开启SSL提取": "Start SSL extracting",
+ "开启SoVITS训练": "Start SoVITS training",
+ "开启一键三连": "Start one-click formatting",
+ "开启文本获取": "Start speech-to-text",
+ "开启离线批量ASR": "Start batch ASR",
+ "开启语义token提取": "Start semantics token extraction",
+ "开启语音切割": "Start audio slicer",
+ "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.",
+ "总训练轮数total_epoch": "Total training epochs (total_epoch):",
+ "总训练轮数total_epoch,不建议太高": "Total epochs, do not increase to a value that is too high",
+ "打标工具WebUI已关闭": "proofreading tool webui is closed",
+ "打标工具WebUI已开启": "proofreading tool webui is opened",
+ "打标工具进程输出信息": "Proofreading tool output log",
+ "打标数据标注文件路径": "path to proofreading text file",
+ "批量ASR(中文only)输入文件夹路径": "Batch ASR (Chinese only) input folder",
+ "按中文句号。切": "按中文句号。切",
+ "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Text slicer tool, since there will be issues when inferring long texts, so it is advised to cut first. 
When inferring, it will infer each segment separately and then combine them.",
+ "文本模块学习率权重": "Text model learning rate weighting",
+ "文本进程执行中": "Text processing",
+ "文本进程结束": "Finished text processing",
+ "文本进程输出信息": "Text processing output",
+ "施工中,请静候佳音": "In construction, please wait",
+ "日文": "Japanese",
+ "是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:",
+ "是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:",
+ "是否开启TTS推理WebUI": "Open TTS inference WebUI",
+ "是否开启UVR5-WebUI": "Open UVR5-WebUI",
+ "是否开启打标WebUI": "Open labelling WebUI",
+ "显卡信息": "GPU Information",
+ "每张显卡的batch_size": "Batch size per GPU:",
+ "终止ASR进程": "Stop ASR task",
+ "终止GPT训练": "Stop GPT training",
+ "终止SSL提取进程": "Stop SSL extraction",
+ "终止SoVITS训练": "Stop SoVITS training",
+ "终止一键三连": "Stop one-click formatting",
+ "终止文本获取进程": "Stop speech-to-text",
+ "终止语义token提取进程": "Stop semantics token extraction",
+ "终止语音切割": "Stop audio cutting",
+ "英文": "English",
+ "语义token提取进程执行中": "Semantics token extracting",
+ "语义token提取进程结束": "Finished semantics token extraction",
+ "语义token提取进程输出信息": "Semantics token extraction output log",
+ "语音切割进程输出信息": "Audio slicer output log",
+ "请上传参考音频": "请上传参考音频",
+ "输入路径不存在": "No input file or directory",
+ "输入路径存在但既不是文件也不是文件夹": "Input path exists, but it is neither a file nor a folder",
+ "输出logs/实验名目录下应有23456开头的文件和文件夹": "output folder (logs/{experiment name}) should have files and folders starting with 23456.",
+ "输出的语音": "输出的语音",
+ "进度:1a-done": "Progress:1a-done",
+ "进度:1a-done, 1b-ing": "Progress:1a-done, 1b-ing",
+ "进度:1a-ing": "Progress:1a-ing",
+ "进度:1a1b-done": "Progress:1a1b-done",
+ "进度:1a1b-done, 1cing": "Progress:1a1b-done, 1cing",
+ "进度:all-done": "Progress:all-done",
+ "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choose the models from SoVITS_weights and GPT_weights. The default one is a pretrain, so you can experience zero shot TTS.",
+ "需要合成的切分前文本": "Inference text that needs to be sliced",
+ "需要合成的文本": "Inference text",
+ "需要合成的语种": "Inference text language",
+ "音频自动切分输入路径,可文件可文件夹": "Audio slicer input (file or folder)",
+ "预训练的GPT模型路径": "Pretrained GPT model path",
+ "预训练的SSL模型路径": "Pretrained SSL model path",
+ "预训练的SoVITS-D模型路径": "Pretrained SoVITS-D model path",
+ "预训练的SoVITS-G模型路径": "Pretrained SoVITS-G model path",
+ "预训练的中文BERT模型路径": " Pretrained BERT model path",
 ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "If >=3: apply median filtering to the harvested pitch results. 
The value represents the filter radius and can reduce breathiness.",
 "A模型权重": "Weight (w) for Model A:",
 "A模型路径": "Path to Model A:",

From 827d0a337dfc00f81fa412ac6edbb01b75a35f5a Mon Sep 17 00:00:00 2001
From: DW <147780325+D3lik@users.noreply.github.com>
Date: Sun, 21 Jan 2024 13:29:53 +1100
Subject: [PATCH 35/46] Update en_US.json

---
 i18n/locale/en_US.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json
index 895f15ba..3357f01a 100644
--- a/i18n/locale/en_US.json
+++ b/i18n/locale/en_US.json
@@ -125,11 +125,11 @@
 "语义token提取进程结束": "Finished semantics token extraction",
 "语义token提取进程输出信息": "Semantics token extraction output log",
 "语音切割进程输出信息": "Audio slicer output log",
- "请上传参考音频": "请上传参考音频",
+ "请上传参考音频": "Please upload reference audio",
 "输入路径不存在": "No input file or directory",
 "输入路径存在但既不是文件也不是文件夹": "Input path exists, but it is neither a file nor a folder",
 "输出logs/实验名目录下应有23456开头的文件和文件夹": "output folder (logs/{experiment name}) should have files and folders starting with 23456.",
- "输出的语音": "输出的语音",
+ "输出的语音": "Inference Result",
 "进度:1a-done": "Progress:1a-done",
 "进度:1a-done, 1b-ing": "Progress:1a-done, 1b-ing",
 "进度:1a-ing": "Progress:1a-ing",

From 845fa69414125731d85dbd500ceff676fbf0932b Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sun, 21 Jan 2024 16:58:24 +0800
Subject: [PATCH 36/46] Create README.md

---
 docs/cn/README.md | 160 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 160 insertions(+)
 create mode 100644 docs/cn/README.md

diff --git a/docs/cn/README.md b/docs/cn/README.md
new file mode 100644
index 00000000..5993b081
--- /dev/null
+++ b/docs/cn/README.md
@@ -0,0 +1,160 @@
+
+ +

GPT-SoVITS-WebUI

+少样本强大的声音转换与文本到语音网络界面。

+ +[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange +)](https://github.com/RVC-Boss/GPT-SoVITS) + +
+ +[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) +[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) + +[**English**](./README.md) | [**中文简体**](./README_ZH.md) + +
+ +------ + + + +> 查看我们的介绍视频 [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) + +https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb + +## 功能: +1. **零样本文本到语音(TTS):** 输入5秒的声音样本,即刻体验文本到语音转换。 + +2. **少样本TTS:** 仅需1分钟的训练数据即可微调模型,提升声音相似度和真实感。 + +3. **跨语言支持:** 支持与训练数据集不同语言的推理,目前支持英语、日语和中文。 + +4. **WebUI工具:** 集成工具包括声音伴奏分离、自动训练集分割、中文自动语音识别(ASR)和文本标注,协助初学者创建训练数据集和GPT/SoVITS模型。 + +## 环境准备 + +如果你是Windows用户(已在win>=10上测试),可以直接通过预打包文件安装。只需下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true),解压后双击go-webui.bat即可启动GPT-SoVITS-WebUI。 + +### Python和PyTorch版本 + +已在Python 3.9、PyTorch 2.0.1和CUDA 11上测试。 + +### 使用Conda快速安装 + +```bash +conda create -n GPTSoVits python=3.9 +conda activate GPTSoVits +bash install.sh +``` +### 手动安装包 +#### Pip包 + +```bash +pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet +``` + +#### 额外要求 + +如果你需要中文自动语音识别(由FunASR支持),请安装: + +```bash +pip install modelscope torchaudio sentencepiece funasr +``` + +#### FFmpeg + +##### Conda 使用者 +```bash +conda install ffmpeg +``` + +##### Ubuntu/Debian 使用者 + +```bash +sudo apt install ffmpeg +sudo apt install libsox-dev +conda install -c conda-forge 'ffmpeg<7' +``` + +##### MacOS 使用者 + +```bash +brew install ffmpeg +``` + +##### Windows 使用者 + +下载并将 [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) 和 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) 放置在 GPT-SoVITS 根目录下。 + +### 预训练模型 + + +从 [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) 下载预训练模型,并将它们放置在 `GPT_SoVITS\pretrained_models` 中。 + +对于中文自动语音识别(另外),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/damo_asr/models` 中。 + +对于UVR5(人声/伴奏分离和混响移除,另外),从 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) 下载模型,并将它们放置在 `tools/uvr5/uvr5_weights` 中。 + + +## 数据集格式 + +文本到语音(TTS)注释 .list 文件格式: + +``` +vocal_path|speaker_name|language|text +``` + +语言字典: + +- 'zh': Chinese +- 'ja': Japanese +- 'en': English + +示例: + +``` +D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. 
+``` +## 待办事项清单 + +- [ ] **高优先级:** + - [ ] 日语和英语的本地化。 + - [ ] 用户指南。 + - [ ] 日语和英语数据集微调训练。 + +- [ ] **Features:** + - [ ] 零样本声音转换(5秒)/ 少样本声音转换(1分钟)。 + - [ ] TTS语速控制。 + - [ ] 增强的TTS情感控制。 + - [ ] 尝试将SoVITS令牌输入更改为词汇的概率分布。 + - [ ] 改进英语和日语文本前端。 + - [ ] 开发体积小和更大的TTS模型。 + - [ ] Colab脚本。 + - [ ] 扩展训练数据集(从2k小时到10k小时)。 + - [ ] 更好的sovits基础模型(增强的音频质量)。 + - [ ] 模型混合。 + +## 致谢 + +特别感谢以下项目和贡献者: + +- [ar-vits](https://github.com/innnky/ar-vits) +- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR) +- [vits](https://github.com/jaywalnut310/vits) +- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556) +- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain) +- [contentvec](https://github.com/auspicious3000/contentvec/) +- [hifi-gan](https://github.com/jik876/hifi-gan) +- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large) +- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41) +- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui) +- [audio-slicer](https://github.com/openvpi/audio-slicer) +- [SubFix](https://github.com/cronrpc/SubFix) +- [FFmpeg](https://github.com/FFmpeg/FFmpeg) +- [gradio](https://github.com/gradio-app/gradio) + +## 感谢所有贡献者的努力 + + + From 8ad264eabc81f2230185a3ce109f6327c36c233c Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 16:58:51 +0800 Subject: [PATCH 37/46] Delete README_ZH.md --- README_ZH.md | 160 --------------------------------------------------- 1 file changed, 160 deletions(-) delete mode 100644 README_ZH.md diff --git a/README_ZH.md b/README_ZH.md deleted file mode 100644 index db2f6bb8..00000000 --- a/README_ZH.md +++ /dev/null @@ -1,160 +0,0 @@ -
- -

GPT-SoVITS-WebUI

-少样本强大的声音转换与文本到语音网络界面。

- -[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange -)](https://github.com/RVC-Boss/GPT-SoVITS) - -
- -[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main) - -[**English**](./README.md) | [**中文简体**](./README_ZH.md) - -
- ------- - - - -> 查看我们的介绍视频 [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) - -https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb - -## 功能: -1. **零样本文本到语音(TTS):** 输入5秒的声音样本,即刻体验文本到语音转换。 - -2. **少样本TTS:** 仅需1分钟的训练数据即可微调模型,提升声音相似度和真实感。 - -3. **跨语言支持:** 支持与训练数据集不同语言的推理,目前支持英语、日语和中文。 - -4. **WebUI工具:** 集成工具包括声音伴奏分离、自动训练集分割、中文自动语音识别(ASR)和文本标注,协助初学者创建训练数据集和GPT/SoVITS模型。 - -## 环境准备 - -如果你是Windows用户(已在win>=10上测试),可以直接通过预打包文件安装。只需下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true),解压后双击go-webui.bat即可启动GPT-SoVITS-WebUI。 - -### Python和PyTorch版本 - -已在Python 3.9、PyTorch 2.0.1和CUDA 11上测试。 - -### 使用Conda快速安装 - -```bash -conda create -n GPTSoVits python=3.9 -conda activate GPTSoVits -bash install.sh -``` -### 手动安装包 -#### Pip包 - -```bash -pip install torch numpy scipy tensorboard librosa==0.9.2 numba==0.56.4 pytorch-lightning gradio==3.14.0 ffmpeg-python onnxruntime tqdm cn2an pypinyin pyopenjtalk g2p_en chardet -``` - -#### 额外要求 - -如果你需要中文自动语音识别(由FunASR支持),请安装: - -```bash -pip install modelscope torchaudio sentencepiece funasr -``` - -#### FFmpeg - -##### Conda 使用者 -```bash -conda install ffmpeg -``` - -##### Ubuntu/Debian 使用者 - -```bash -sudo apt install ffmpeg -sudo apt install libsox-dev -conda install -c conda-forge 'ffmpeg<7' -``` - -##### MacOS 使用者 - -```bash -brew install ffmpeg -``` - -##### Windows 使用者 - -下载并将 [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) 和 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) 放置在 GPT-SoVITS 根目录下。 - -### 预训练模型 - - -从 [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) 下载预训练模型,并将它们放置在 `GPT_SoVITS\pretrained_models` 中。 - -对于中文自动语音识别(另外),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/damo_asr/models` 中。 - -对于UVR5(人声/伴奏分离和混响移除,另外),从 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) 下载模型,并将它们放置在 `tools/uvr5/uvr5_weights` 中。 - - -## 数据集格式 - -文本到语音(TTS)注释 .list 文件格式: - -``` -vocal_path|speaker_name|language|text -``` - -语言字典: - -- 'zh': Chinese -- 'ja': Japanese -- 'en': English - -示例: - -``` -D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. 
-``` -## 待办事项清单 - -- [ ] **高优先级:** - - [ ] 日语和英语的本地化。 - - [ ] 用户指南。 - - [ ] 日语和英语数据集微调训练。 - -- [ ] **Features:** - - [ ] 零样本声音转换(5秒)/ 少样本声音转换(1分钟)。 - - [ ] TTS语速控制。 - - [ ] 增强的TTS情感控制。 - - [ ] 尝试将SoVITS令牌输入更改为词汇的概率分布。 - - [ ] 改进英语和日语文本前端。 - - [ ] 开发体积小和更大的TTS模型。 - - [ ] Colab脚本。 - - [ ] 扩展训练数据集(从2k到10k)。 - - [ ] 更好的sovits基础模型(增强的音频质量)。 - - [ ] 模型混合。 - -## 致谢 - -特别感谢以下项目和贡献者: - -- [ar-vits](https://github.com/innnky/ar-vits) -- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR) -- [vits](https://github.com/jaywalnut310/vits) -- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556) -- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain) -- [contentvec](https://github.com/auspicious3000/contentvec/) -- [hifi-gan](https://github.com/jik876/hifi-gan) -- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large) -- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41) -- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui) -- [audio-slicer](https://github.com/openvpi/audio-slicer) -- [SubFix](https://github.com/cronrpc/SubFix) -- [FFmpeg](https://github.com/FFmpeg/FFmpeg) -- [gradio](https://github.com/gradio-app/gradio) - -## 感谢所有贡献者的努力 - - - From 4da64b9a828f79a26266fda81028d7495bb1f849 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B2=9B=E7=9F=B3?= <54824693+bruce2233@users.noreply.github.com> Date: Sun, 21 Jan 2024 18:39:37 +0800 Subject: [PATCH 38/46] [fix]: fix json quotation error --- i18n/locale/en_US.json | 552 ++++++++++++++++++++--------------------- 1 file changed, 276 insertions(+), 276 deletions(-) diff --git a/i18n/locale/en_US.json b/i18n/locale/en_US.json index 19124519..160f1034 100644 --- a/i18n/locale/en_US.json +++ b/i18n/locale/en_US.json @@ -1,277 +1,277 @@ { - '很遗憾您这没有能用的显卡来支持您训练': 'Unfortunately, there is no compatible GPU available to support your training.', - 'UVR5已开启': 'UVR5 opened ', - 'UVR5已关闭': 'UVR5 closed', - '本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.': 'This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible.
If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory LICENSE for details.',
- '0-前置数据集获取工具': '0-Fetch dataset',
- '0a-UVR5人声伴奏分离&去混响去延迟工具': '0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)',
- '是否开启UVR5-WebUI': 'Open UVR5-WebUI',
- 'UVR5进程输出信息': 'UVR5 process output log',
- '0b-语音切分工具': '0b-Audio slicer',
- '音频自动切分输入路径,可文件可文件夹': 'Audio slicer input (file or folder)',
- '切分后的子音频的输出根目录': 'Audio slicer output folder',
- 'threshold:音量小于这个值视作静音的备选切割点': 'Noise gate threshold (loudness below this value will be treated as noise)',
- 'min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值': 'Minimum length',
- 'min_interval:最短切割间隔': 'Minimum interval for audio cutting',
- 'hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)': 'hop_size: F0 hop size, the smaller the value, the higher the accuracy',
- 'max_sil_kept:切完后静音最多留多长': 'Maximum length for silence to be kept',
- '开启语音切割': 'Start audio slicer',
- '终止语音切割': 'Stop audio cutting',
- 'max:归一化后最大值多少': 'Loudness multiplier after normalization',
- 'alpha_mix:混多少比例归一化后音频进来': 'alpha_mix: proportion of normalized audio merged into dataset',
- '切割使用的进程数': 'CPU threads used for audio slicing',
- '语音切割进程输出信息': 'Audio slicer output log',
- '0c-中文批量离线ASR工具': '0c-Chinese ASR tool',
- '开启离线批量ASR': 'Start batch ASR',
- '终止ASR进程': 'Stop ASR task',
- '批量ASR(中文only)输入文件夹路径': 'Batch ASR (Chinese only) input folder',
- 'ASR进程输出信息': 'ASR output log',
- '0d-语音文本校对标注工具': '0d-Speech to text proofreading tool',
- '是否开启打标WebUI': 'Open labelling WebUI',
- '打标数据标注文件路径': 'path to proofreading text file',
- '打标工具进程输出信息': 'Proofreading tool output log',
- '1-GPT-SoVITS-TTS': '1-GPT-SoVITS-TTS',
- '*实验/模型名': '*Experiment/model name',
- '显卡信息': 'GPU Information',
- '预训练的SoVITS-G模型路径': 'Pretrained SoVITS-G model path',
- '预训练的SoVITS-D模型路径': 'Pretrained SoVITS-D model path',
- '预训练的GPT模型路径': 'Pretrained GPT model path',
- '1A-训练集格式化工具': '1A-Dataset formatting',
- '输出logs/实验名目录下应有23456开头的文件和文件夹': 'output folder (logs/{experiment name}) should have files and folders starting with 23456.',
- '*文本标注文件': '*Text labelling file',
- '*训练集音频文件目录': '*Audio dataset folder',
- '训练集音频文件目录 拼接 list文件里波形对应的文件名。': 'The audio dataset folder is concatenated with the waveform file names given in the .list file.',
- '1Aa-文本内容': '1Aa-Text',
- 'GPU卡号以-分割,每个卡号一个进程': 'GPU number is separated by -, each GPU will run one process ',
- '预训练的中文BERT模型路径': ' Pretrained BERT model path',
- '开启文本获取': 'Start speech-to-text',
- '终止文本获取进程': 'Stop speech-to-text',
- '文本进程输出信息': 'Text processing output',
- '1Ab-SSL自监督特征提取': '1Ab-SSL self-supervised feature extraction',
- '预训练的SSL模型路径': 'Pretrained SSL model path',
- '开启SSL提取': 'Start SSL extracting',
- '终止SSL提取进程': 'Stop SSL extraction',
- 'SSL进程输出信息': 'SSL output log',
- '1Ac-语义token提取': '1Ac-semantics token extraction',
- '开启语义token提取': 'Start semantics token extraction',
- '终止语义token提取进程': 'Stop semantics token extraction',
- '语义token提取进程输出信息': 'Semantics token extraction output log',
- '1Aabc-训练集格式化一键三连': '1Aabc-One-click formatting',
- '开启一键三连': 'Start one-click formatting',
- '终止一键三连': 'Stop one-click formatting',
- '一键三连进程输出信息': 'One-click formatting output',
- '1B-微调训练': '1B-Fine-tuned training',
- '1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。': '1Ba-SoVITS training. 
The model is located in SoVITS_weights.',
- '每张显卡的batch_size': 'Batch size per GPU:',
- '总训练轮数total_epoch,不建议太高': 'Total epochs, do not increase to a value that is too high',
- '文本模块学习率权重': 'Text model learning rate weighting',
- '保存频率save_every_epoch': 'Save frequency (save_every_epoch):',
- '是否仅保存最新的ckpt文件以节省硬盘空间': "Save only the latest '.ckpt' file to save disk space:",
- '是否在每次保存时间点将最终小模型保存至weights文件夹': "Save a small final model to the 'weights' folder at each save point:",
- '开启SoVITS训练': 'Start SoVITS training',
- '终止SoVITS训练': 'Stop SoVITS training',
- 'SoVITS训练进程输出信息': 'SoVITS training output log',
- '1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。': '1Bb-GPT training. The model is located in GPT_weights.',
- '总训练轮数total_epoch': 'Total training epochs (total_epoch):',
- '开启GPT训练': 'Start GPT training',
- '终止GPT训练': 'Stop GPT training',
- 'GPT训练进程输出信息': 'GPT training output log',
- '1C-推理': '1C-inference',
- '选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。': 'Choose the models from SoVITS_weights and GPT_weights. The default one is a pretrain, so you can experience zero shot TTS.',
- '*GPT模型列表': '*GPT models list',
- '*SoVITS模型列表': '*SoVITS models list',
- 'GPU卡号,只能填1个整数': 'GPU number, can only input ONE integer',
- '刷新模型路径': 'refreshing model paths',
- '是否开启TTS推理WebUI': 'Open TTS inference WebUI',
- 'TTS推理WebUI进程输出信息': 'TTS inference webui output log',
- '2-GPT-SoVITS-变声': '2-GPT-SoVITS-Voice Changer',
- '施工中,请静候佳音': 'In construction, please wait',
- 'TTS推理进程已开启': 'TTS inference process is opened',
- 'TTS推理进程已关闭': 'TTS inference process closed',
- '打标工具WebUI已开启': 'proofreading tool webui is opened',
- '打标工具WebUI已关闭': 'proofreading tool webui is closed',
- '本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.': 'This software is under the MIT licence. The author does not have any control over this software. Users are solely responsible for all voices that are being converted and/or distributed. If you disagree with these Terms and Conditions, you cannot use or cite any files or code in this package. Please check LICENSE 
for more info.',
- '*请上传并填写参考信息': '*Please upload and fill in reference information',
- '*请填写需要合成的目标文本': '*Please fill in the text that needs inference',
- 'ASR任务开启:%s': 'ASR task started: %s',
- 'GPT训练完成': 'Finished GPT training',
- 'GPT训练开始:%s': 'GPT training started: %s',
- 'SSL提取进程执行中': 'SSL extracting',
- 'SSL提取进程结束': 'SSL extraction finished',
- 'SoVITS训练完成': 'SoVITS training finished',
- 'SoVITS训练开始:%s': 'SoVITS training started: %s',
- '一键三连中途报错': 'An error has occurred during One-click formatting',
- '一键三连进程结束': 'Finished one-click formatting',
- '中文': 'Chinese',
- '凑50字一切': 'Cut per 50 characters',
- '凑五句一切': 'Cut per 5 sentences',
- '切分后文本': 'Text after slicing',
- '切割执行中': 'Slicing audio',
- '切割结束': 'Finished audio slicing',
- '参考音频的文本': 'Text for reference audio',
- '参考音频的语种': 'Language for reference audio',
- '合成语音': 'Start inference',
- '后续将支持混合语种编码文本输入。': 'Mixed languages input will be supported soon.',
- '已有正在进行的ASR任务,需先终止才能开启下一次任务': ' An ASR task is already in progress, please stop before starting the next task',
- '已有正在进行的GPT训练任务,需先终止才能开启下一次任务': 'A GPT training task is already in progress, please stop before starting the next task',
- '已有正在进行的SSL提取任务,需先终止才能开启下一次任务': 'An SSL extraction task is already in progress, please stop before starting the next task',
- '已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务': 'A SoVITS training task is already in progress, please stop before starting the next task',
- '已有正在进行的一键三连任务,需先终止才能开启下一次任务': 'A one-click formatting task is already in progress, please stop before starting the next task',
- '已有正在进行的切割任务,需先终止才能开启下一次任务': 'An audio slicing task is already in progress, please stop before starting the next task',
- '已有正在进行的文本任务,需先终止才能开启下一次任务': 'A TTS proofreading task is already in progress, please stop before starting the next task',
- '已有正在进行的语义token提取任务,需先终止才能开启下一次任务': 'A semantics token extraction task is already in progress, please stop before starting the next task',
- '已终止ASR进程': 'ASR task has been stopped',
- '已终止GPT训练': 'GPT training has been stopped',
- '已终止SoVITS训练': 'SoVITS training has been stopped',
- '已终止所有1a进程': 'All 1a tasks have been stopped',
- '已终止所有1b进程': 'All 1b tasks have been stopped',
- '已终止所有一键三连进程': 'All one-click formatting tasks have been stopped',
- '已终止所有切割进程': 'All audio slicing tasks have been stopped',
- '已终止所有语义token进程': 'All semantics token tasks have been stopped',
- '按中文句号。切': '按中文句号。切',
- '文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。': 'Text slicer tool, since there will be issues when inferring long texts, so it is advised to cut first. When inferring, it will infer each segment separately and then combine them.',
- '文本进程执行中': 'Text processing',
- '文本进程结束': 'Finished text processing',
- '日文': 'Japanese',
- '英文': 'English',
- '语义token提取进程执行中': 'Semantics token extracting',
- '语义token提取进程结束': 'Finished semantics token extraction',
- '请上传参考音频': 'Please upload reference audio',
- '输入路径不存在': 'No input file or directory',
- '输入路径存在但既不是文件也不是文件夹': 'Input path exists, but it is neither a file nor a folder',
- '输出的语音': 'Inference Result',
- '进度:1a-done': 'Progress:1a-done',
- '进度:1a-done, 1b-ing': 'Progress:1a-done, 1b-ing',
- '进度:1a-ing': 'Progress:1a-ing',
- '进度:1a1b-done': 'Progress:1a1b-done',
- '进度:1a1b-done, 1cing': 'Progress:1a1b-done, 1cing',
- '进度:all-done': 'Progress:all-done',
- '需要合成的切分前文本': 'Inference text that needs to be sliced',
- '需要合成的文本': 'Inference text',
- '需要合成的语种': 'Inference text language',
- '>=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音': 'If >=3: apply median filtering to the harvested pitch results. 
The value represents the filter radius and can reduce breathiness.', - 'A模型权重': 'Weight (w) for Model A:', - 'A模型路径': 'Path to Model A:', - 'B模型路径': 'Path to Model B:', - 'E:\\语音音频+标注\\米津玄师\\src': 'C:\\Users\\Desktop\\src', - 'F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调': 'F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:', - 'Index Rate': 'Index Rate', - 'Onnx导出': 'Export Onnx', - 'Onnx输出路径': 'Onnx Export Path:', - 'RVC模型路径': 'RVC Model Path:', - 'ckpt处理': 'ckpt Processing', - 'harvest进程数': 'Number of CPU processes used for harvest pitch algorithm', - 'index文件路径不可包含中文': 'index文件路径不可包含中文', - 'pth文件路径不可包含中文': 'pth文件路径不可包含中文', - 'rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程': "Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1", - 'step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ': "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.", - 'step1:正在处理数据': 'Step 1: Processing data', - 'step2:正在提取音高&正在提取特征': 'step2:Pitch extraction & feature extraction', - 'step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ': 'Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.', - 'step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)': 'Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):', - 'step3: 填写训练设置, 开始训练模型和索引': 'Step 3: Fill in the training settings and start training the model and index', - 'step3a:正在训练模型': 'Step 3a: Model training started', - '一键训练': 'One-click training', - '也可批量输入音频文件, 二选一, 优先读文件夹': 'Multiple audio files can also be imported. If a folder path exists, this input is ignored.', - '人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
\u2003\u2003(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。': 'Batch processing for vocal accompaniment separation using the UVR5 model.
Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).
The model is divided into three categories:
1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.
2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.
3. De-reverb and de-delay models (by FoxJoy):
\u2003\u2003(1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;
 (234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.
De-reverb/de-delay notes:
1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.
2. The MDX-Net-Dereverb model is quite slow.
3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive.', - '以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2': "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:", - '伴奏人声分离&去混响&去回声': 'Vocals/Accompaniment Separation & Reverberation Removal', - '使用模型采样率': '使用模型采样率', - '使用设备采样率': '使用设备采样率', - '保存名': 'Save name:', - '保存的文件名, 默认空为和源文件同名': 'Save file name (default: same as the source file):', - '保存的模型名不带后缀': 'Saved model name (without extension):', - '保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果': 'Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:', - '修改': 'Modify', - '修改模型信息(仅支持weights文件夹下提取的小模型文件)': "Modify model information (only supported for small model files extracted from the 'weights' folder)", - '停止音频转换': 'Stop audio conversion', - '全流程结束!': 'All processes have been completed!', - '刷新音色列表和索引路径': 'Refresh voice list and index path', - '加载模型': 'Load model', - '加载预训练底模D路径': 'Load pre-trained base model D path:', - '加载预训练底模G路径': 'Load pre-trained base model G path:', - '单次推理': 'Single Inference', - '卸载音色省显存': 'Unload voice to save GPU memory:', - '变调(整数, 半音数量, 升八度12降八度-12)': 'Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):', - '后处理重采样至最终采样率,0为不进行重采样': 'Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:', - '否': 'No', - '启用相位声码器': '启用相位声码器', - '响应阈值': 'Response threshold', - '响度因子': 'loudness factor', - '处理数据': 'Process data', - '导出Onnx模型': 'Export Onnx Model', - '导出文件格式': 'Export file format', - '常见问题解答': 'FAQ (Frequently Asked Questions)', - '常规设置': 'General settings', - '开始音频转换': 'Start audio conversion', - '性能设置': 'Performance settings', - '批量推理': 'Batch Inference', - '批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ': "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').", - '指定输出主人声文件夹': 'Specify the output folder for vocals:', - '指定输出文件夹': 'Specify output folder:', - '指定输出非主人声文件夹': 'Specify the output folder for accompaniment:', - '推理时间(ms):': 'Inference time (ms):', - '推理音色': 'Inferencing voice:', - '提取': 'Extract', - '提取音高和处理数据使用的CPU进程数': 'Number of CPU processes used for pitch extraction and data processing:', - '是': 'Yes', - '是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速': 'Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:', - '查看': 'View', - '查看模型信息(仅支持weights文件夹下提取的小模型文件)': "View model information (only supported for small model files extracted from the 'weights' folder)", - '检索特征占比': 'Search feature ratio (controls accent strength, too high has artifacting):', - '模型': 'Model', - '模型推理': 'Model Inference', - '模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况': "Model extraction (enter the path of the large file model under the 'logs' folder). 
This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:", - '模型是否带音高指导': 'Whether the model has pitch guidance:', - '模型是否带音高指导(唱歌一定要, 语音可以不要)': 'Whether the model has pitch guidance (required for singing, optional for speech):', - '模型是否带音高指导,1是0否': 'Whether the model has pitch guidance (1: yes, 0: no):', - '模型版本型号': 'Model architecture version:', - '模型融合, 可用于测试音色融合': 'Model fusion, can be used to test timbre fusion', - '模型路径': 'Path to Model:', - '淡入淡出长度': 'Fade length', - '版本': 'Version', - '特征提取': 'Feature extraction', - '特征检索库文件路径,为空则使用下拉的选择结果': 'Path to the feature index file. Leave blank to use the selected result from the dropdown:', - '男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ': 'Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.', - '目标采样率': 'Target sample rate:', - '算法延迟(ms):': 'Algorithmic delays(ms):', - '自动检测index路径,下拉式选择(dropdown)': 'Auto-detect index path and select from the dropdown:', - '融合': 'Fusion', - '要改的模型信息': 'Model information to be modified:', - '要置入的模型信息': 'Model information to be placed:', - '训练': 'Train', - '训练模型': 'Train model', - '训练特征索引': 'Train feature index', - '训练结束, 您可查看控制台训练日志或实验文件夹下的train.log': "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.", - '请指定说话人id': 'Please specify the speaker/singer ID:', - '请选择index文件': 'Please choose the .index file', - '请选择pth文件': 'Please choose the .pth file', - '请选择说话人id': 'Select Speaker/Singer ID:', - '转换': 'Convert', - '输入实验名': 'Enter the experiment name:', - '输入待处理音频文件夹路径': 'Enter the path of the audio folder to be processed:', - '输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)': 'Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):', - '输入待处理音频文件路径(默认是正确格式示例)': 'Enter the path of the audio file to be processed (default is the correct format example):', - '输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络': 'Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume:', - '输入监听': 'Input voice monitor', - '输入训练文件夹路径': 'Enter the path of the training folder:', - '输入设备': 'Input device', - '输入降噪': 'Input noise reduction', - '输出信息': 'Output information', - '输出变声': 'Output converted voice', - '输出设备': 'Output device', - '输出降噪': 'Output noise reduction', - '输出音频(右下角三个点,点了可以下载)': 'Export audio (click on the three dots in the lower right corner to download)', - '选择.index文件': 'Select the .index file', - '选择.pth文件': 'Select the .pth file', - '选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU': '选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU', - '选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU': "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive), 'rmvpe': best quality, and little GPU requirement", - '选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU': "Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 
'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU", - '采样率:': '采样率:', - '采样长度': 'Sample length', - '重载设备列表': 'Reload device list', - '音调设置': 'Pitch settings', - '音频设备(请使用同种类驱动)': 'Audio device (please use the same type of driver)', - '音高算法': 'pitch detection algorithm', - '额外推理时长': 'Extra inference time' -} \ No newline at end of file + "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.", + "UVR5已开启": "UVR5 opened ", + "UVR5已关闭": "UVR5 closed", + "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible.
If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory LICENSE for details.",
+ "0-前置数据集获取工具": "0-Fetch dataset",
+ "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)",
+ "是否开启UVR5-WebUI": "Open UVR5-WebUI",
+ "UVR5进程输出信息": "UVR5 process output log",
+ "0b-语音切分工具": "0b-Audio slicer",
+ "音频自动切分输入路径,可文件可文件夹": "Audio slicer input (file or folder)",
+ "切分后的子音频的输出根目录": "Audio slicer output folder",
+ "threshold:音量小于这个值视作静音的备选切割点": "Noise gate threshold (loudness below this value will be treated as noise)",
+ "min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "Minimum length",
+ "min_interval:最短切割间隔": "Minimum interval for audio cutting",
+ "hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "hop_size: F0 hop size, the smaller the value, the higher the accuracy",
+ "max_sil_kept:切完后静音最多留多长": "Maximum length for silence to be kept",
+ "开启语音切割": "Start audio slicer",
+ "终止语音切割": "Stop audio cutting",
+ "max:归一化后最大值多少": "Loudness multiplier after normalization",
+ "alpha_mix:混多少比例归一化后音频进来": "alpha_mix: proportion of normalized audio merged into dataset",
+ "切割使用的进程数": "CPU threads used for audio slicing",
+ "语音切割进程输出信息": "Audio slicer output log",
+ "0c-中文批量离线ASR工具": "0c-Chinese ASR tool",
+ "开启离线批量ASR": "Start batch ASR",
+ "终止ASR进程": "Stop ASR task",
+ "批量ASR(中文only)输入文件夹路径": "Batch ASR (Chinese only) input folder",
+ "ASR进程输出信息": "ASR output log",
+ "0d-语音文本校对标注工具": "0d-Speech to text proofreading tool",
+ "是否开启打标WebUI": "Open labelling WebUI",
+ "打标数据标注文件路径": "path to proofreading text file",
+ "打标工具进程输出信息": "Proofreading tool output log",
+ "1-GPT-SoVITS-TTS": "1-GPT-SoVITS-TTS",
+ "*实验/模型名": "*Experiment/model name",
+ "显卡信息": "GPU Information",
+ "预训练的SoVITS-G模型路径": "Pretrained SoVITS-G model path",
+ "预训练的SoVITS-D模型路径": "Pretrained SoVITS-D model path",
+ "预训练的GPT模型路径": "Pretrained GPT model path",
+ "1A-训练集格式化工具": "1A-Dataset formatting",
+ "输出logs/实验名目录下应有23456开头的文件和文件夹": "output folder (logs/{experiment name}) should have files and folders starting with 23456.",
+ "*文本标注文件": "*Text labelling file",
+ "*训练集音频文件目录": "*Audio dataset folder",
+ "训练集音频文件目录 拼接 list文件里波形对应的文件名。": "The audio dataset folder is concatenated with the waveform file names given in the .list file.",
+ "1Aa-文本内容": "1Aa-Text",
+ "GPU卡号以-分割,每个卡号一个进程": "GPU number is separated by -, each GPU will run one process ",
+ "预训练的中文BERT模型路径": " Pretrained BERT model path",
+ "开启文本获取": "Start speech-to-text",
+ "终止文本获取进程": "Stop speech-to-text",
+ "文本进程输出信息": "Text processing output",
+ "1Ab-SSL自监督特征提取": "1Ab-SSL self-supervised feature extraction",
+ "预训练的SSL模型路径": "Pretrained SSL model path",
+ "开启SSL提取": "Start SSL extracting",
+ "终止SSL提取进程": "Stop SSL extraction",
+ "SSL进程输出信息": "SSL output log",
+ "1Ac-语义token提取": "1Ac-semantics token extraction",
+ "开启语义token提取": "Start semantics token extraction",
+ "终止语义token提取进程": "Stop semantics token extraction",
+ "语义token提取进程输出信息": "Semantics token extraction output log",
+ "1Aabc-训练集格式化一键三连": "1Aabc-One-click formatting",
+ "开启一键三连": "Start one-click formatting",
+ "终止一键三连": "Stop one-click formatting",
+ "一键三连进程输出信息": "One-click formatting output",
+ "1B-微调训练": "1B-Fine-tuned training",
+ "1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1Ba-SoVITS training. 
The model is located in SoVITS_weights.",
+ "每张显卡的batch_size": "Batch size per GPU:",
+ "总训练轮数total_epoch,不建议太高": "Total epochs, do not increase to a value that is too high",
+ "文本模块学习率权重": "Text model learning rate weighting",
+ "保存频率save_every_epoch": "Save frequency (save_every_epoch):",
+ "是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:",
+ "是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:",
+ "开启SoVITS训练": "Start SoVITS training",
+ "终止SoVITS训练": "Stop SoVITS training",
+ "SoVITS训练进程输出信息": "SoVITS training output log",
+ "1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1Bb-GPT training. The model is located in GPT_weights.",
+ "总训练轮数total_epoch": "Total training epochs (total_epoch):",
+ "开启GPT训练": "Start GPT training",
+ "终止GPT训练": "Stop GPT training",
+ "GPT训练进程输出信息": "GPT training output log",
+ "1C-推理": "1C-inference",
+ "选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。": "Choose the models from SoVITS_weights and GPT_weights. The default one is a pretrain, so you can experience zero shot TTS.",
+ "*GPT模型列表": "*GPT models list",
+ "*SoVITS模型列表": "*SoVITS models list",
+ "GPU卡号,只能填1个整数": "GPU number, can only input ONE integer",
+ "刷新模型路径": "refreshing model paths",
+ "是否开启TTS推理WebUI": "Open TTS inference WebUI",
+ "TTS推理WebUI进程输出信息": "TTS inference webui output log",
+ "2-GPT-SoVITS-变声": "2-GPT-SoVITS-Voice Changer",
+ "施工中,请静候佳音": "In construction, please wait",
+ "TTS推理进程已开启": "TTS inference process is opened",
+ "TTS推理进程已关闭": "TTS inference process closed",
+ "打标工具WebUI已开启": "proofreading tool webui is opened",
+ "打标工具WebUI已关闭": "proofreading tool webui is closed",
+ "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. 如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "This software is under the MIT licence. The author does not have any control over this software. Users are solely responsible for all voices that are being converted and/or distributed. If you disagree with these Terms and Conditions, you cannot use or cite any files or code in this package. Please check LICENSE 
for more info.",
+ "*请上传并填写参考信息": "*Please upload and fill in reference information",
+ "*请填写需要合成的目标文本": "*Please fill in the text that needs inference",
+ "ASR任务开启:%s": "ASR task started: %s",
+ "GPT训练完成": "Finished GPT training",
+ "GPT训练开始:%s": "GPT training started: %s",
+ "SSL提取进程执行中": "SSL extracting",
+ "SSL提取进程结束": "SSL extraction finished",
+ "SoVITS训练完成": "SoVITS training finished",
+ "SoVITS训练开始:%s": "SoVITS training started: %s",
+ "一键三连中途报错": "An error has occurred during One-click formatting",
+ "一键三连进程结束": "Finished one-click formatting",
+ "中文": "Chinese",
+ "凑50字一切": "Cut per 50 characters",
+ "凑五句一切": "Cut per 5 sentences",
+ "切分后文本": "Text after slicing",
+ "切割执行中": "Slicing audio",
+ "切割结束": "Finished audio slicing",
+ "参考音频的文本": "Text for reference audio",
+ "参考音频的语种": "Language for reference audio",
+ "合成语音": "Start inference",
+ "后续将支持混合语种编码文本输入。": "Mixed languages input will be supported soon.",
+ "已有正在进行的ASR任务,需先终止才能开启下一次任务": " An ASR task is already in progress, please stop before starting the next task",
+ "已有正在进行的GPT训练任务,需先终止才能开启下一次任务": "A GPT training task is already in progress, please stop before starting the next task",
+ "已有正在进行的SSL提取任务,需先终止才能开启下一次任务": "An SSL extraction task is already in progress, please stop before starting the next task",
+ "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务": "A SoVITS training task is already in progress, please stop before starting the next task",
+ "已有正在进行的一键三连任务,需先终止才能开启下一次任务": "A one-click formatting task is already in progress, please stop before starting the next task",
+ "已有正在进行的切割任务,需先终止才能开启下一次任务": "An audio slicing task is already in progress, please stop before starting the next task",
+ "已有正在进行的文本任务,需先终止才能开启下一次任务": "A TTS proofreading task is already in progress, please stop before starting the next task",
+ "已有正在进行的语义token提取任务,需先终止才能开启下一次任务": "A semantics token extraction task is already in progress, please stop before starting the next task",
+ "已终止ASR进程": "ASR task has been stopped",
+ "已终止GPT训练": "GPT training has been stopped",
+ "已终止SoVITS训练": "SoVITS training has been stopped",
+ "已终止所有1a进程": "All 1a tasks have been stopped",
+ "已终止所有1b进程": "All 1b tasks have been stopped",
+ "已终止所有一键三连进程": "All one-click formatting tasks have been stopped",
+ "已终止所有切割进程": "All audio slicing tasks have been stopped",
+ "已终止所有语义token进程": "All semantics token tasks have been stopped",
+ "按中文句号。切": "按中文句号。切",
+ "文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。": "Text slicer tool, since there will be issues when inferring long texts, so it is advised to cut first. When inferring, it will infer each segment separately and then combine them.",
+ "文本进程执行中": "Text processing",
+ "文本进程结束": "Finished text processing",
+ "日文": "Japanese",
+ "英文": "English",
+ "语义token提取进程执行中": "Semantics token extracting",
+ "语义token提取进程结束": "Finished semantics token extraction",
+ "请上传参考音频": "Please upload reference audio",
+ "输入路径不存在": "No input file or directory",
+ "输入路径存在但既不是文件也不是文件夹": "Input path exists, but it is neither a file nor a folder",
+ "输出的语音": "Inference Result",
+ "进度:1a-done": "Progress:1a-done",
+ "进度:1a-done, 1b-ing": "Progress:1a-done, 1b-ing",
+ "进度:1a-ing": "Progress:1a-ing",
+ "进度:1a1b-done": "Progress:1a1b-done",
+ "进度:1a1b-done, 1cing": "Progress:1a1b-done, 1cing",
+ "进度:all-done": "Progress:all-done",
+ "需要合成的切分前文本": "Inference text that needs to be sliced",
+ "需要合成的文本": "Inference text",
+ "需要合成的语种": "Inference text language",
+ ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "If >=3: apply median filtering to the harvested pitch results. 
The value represents the filter radius and can reduce breathiness.", + "A模型权重": "Weight (w) for Model A:", + "A模型路径": "Path to Model A:", + "B模型路径": "Path to Model B:", + "E:\\语音音频+标注\\米津玄师\\src": "C:\\Users\\Desktop\\src", + "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:", + "Index Rate": "Index Rate", + "Onnx导出": "Export Onnx", + "Onnx输出路径": "Onnx Export Path:", + "RVC模型路径": "RVC Model Path:", + "ckpt处理": "ckpt Processing", + "harvest进程数": "Number of CPU processes used for harvest pitch algorithm", + "index文件路径不可包含中文": "index文件路径不可包含中文", + "pth文件路径不可包含中文": "pth文件路径不可包含中文", + "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程": "Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1", + "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.", + "step1:正在处理数据": "Step 1: Processing data", + "step2:正在提取音高&正在提取特征": "step2:Pitch extraction & feature extraction", + "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.", + "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):", + "step3: 填写训练设置, 开始训练模型和索引": "Step 3: Fill in the training settings and start training the model and index", + "step3a:正在训练模型": "Step 3a: Model training started", + "一键训练": "One-click training", + "也可批量输入音频文件, 二选一, 优先读文件夹": "Multiple audio files can also be imported. If a folder path exists, this input is ignored.", + "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
\u2003\u2003(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch processing for vocal accompaniment separation using the UVR5 model.
Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).
The model is divided into three categories:
1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.
2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.
3. De-reverb and de-delay models (by FoxJoy):
\u2003\u2003(1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;
 (234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.
De-reverb/de-delay notes:
1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.
2. The MDX-Net-Dereverb model is quite slow.
3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive.", + "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:", + "伴奏人声分离&去混响&去回声": "Vocals/Accompaniment Separation & Reverberation Removal", + "使用模型采样率": "使用模型采样率", + "使用设备采样率": "使用设备采样率", + "保存名": "Save name:", + "保存的文件名, 默认空为和源文件同名": "Save file name (default: same as the source file):", + "保存的模型名不带后缀": "Saved model name (without extension):", + "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:", + "修改": "Modify", + "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modify model information (only supported for small model files extracted from the 'weights' folder)", + "停止音频转换": "Stop audio conversion", + "全流程结束!": "All processes have been completed!", + "刷新音色列表和索引路径": "Refresh voice list and index path", + "加载模型": "Load model", + "加载预训练底模D路径": "Load pre-trained base model D path:", + "加载预训练底模G路径": "Load pre-trained base model G path:", + "单次推理": "Single Inference", + "卸载音色省显存": "Unload voice to save GPU memory:", + "变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):", + "后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:", + "否": "No", + "启用相位声码器": "启用相位声码器", + "响应阈值": "Response threshold", + "响度因子": "loudness factor", + "处理数据": "Process data", + "导出Onnx模型": "Export Onnx Model", + "导出文件格式": "Export file format", + "常见问题解答": "FAQ (Frequently Asked Questions)", + "常规设置": "General settings", + "开始音频转换": "Start audio conversion", + "性能设置": "Performance settings", + "批量推理": "Batch Inference", + "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').", + "指定输出主人声文件夹": "Specify the output folder for vocals:", + "指定输出文件夹": "Specify output folder:", + "指定输出非主人声文件夹": "Specify the output folder for accompaniment:", + "推理时间(ms):": "Inference time (ms):", + "推理音色": "Inferencing voice:", + "提取": "Extract", + "提取音高和处理数据使用的CPU进程数": "Number of CPU processes used for pitch extraction and data processing:", + "是": "Yes", + "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:", + "查看": "View", + "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "View model information (only supported for small model files extracted from the 'weights' folder)", + "检索特征占比": "Search feature ratio (controls accent strength, too high has artifacting):", + "模型": "Model", + "模型推理": "Model Inference", + "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model extraction (enter the path of the large file model under the 'logs' folder). 
This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:", + "模型是否带音高指导": "Whether the model has pitch guidance:", + "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Whether the model has pitch guidance (required for singing, optional for speech):", + "模型是否带音高指导,1是0否": "Whether the model has pitch guidance (1: yes, 0: no):", + "模型版本型号": "Model architecture version:", + "模型融合, 可用于测试音色融合": "Model fusion, can be used to test timbre fusion", + "模型路径": "Path to Model:", + "淡入淡出长度": "Fade length", + "版本": "Version", + "特征提取": "Feature extraction", + "特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:", + "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.", + "目标采样率": "Target sample rate:", + "算法延迟(ms):": "Algorithmic delays(ms):", + "自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown:", + "融合": "Fusion", + "要改的模型信息": "Model information to be modified:", + "要置入的模型信息": "Model information to be placed:", + "训练": "Train", + "训练模型": "Train model", + "训练特征索引": "Train feature index", + "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.", + "请指定说话人id": "Please specify the speaker/singer ID:", + "请选择index文件": "Please choose the .index file", + "请选择pth文件": "Please choose the .pth file", + "请选择说话人id": "Select Speaker/Singer ID:", + "转换": "Convert", + "输入实验名": "Enter the experiment name:", + "输入待处理音频文件夹路径": "Enter the path of the audio folder to be processed:", + "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):", + "输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):", + "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume:", + "输入监听": "Input voice monitor", + "输入训练文件夹路径": "Enter the path of the training folder:", + "输入设备": "Input device", + "输入降噪": "Input noise reduction", + "输出信息": "Output information", + "输出变声": "Output converted voice", + "输出设备": "Output device", + "输出降噪": "Output noise reduction", + "输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)", + "选择.index文件": "Select the .index file", + "选择.pth文件": "Select the .pth file", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU", + "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive), 'rmvpe': best quality, and little GPU requirement", + "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 
'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU", + "采样率:": "采样率:", + "采样长度": "Sample length", + "重载设备列表": "Reload device list", + "音调设置": "Pitch settings", + "音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)", + "音高算法": "pitch detection algorithm", + "额外推理时长": "Extra inference time" +} From ea62d6e0cf1efd75287766ea2b55d1c3b69b4fd3 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 22:47:32 +0800 Subject: [PATCH 39/46] Add files via upload --- GPT_SoVITS/inference_webui.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index e5e604f5..32c9b4a5 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -115,7 +115,6 @@ vq_model.eval() print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) hz = 50 max_sec = config["data"]["max_sec"] -# t2s_model = Text2SemanticLightningModule.load_from_checkpoint(checkpoint_path=gpt_path, config=config, map_location="cpu")#########todo t2s_model = Text2SemanticLightningModule(config, "ojbk", is_train=False) t2s_model.load_state_dict(dict_s1["weight"]) if is_half == True: @@ -149,13 +148,21 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) t0 = ttime() prompt_text = prompt_text.strip("\n") prompt_language, text = prompt_language, text.strip("\n") + zero_wav = np.zeros( + int(hps.data.sampling_rate * 0.3), + dtype=np.float16 if is_half == True else np.float32, + ) with torch.no_grad(): - wav16k, sr = librosa.load(ref_wav_path, sr=16000) # 派蒙 + wav16k, sr = librosa.load(ref_wav_path, sr=16000) wav16k = torch.from_numpy(wav16k) + zero_wav_torch = torch.from_numpy(zero_wav) if is_half == True: wav16k = wav16k.half().to(device) + zero_wav_torch = zero_wav_torch.half().to(device) else: wav16k = wav16k.to(device) + zero_wav_torch = zero_wav_torch.to(device) + wav16k=torch.cat([wav16k,zero_wav_torch]) ssl_content = ssl_model.model(wav16k.unsqueeze(0))[ "last_hidden_state" ].transpose( @@ -170,10 +177,6 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) phones1 = cleaned_text_to_sequence(phones1) texts = text.split("\n") audio_opt = [] - zero_wav = np.zeros( - int(hps.data.sampling_rate * 0.3), - dtype=np.float16 if is_half == True else np.float32, - ) for text in texts: # 解决输入目标文本的空行导致报错的问题 if (len(text.strip()) == 0): From 7b89c9ed5669f63c4ed6ae791408969640bdcf3e Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 22:47:51 +0800 Subject: [PATCH 40/46] Add files via upload --- GPT_SoVITS/module/data_utils.py | 137 +++++++++++--------------------- 1 file changed, 45 insertions(+), 92 deletions(-) diff --git a/GPT_SoVITS/module/data_utils.py b/GPT_SoVITS/module/data_utils.py index 15f401da..ff4c4f43 100644 --- a/GPT_SoVITS/module/data_utils.py +++ b/GPT_SoVITS/module/data_utils.py @@ -1,6 +1,8 @@ -import time, logging +import time +import logging import os -import random, traceback +import random +import traceback import numpy as np import torch import torch.utils.data @@ -12,15 +14,12 @@ from text import cleaned_text_to_sequence from utils import load_wav_to_torch, load_filepaths_and_text import torch.nn.functional as F from functools import lru_cache -import torch import requests from scipy.io import wavfile from io import BytesIO - -# from config import exp_dir from my_utils import 
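
Note on PATCH 39: the functional change buried in the reflow above is that the
zero_wav silence buffer, previously created only for separating synthesized
segments, is now built up front and concatenated onto the 16 kHz reference
clip before CNHuBERT feature extraction, so a reference recording that cuts
off abruptly still ends in silence. A minimal sketch of the idea in isolation
(the sample rate constant is an assumption standing in for
hps.data.sampling_rate, and wav16k is a placeholder tensor):

    import numpy as np
    import torch

    SAMPLING_RATE = 32000        # assumption: stands in for hps.data.sampling_rate
    wav16k = torch.zeros(16000)  # placeholder for the librosa-loaded reference clip

    # Append ~0.3 s of silence so the SSL features see a quiet tail instead of
    # an abrupt cut at the end of the reference audio.
    zero_wav = np.zeros(int(SAMPLING_RATE * 0.3), dtype=np.float32)
    wav16k = torch.cat([wav16k, torch.from_numpy(zero_wav)])
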

From 7b89c9ed5669f63c4ed6ae791408969640bdcf3e Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sun, 21 Jan 2024 22:47:51 +0800
Subject: [PATCH 40/46] Add files via upload

---
 GPT_SoVITS/module/data_utils.py | 137 +++++++++++---------------------
 1 file changed, 45 insertions(+), 92 deletions(-)

diff --git a/GPT_SoVITS/module/data_utils.py b/GPT_SoVITS/module/data_utils.py
index 15f401da..ff4c4f43 100644
--- a/GPT_SoVITS/module/data_utils.py
+++ b/GPT_SoVITS/module/data_utils.py
@@ -1,6 +1,8 @@
-import time, logging
+import time
+import logging
 import os
-import random, traceback
+import random
+import traceback
 import numpy as np
 import torch
 import torch.utils.data
@@ -12,15 +14,12 @@ from text import cleaned_text_to_sequence
 from utils import load_wav_to_torch, load_filepaths_and_text
 import torch.nn.functional as F
 from functools import lru_cache
-import torch
 import requests
 from scipy.io import wavfile
 from io import BytesIO
-
-# from config import exp_dir
 from my_utils import load_audio
-
+# ZeroDivisionError fixed by Tybost (https://github.com/RVC-Boss/GPT-SoVITS/issues/79)
 class TextAudioSpeakerLoader(torch.utils.data.Dataset):
     """
     1) loads audio, speaker_id, text pairs
@@ -44,7 +43,7 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
         for line in lines:
             tmp = line.split("\t")
-            if len(tmp) != 4:
+            if (len(tmp) != 4):
                 continue
             self.phoneme_data[tmp[0]] = [tmp[1]]
@@ -52,7 +51,7 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
         tmp = self.audiopaths_sid_text
         leng = len(tmp)
         min_num = 100
-        if leng < min_num:
+        if (leng < min_num):
             self.audiopaths_sid_text = []
             for _ in range(max(2, int(min_num / leng))):
                 self.audiopaths_sid_text += tmp
@@ -77,20 +76,28 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
         for audiopath in tqdm(self.audiopaths_sid_text):
             try:
                 phoneme = self.phoneme_data[audiopath][0]
-                phoneme = phoneme.split(" ")
+                phoneme = phoneme.split(' ')
                 phoneme_ids = cleaned_text_to_sequence(phoneme)
             except Exception:
                 print(f"{audiopath} not in self.phoneme_data !")
                 skipped_phone += 1
                 continue
+
             size = os.path.getsize("%s/%s" % (self.path5, audiopath))
             duration = size / self.sampling_rate / 2
+
+            if duration == 0:
+                print(f"Zero duration for {audiopath}, skipping...")
+                skipped_dur += 1
+                continue
+
             if 54 > duration > 0.6 or self.val:
                 audiopaths_sid_text_new.append([audiopath, phoneme_ids])
                 lengths.append(size // (2 * self.hop_length))
             else:
                 skipped_dur += 1
                 continue
+
         print("skipped_phone: ", skipped_phone, ", skipped_dur: ", skipped_dur)
         print("total left: ", len(audiopaths_sid_text_new))
         assert len(audiopaths_sid_text_new) > 1  # 至少能凑够batch size,这里todo
@@ -103,10 +110,8 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
         try:
             spec, wav = self.get_audio("%s/%s" % (self.path5, audiopath))
             with torch.no_grad():
-                ssl = torch.load(
-                    "%s/%s.pt" % (self.path4, audiopath), map_location="cpu"
-                )
-                if ssl.shape[-1] != spec.shape[-1]:
+                ssl = torch.load("%s/%s.pt" % (self.path4, audiopath), map_location="cpu")
+                if (ssl.shape[-1] != spec.shape[-1]):
                     typee = ssl.dtype
                     ssl = F.pad(ssl.float(), (0, 1), mode="replicate").to(typee)
                     ssl.requires_grad = False
@@ -117,25 +122,15 @@ class TextAudioSpeakerLoader(torch.utils.data.Dataset):
                 ssl = torch.zeros(1, 768, 100)
                 text = text[-1:]
                 print("load audio or ssl error!!!!!!", audiopath)
-        # print(ssl.requires_grad,spec.requires_grad,wav.requires_grad,text.requires_grad)
         return (ssl, spec, wav, text)
 
     def get_audio(self, filename):
-        audio_array = load_audio(
-            filename, self.sampling_rate
-        )  # load_audio的方法是已经归一化到-1~1之间的,不用再/32768
-        # print(filename,audio_array.max(),audio_array.min(),audio_array.mean())
+        audio_array = load_audio(filename, self.sampling_rate)  # load_audio的方法是已经归一化到-1~1之间的,不用再/32768
         audio = torch.FloatTensor(audio_array)  # /32768
         audio_norm = audio
         audio_norm = audio_norm.unsqueeze(0)
-        spec = spectrogram_torch(
-            audio_norm,
-            self.filter_length,
-            self.sampling_rate,
-            self.hop_length,
-            self.win_length,
-            center=False,
-        )
+        spec = spectrogram_torch(audio_norm, self.filter_length, self.sampling_rate, self.hop_length, self.win_length,
+                                 center=False)
         spec = torch.squeeze(spec, 0)
         return spec, audio_norm
@@ -152,14 +147,11 @@ def random_slice(self, ssl, wav, mel):
         assert abs(ssl.shape[-1] - wav.shape[-1] // self.hop_length) < 3, (
-            "first",
-            ssl.shape,
-            wav.shape,
-        )
+            "first", ssl.shape, wav.shape)
         len_mel = mel.shape[1]
         if self.val:
-            reference_mel = mel[:, : len_mel // 3]
+            reference_mel = mel[:, :len_mel // 3]
             return reference_mel, ssl, wav, mel
         dir = random.randint(0, 1)
         sep_point = random.randint(int(len_mel // 3), int(len_mel // 3 * 2))
 
         if dir == 0:
             reference_mel = mel[:, :sep_point]
             ssl = ssl[:, :, sep_point:]
-            wav2 = wav[:, sep_point * self.hop_length :]
+            wav2 = wav[:, sep_point * self.hop_length:]
             mel = mel[:, sep_point:]
         else:
             reference_mel = mel[:, sep_point:]
             ssl = ssl[:, :, :sep_point]
-            wav2 = wav[:, : sep_point * self.hop_length]
+            wav2 = wav[:, :sep_point * self.hop_length]
             mel = mel[:, :sep_point]
 
         assert abs(ssl.shape[-1] - wav2.shape[-1] // self.hop_length) < 3, (
-            ssl.shape,
-            wav.shape,
-            wav2.shape,
-            mel.shape,
-            sep_point,
-            self.hop_length,
-            sep_point * self.hop_length,
-            dir,
-        )
+            ssl.shape, wav.shape, wav2.shape, mel.shape, sep_point, self.hop_length, sep_point * self.hop_length, dir)
         return reference_mel, ssl, wav2, mel
 
 
-class TextAudioSpeakerCollate:
-    """Zero-pads model inputs and targets"""
+class TextAudioSpeakerCollate():
+    """ Zero-pads model inputs and targets
+    """
 
     def __init__(self, return_ids=False):
         self.return_ids = return_ids
@@ -202,8 +187,8 @@
         """
         # Right zero-pad all one-hot text sequences to max input length
         _, ids_sorted_decreasing = torch.sort(
-            torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True
-        )
+            torch.LongTensor([x[1].size(1) for x in batch]),
+            dim=0, descending=True)
 
         max_ssl_len = max([x[0].size(2) for x in batch])
         max_ssl_len = int(2 * ((max_ssl_len // 2) + 1))
@@ -231,31 +216,22 @@
             row = batch[ids_sorted_decreasing[i]]
 
             ssl = row[0]
-            ssl_padded[i, :, : ssl.size(2)] = ssl[0, :, :]
+            ssl_padded[i, :, :ssl.size(2)] = ssl[0, :, :]
             ssl_lengths[i] = ssl.size(2)
 
             spec = row[1]
-            spec_padded[i, :, : spec.size(1)] = spec
+            spec_padded[i, :, :spec.size(1)] = spec
             spec_lengths[i] = spec.size(1)
 
             wav = row[2]
-            wav_padded[i, :, : wav.size(1)] = wav
+            wav_padded[i, :, :wav.size(1)] = wav
             wav_lengths[i] = wav.size(1)
 
             text = row[3]
-            text_padded[i, : text.size(0)] = text
+            text_padded[i, :text.size(0)] = text
             text_lengths[i] = text.size(0)
 
-        return (
-            ssl_padded,
-            ssl_lengths,
-            spec_padded,
-            spec_lengths,
-            wav_padded,
-            wav_lengths,
-            text_padded,
-            text_lengths,
-        )
+        return ssl_padded, ssl_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, text_padded, text_lengths
 
 
 class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
@@ -268,18 +244,9 @@
     Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
     """
 
-    def __init__(
-        self,
-        dataset,
-        batch_size,
-        boundaries,
-        num_replicas=None,
-        rank=None,
-        shuffle=True,
-    ):
+    def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
         super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
         self.lengths = dataset.lengths
-        # print(233333333333333,self.lengths,dir(dataset))
         self.batch_size = batch_size
         self.boundaries = boundaries
 
@@ -295,24 +262,22 @@
             if idx_bucket != -1:
                 buckets[idx_bucket].append(i)
 
-        for i in range(len(buckets) - 1, 0, -1):
-            # for i in range(len(buckets) - 1, -1, -1):
+        i = len(buckets) - 1
+        while i >= 0:
             if len(buckets[i]) == 0:
                 buckets.pop(i)
                 self.boundaries.pop(i + 1)
+            i -= 1
 
         num_samples_per_bucket = []
         for i in range(len(buckets)):
             len_bucket = len(buckets[i])
             total_batch_size = self.num_replicas * self.batch_size
-            rem = (
-                total_batch_size - (len_bucket % total_batch_size)
-            ) % total_batch_size
+            rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
             num_samples_per_bucket.append(len_bucket + rem)
         return buckets, num_samples_per_bucket
 
     def __iter__(self):
-        # deterministically shuffle based on epoch
         g = torch.Generator()
         g.manual_seed(self.epoch)
 
@@ -331,25 +296,13 @@
             ids_bucket = indices[i]
             num_samples_bucket = self.num_samples_per_bucket[i]
 
-            # add extra samples to make it evenly divisible
             rem = num_samples_bucket - len_bucket
-            ids_bucket = (
-                ids_bucket
-                + ids_bucket * (rem // len_bucket)
-                + ids_bucket[: (rem % len_bucket)]
-            )
+            ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
 
-            # subsample
-            ids_bucket = ids_bucket[self.rank :: self.num_replicas]
+            ids_bucket = ids_bucket[self.rank::self.num_replicas]
 
-            # batching
             for j in range(len(ids_bucket) // self.batch_size):
-                batch = [
-                    bucket[idx]
-                    for idx in ids_bucket[
-                        j * self.batch_size : (j + 1) * self.batch_size
-                    ]
-                ]
+                batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
                 batches.append(batch)
 
         if self.shuffle:
@@ -376,4 +329,4 @@
             return -1
 
     def __len__(self):
-        return self.num_samples // self.batch_size
+        return self.num_samples // self.batch_size
\ No newline at end of file
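
Note on PATCH 40: besides the style reflow, the substantive fix (credited to
Tybost in the comment at the top of the file) is the zero-duration guard.
Duration is derived from the byte size of 16-bit mono PCM, i.e.
size / sampling_rate / 2, so an empty file could previously be kept on the
validation path (the "or self.val" branch) and trigger a ZeroDivisionError
downstream. A standalone sketch of the same filter (SAMPLING_RATE is an
assumption standing in for self.sampling_rate):

    import os

    SAMPLING_RATE = 32000   # assumption: mirrors self.sampling_rate in the loader
    BYTES_PER_SAMPLE = 2    # 16-bit mono PCM, matching "size / sampling_rate / 2"

    def keep_clip(path: str, is_val: bool = False) -> bool:
        """Replicate the duration filter used in TextAudioSpeakerLoader."""
        size = os.path.getsize(path)
        duration = size / SAMPLING_RATE / BYTES_PER_SAMPLE
        if duration == 0:
            return False    # empty file: skip it instead of crashing later
        return is_val or 0.6 < duration < 54
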
""" - def __init__( - self, - dataset, - batch_size, - boundaries, - num_replicas=None, - rank=None, - shuffle=True, - ): + def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) self.lengths = dataset.lengths - # print(233333333333333,self.lengths,dir(dataset)) self.batch_size = batch_size self.boundaries = boundaries @@ -295,24 +262,22 @@ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): if idx_bucket != -1: buckets[idx_bucket].append(i) - for i in range(len(buckets) - 1, 0, -1): - # for i in range(len(buckets) - 1, -1, -1): + i = len(buckets) - 1 + while i >= 0: if len(buckets[i]) == 0: buckets.pop(i) self.boundaries.pop(i + 1) + i -= 1 num_samples_per_bucket = [] for i in range(len(buckets)): len_bucket = len(buckets[i]) total_batch_size = self.num_replicas * self.batch_size - rem = ( - total_batch_size - (len_bucket % total_batch_size) - ) % total_batch_size + rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size num_samples_per_bucket.append(len_bucket + rem) return buckets, num_samples_per_bucket def __iter__(self): - # deterministically shuffle based on epoch g = torch.Generator() g.manual_seed(self.epoch) @@ -331,25 +296,13 @@ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): ids_bucket = indices[i] num_samples_bucket = self.num_samples_per_bucket[i] - # add extra samples to make it evenly divisible rem = num_samples_bucket - len_bucket - ids_bucket = ( - ids_bucket - + ids_bucket * (rem // len_bucket) - + ids_bucket[: (rem % len_bucket)] - ) + ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - # subsample - ids_bucket = ids_bucket[self.rank :: self.num_replicas] + ids_bucket = ids_bucket[self.rank::self.num_replicas] - # batching for j in range(len(ids_bucket) // self.batch_size): - batch = [ - bucket[idx] - for idx in ids_bucket[ - j * self.batch_size : (j + 1) * self.batch_size - ] - ] + batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]] batches.append(batch) if self.shuffle: @@ -376,4 +329,4 @@ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): return -1 def __len__(self): - return self.num_samples // self.batch_size + return self.num_samples // self.batch_size \ No newline at end of file From 1f5bcd87ff741d022526213b091ef5ad5a110ab0 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 22:48:33 +0800 Subject: [PATCH 41/46] Add files via upload --- tools/subfix_webui.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/subfix_webui.py b/tools/subfix_webui.py index 0e6585eb..4985eef2 100644 --- a/tools/subfix_webui.py +++ b/tools/subfix_webui.py @@ -79,6 +79,7 @@ def b_change_index(index, batch): def b_next_index(index, batch): + b_save_file() if (index + batch) <= g_max_json_index: return index + batch , *b_change_index(index + batch, batch) else: @@ -86,6 +87,7 @@ def b_next_index(index, batch): def b_previous_index(index, batch): + b_save_file() if (index - batch) >= 0: return index - batch , *b_change_index(index - batch, batch) else: From a87ad5228ed2d729da42019ae1b93171f6a745ef Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 22:48:37 +0800 Subject: [PATCH 42/46] Add files via upload --- tools/damo_asr/cmd-asr.py | 13 ++++++++++--- 1 

From a87ad5228ed2d729da42019ae1b93171f6a745ef Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sun, 21 Jan 2024 22:48:37 +0800
Subject: [PATCH 42/46] Add files via upload

---
 tools/damo_asr/cmd-asr.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tools/damo_asr/cmd-asr.py b/tools/damo_asr/cmd-asr.py
index 9d1c4b9d..27755622 100644
--- a/tools/damo_asr/cmd-asr.py
+++ b/tools/damo_asr/cmd-asr.py
@@ -6,11 +6,18 @@ import sys,os,traceback
 dir=sys.argv[1]
 # opt_name=dir.split("\\")[-1].split("/")[-1]
 opt_name=os.path.basename(dir)
+
+path_asr='tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
+path_vad='tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
+path_punc='tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
+path_asr=path_asr if os.path.exists(path_asr)else "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+path_vad=path_vad if os.path.exists(path_vad)else "damo/speech_fsmn_vad_zh-cn-16k-common-pytorch"
+path_punc=path_punc if os.path.exists(path_punc)else "damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
-    model='tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
-    vad_model='tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch',
-    punc_model='tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
+    model=path_asr,
+    vad_model=path_vad,
+    punc_model=path_punc,
 )
 
 opt=[]
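
Note on PATCH 42: the ASR step becomes portable by preferring a locally
downloaded ModelScope model directory and falling back to the hub identifier
when the local copy is absent. The pattern generalizes to any
local-cache-or-hub decision; a sketch using the same paths as the diff (the
helper name is mine, not the repo's):

    import os

    def local_or_hub(local_path: str, hub_id: str) -> str:
        # Prefer the pre-downloaded model directory; otherwise hand modelscope
        # the hub id and let it download and cache the model itself.
        return local_path if os.path.exists(local_path) else hub_id

    path_asr = local_or_hub(
        "tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
        "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    )
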
os.environ["is_half"]=str(is_half) os.environ["infer_ttswebui"]=str(webui_port_infer_tts) + os.environ["is_share"]=str(is_share) cmd = '"%s" GPT_SoVITS/inference_webui.py'%(python_exec) yield i18n("TTS推理进程已开启") print(cmd) @@ -656,7 +657,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: label=i18n("*训练集音频文件目录"), # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx", interactive=True, - placeholder=i18n("训练集音频文件目录 拼接 list文件里波形对应的文件名。") + placeholder=i18n("训练集音频文件目录-拼接-list文件里波形对应的文件名(不是全路径)。") ) gr.Markdown(value=i18n("1Aa-文本内容")) with gr.Row(): @@ -737,7 +738,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app: app.queue(concurrency_count=511, max_size=1022).launch( server_name="0.0.0.0", inbrowser=True, - share=True, + share=is_share, server_port=webui_port_main, quiet=True, ) From 19ff34260b20149e0316010f75b1c115a3b8f9f3 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 23:02:33 +0800 Subject: [PATCH 44/46] Add files via upload --- GPT_SoVITS/inference_webui.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 32c9b4a5..dbc7eb3d 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -12,6 +12,8 @@ bert_path = os.environ.get( ) infer_ttswebui = os.environ.get("infer_ttswebui", 9872) infer_ttswebui = int(infer_ttswebui) +is_share = os.environ.get("is_share", "False") +is_share=eval(is_share) if "_CUDA_VISIBLE_DEVICES" in os.environ: os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"] is_half = eval(os.environ.get("is_half", "True")) From da6cab78d1ea45048d4b75ac15dbc92b59734d24 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 23:02:52 +0800 Subject: [PATCH 45/46] Add files via upload --- tools/subfix_webui.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/subfix_webui.py b/tools/subfix_webui.py index 4985eef2..ad4907b0 100644 --- a/tools/subfix_webui.py +++ b/tools/subfix_webui.py @@ -296,6 +296,7 @@ def set_global(load_json, load_list, json_key_text, json_key_path, batch): if __name__ == "__main__": parser = argparse.ArgumentParser(description='Process some integers.') parser.add_argument('--load_json', default="None", help='source file, like demo.json') + parser.add_argument('--is_share', default="False", help='whether webui is_share=True') parser.add_argument('--load_list', default="None", help='source file, like demo.list') parser.add_argument('--webui_port_subfix', default=9871, help='source file, like demo.list') parser.add_argument('--json_key_text', default="text", help='the text key name in json, Default: text') @@ -490,5 +491,6 @@ if __name__ == "__main__": server_name="0.0.0.0", inbrowser=True, quiet=True, + share=eval(args.is_share), server_port=int(args.webui_port_subfix) ) \ No newline at end of file From 2f3ab46bdf79bab568626075823283b8b95dd731 Mon Sep 17 00:00:00 2001 From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com> Date: Sun, 21 Jan 2024 23:02:55 +0800 Subject: [PATCH 46/46] Add files via upload --- tools/uvr5/webui.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py index 11b39f5b..59d2d2e9 100644 --- a/tools/uvr5/webui.py +++ b/tools/uvr5/webui.py @@ -19,7 +19,8 @@ for name in os.listdir(weight_uvr5_root): device=sys.argv[1] is_half=sys.argv[2] - +webui_port_uvr5=int(sys.argv[3]) +is_share=eval(sys.argv[4]) def uvr(model_name, inp_root, save_root_vocal, paths, 

From 2f3ab46bdf79bab568626075823283b8b95dd731 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sun, 21 Jan 2024 23:02:55 +0800
Subject: [PATCH 46/46] Add files via upload

---
 tools/uvr5/webui.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py
index 11b39f5b..59d2d2e9 100644
--- a/tools/uvr5/webui.py
+++ b/tools/uvr5/webui.py
@@ -19,7 +19,8 @@ for name in os.listdir(weight_uvr5_root):
 
 device=sys.argv[1]
 is_half=sys.argv[2]
-
+webui_port_uvr5=int(sys.argv[3])
+is_share=eval(sys.argv[4])
 
 def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
     infos = []
@@ -179,6 +180,7 @@ with gr.Blocks(title="RVC WebUI") as app:
 app.queue(concurrency_count=511, max_size=1022).launch(
     server_name="0.0.0.0",
     inbrowser=True,
-    server_port=9873,
+    share=is_share,
+    server_port=webui_port_uvr5,
     quiet=True,
 )
\ No newline at end of file
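
Note on PATCH 46: tools/uvr5/webui.py stops hard-coding port 9873 and instead
reads its device, precision, port, and share flag positionally from sys.argv,
in exactly the order the launcher command from PATCH 43 writes them. A sketch
of the contract between the two sides (the argv values are examples):

    import sys

    # Launcher side (webui.py): '"python" tools/uvr5/webui.py "cuda" True 9873 False'
    # Child side unpacks the same positions:
    device = sys.argv[1]                # e.g. "cuda"
    is_half = sys.argv[2]               # "True"/"False" as a string
    webui_port_uvr5 = int(sys.argv[3])  # e.g. 9873
    is_share = eval(sys.argv[4])        # back to a bool
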