diff --git a/GPT_SoVITS/hotwords.json b/GPT_SoVITS/hotwords.json
new file mode 100644
index 00000000..18e7083b
--- /dev/null
+++ b/GPT_SoVITS/hotwords.json
@@ -0,0 +1,62 @@
+{
+ "沪深300": "沪深三百",
+ "2017": "二零一七",
+ "深证成指": "深证成指",
+ "上证50" : "上证五零",
+ "中证1000": "中证一千",
+ "中证2000": "中证两千",
+ "2000年": "两千年",
+ "2001年": "二零零一年",
+ "2002年": "二零零二年",
+ "2003年": "二零零三年",
+ "2004年": "二零零四年",
+ "2005年": "二零零五年",
+ "2006年": "二零零六年",
+ "2007年": "二零零七年",
+ "2008年": "二零零八年",
+ "2009年": "二零零九年",
+ "2010年": "二零一零年",
+ "2011年": "二零一一年",
+ "2012年": "二零一二年",
+ "2013年": "二零一三年",
+ "2014年": "二零一四年",
+ "2015年": "二零一五年",
+ "2016年": "二零一六年",
+ "2017年": "二零一七年",
+ "2018年": "二零一八年",
+ "2019年": "二零一九年",
+ "2020年": "二零二零年",
+ "2021年": "二零二一年",
+ "2022年": "二零二二年",
+ "2023年": "二零二三年",
+ "2024年": "二零二四年",
+ "2025年": "二零二五年",
+ "2026年": "二零二六年",
+ "2027年": "二零二七年",
+ "2028年": "二零二八年",
+ "2029年": "二零二九年",
+ "2030年": "二零三零年",
+ "-": "到",
+ "2010": "二零一零",
+ "2011": "二零一一",
+ "2012": "二零一二",
+ "2013": "二零一三",
+ "2014": "二零一四",
+ "2015": "二零一五",
+ "2016": "二零一六",
+ "2017": "二零一七",
+ "2018": "二零一八",
+ "2019": "二零一九",
+ "2020": "二零二零",
+ "2021": "二零二一",
+ "2022": "二零二二",
+ "2023": "二零二三",
+ "2024": "二零二四",
+ "2025": "二零二五",
+ "2026": "二零二六",
+ "2027": "二零二七",
+ "2028": "二零二八",
+ "2029": "二零二九",
+ "2030": "二零三零"
+
+}
\ No newline at end of file
diff --git a/GPT_SoVITS/inference_webui_checkpoint.py b/GPT_SoVITS/inference_webui_checkpoint.py
new file mode 100644
index 00000000..37c5cbfd
--- /dev/null
+++ b/GPT_SoVITS/inference_webui_checkpoint.py
@@ -0,0 +1,896 @@
+'''
+Mixed Chinese/English recognition
+Mixed Japanese/English recognition
+Multilingual: segment the input and detect each run's language
+Treat all text as Chinese
+Treat all text as English
+Treat all text as Japanese
+'''
+import logging
+import traceback
+
+logging.getLogger("markdown_it").setLevel(logging.ERROR)
+logging.getLogger("urllib3").setLevel(logging.ERROR)
+logging.getLogger("httpcore").setLevel(logging.ERROR)
+logging.getLogger("httpx").setLevel(logging.ERROR)
+logging.getLogger("asyncio").setLevel(logging.ERROR)
+logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
+logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
+logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
+import LangSegment, os, re, sys, json
+import pdb
+import torch
+from scipy.io.wavfile import write
+import shutil
+import subprocess
+
+output_dir='生成记录' # "generation history" output folder
+os.makedirs(output_dir,exist_ok=True)
+
+version=os.environ.get("version","v2")
+pretrained_sovits_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
+pretrained_gpt_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
+
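+# Keep only the pretrained checkpoint paths that actually exist on disk
+# (each list carries the v2 checkpoint first and the v1 fallback second).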
+_ =[[],[]]
+for i in range(2):
+ if os.path.exists(pretrained_gpt_name[i]):
+ _[0].append(pretrained_gpt_name[i])
+ if os.path.exists(pretrained_sovits_name[i]):
+ _[-1].append(pretrained_sovits_name[i])
+pretrained_gpt_name,pretrained_sovits_name = _
+
+
+
+if os.path.exists(f"./weight.json"):
+ pass
+else:
+ with open(f"./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)
+
+with open(f"./weight.json", 'r', encoding="utf-8") as file:
+ weight_data = file.read()
+ weight_data=json.loads(weight_data)
+ gpt_path = os.environ.get(
+ "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
+ sovits_path = os.environ.get(
+ "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
+ if isinstance(gpt_path,list):
+ gpt_path = gpt_path[0]
+ if isinstance(sovits_path,list):
+ sovits_path = sovits_path[0]
+
+# gpt_path = os.environ.get(
+# "gpt_path", pretrained_gpt_name
+# )
+# sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
+cnhubert_base_path = os.environ.get(
+ "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+)
+bert_path = os.environ.get(
+ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
+)
+infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
+infer_ttswebui = int(infer_ttswebui)
+is_share = os.environ.get("is_share", "False")
+is_share = eval(is_share)
+if "_CUDA_VISIBLE_DEVICES" in os.environ:
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
+is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
+punctuation = set(['!', '?', '…', ',', '.', '-'," "])
+import gradio as gr
+from transformers import AutoModelForMaskedLM, AutoTokenizer
+import numpy as np
+import librosa
+from feature_extractor import cnhubert
+
+cnhubert.cnhubert_base_path = cnhubert_base_path
+
+from module.models import SynthesizerTrn
+from AR.models.t2s_lightning_module import Text2SemanticLightningModule
+from text import cleaned_text_to_sequence
+from text.cleaner import clean_text
+from time import time as ttime
+from module.mel_processing import spectrogram_torch
+from tools.my_utils import load_audio
+from tools.i18n.i18n import I18nAuto, scan_language_list
+
+language=os.environ.get("language","Auto")
+language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
+i18n = I18nAuto(language=language)
+
+# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。
+
+if torch.cuda.is_available():
+ device = "cuda"
+else:
+ device = "cpu"
+
+dict_language_v1 = {
+ i18n("中文"): "all_zh",# treat all text as Chinese
+ i18n("英文"): "en",# treat all text as English (unchanged)
+ i18n("日文"): "all_ja",# treat all text as Japanese
+ i18n("中英混合"): "zh",# mixed Chinese/English (unchanged)
+ i18n("日英混合"): "ja",# mixed Japanese/English (unchanged)
+ i18n("多语种混合"): "auto",# multilingual: auto-segment and detect languages
+}
+dict_language_v2 = {
+ i18n("中文"): "all_zh",# treat all text as Chinese
+ i18n("英文"): "en",# treat all text as English (unchanged)
+ i18n("日文"): "all_ja",# treat all text as Japanese
+ i18n("粤语"): "all_yue",# treat all text as Cantonese
+ i18n("韩文"): "all_ko",# treat all text as Korean
+ i18n("中英混合"): "zh",# mixed Chinese/English (unchanged)
+ i18n("日英混合"): "ja",# mixed Japanese/English (unchanged)
+ i18n("粤英混合"): "yue",# mixed Cantonese/English (unchanged)
+ i18n("韩英混合"): "ko",# mixed Korean/English (unchanged)
+ i18n("多语种混合"): "auto",# multilingual: auto-segment and detect languages
+ i18n("多语种混合(粤语)"): "auto_yue",# multilingual with Cantonese for Chinese runs
+}
+dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+
+tokenizer = AutoTokenizer.from_pretrained(bert_path)
+bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
+if is_half == True:
+ bert_model = bert_model.half().to(device)
+else:
+ bert_model = bert_model.to(device)
+
+
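+# Character-level features come from the third-to-last BERT hidden layer;
+# each character's vector is repeated word2ph[i] times so the result lines up
+# with the phone sequence (shape 1024 x n_phones after the final transpose).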
+def get_bert_feature(text, word2ph):
+ with torch.no_grad():
+ inputs = tokenizer(text, return_tensors="pt")
+ for i in inputs:
+ inputs[i] = inputs[i].to(device)
+ res = bert_model(**inputs, output_hidden_states=True)
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
+ assert len(word2ph) == len(text)
+ phone_level_feature = []
+ for i in range(len(word2ph)):
+ repeat_feature = res[i].repeat(word2ph[i], 1)
+ phone_level_feature.append(repeat_feature)
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
+ return phone_level_feature.T
+
+
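+# Wraps the checkpoint's nested config dict so entries are reachable as
+# attributes, e.g. hps.data.filter_length instead of hps["data"]["filter_length"].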
+class DictToAttrRecursive(dict):
+ def __init__(self, input_dict):
+ super().__init__(input_dict)
+ for key, value in input_dict.items():
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ self[key] = value
+ setattr(self, key, value)
+
+ def __getattr__(self, item):
+ try:
+ return self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+ def __setattr__(self, key, value):
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ super(DictToAttrRecursive, self).__setitem__(key, value)
+ super().__setattr__(key, value)
+
+ def __delattr__(self, item):
+ try:
+ del self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+
+ssl_model = cnhubert.get_model()
+if is_half == True:
+ ssl_model = ssl_model.half().to(device)
+else:
+ ssl_model = ssl_model.to(device)
+
+
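+# Load a SoVITS checkpoint, infer its version from the text-embedding vocab
+# size (322 rows means v1), rebuild the synthesizer, persist the chosen path in
+# weight.json, and return Gradio updates for the language dropdowns.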
+def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
+ global vq_model, hps, version, dict_language
+ dict_s2 = torch.load(sovits_path, map_location="cpu")
+ hps = dict_s2["config"]
+ hps = DictToAttrRecursive(hps)
+ hps.model.semantic_frame_rate = "25hz"
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
+ hps.model.version = "v1"
+ else:
+ hps.model.version = "v2"
+ version = hps.model.version
+ # print("sovits版本:",hps.model.version)
+ vq_model = SynthesizerTrn(
+ hps.data.filter_length // 2 + 1,
+ hps.train.segment_size // hps.data.hop_length,
+ n_speakers=hps.data.n_speakers,
+ **hps.model
+ )
+ if ("pretrained" not in sovits_path):
+ del vq_model.enc_q
+ if is_half == True:
+ vq_model = vq_model.half().to(device)
+ else:
+ vq_model = vq_model.to(device)
+ vq_model.eval()
+ print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["SoVITS"][version]=sovits_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+ if prompt_language is not None and text_language is not None:
+ if prompt_language in list(dict_language.keys()):
+ prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
+ else:
+ prompt_text_update = {'__type__':'update', 'value':''}
+ prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
+ if text_language in list(dict_language.keys()):
+ text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
+ else:
+ text_update = {'__type__':'update', 'value':''}
+ text_language_update = {'__type__':'update', 'value':i18n("中文")}
+ return {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update
+
+
+
+change_sovits_weights(sovits_path)
+
+
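+# Load the GPT (text-to-semantic) checkpoint and persist its path in
+# weight.json; hz=50 semantic tokens per second times max_sec bounds the
+# autoregressive generation length via early_stop_num.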
+def change_gpt_weights(gpt_path):
+ global hz, max_sec, t2s_model, config
+ hz = 50
+ dict_s1 = torch.load(gpt_path, map_location="cpu")
+ config = dict_s1["config"]
+ max_sec = config["data"]["max_sec"]
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
+ t2s_model.load_state_dict(dict_s1["weight"])
+ if is_half == True:
+ t2s_model = t2s_model.half()
+ t2s_model = t2s_model.to(device)
+ t2s_model.eval()
+ total = sum([param.nelement() for param in t2s_model.parameters()])
+ print("Number of parameter: %.2fM" % (total / 1e6))
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["GPT"][version]=gpt_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+
+
+change_gpt_weights(gpt_path)
+
+
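+# Load a reference wav at the model's sampling rate, scale down peaks above
+# 1.0, and return its linear spectrogram as the decoder's timbre reference.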
+def get_spepc(hps, filename):
+ audio = load_audio(filename, int(hps.data.sampling_rate))
+ audio = torch.FloatTensor(audio)
+ maxx=audio.abs().max()
+ if(maxx>1):audio/=min(2,maxx)
+ audio_norm = audio
+ audio_norm = audio_norm.unsqueeze(0)
+ spec = spectrogram_torch(
+ audio_norm,
+ hps.data.filter_length,
+ hps.data.sampling_rate,
+ hps.data.hop_length,
+ hps.data.win_length,
+ center=False,
+ )
+ return spec
+
+def clean_text_inf(text, language, version):
+ phones, word2ph, norm_text = clean_text(text, language, version)
+ phones = cleaned_text_to_sequence(phones, version)
+ return phones, word2ph, norm_text
+
+dtype=torch.float16 if is_half == True else torch.float32
+def get_bert_inf(phones, word2ph, norm_text, language):
+ language=language.replace("all_","")
+ if language == "zh":
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
+ else:
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+
+ return bert
+
+
+splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
+
+
+def get_first(text):
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
+ text = re.split(pattern, text)[0].strip()
+ return text
+
+from text import chinese
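+# Per-language text frontend: "en"/"all_*" modes clean the whole text in one
+# language, while mixed/auto modes use LangSegment to split it into monolingual
+# runs whose phones and BERT features are concatenated. Inputs shorter than 6
+# phones are retried once with a leading "." for stability.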
+def get_phones_and_bert(text,language,version,final=False):
+ if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
+ language = language.replace("all_","")
+ if language == "en":
+ LangSegment.setfilters(["en"])
+ formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
+ else:
+ # CJK han characters can't be told apart by language, so trust the user's choice
+ formattext = text
+ while " " in formattext:
+ formattext = formattext.replace(" ", " ")
+ if language == "zh":
+ if re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"zh",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = get_bert_feature(norm_text, word2ph).to(device)
+ elif language == "yue" and re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"yue",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+ elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+ textlist=[]
+ langlist=[]
+ LangSegment.setfilters(["zh","ja","en","ko"])
+ if language == "auto":
+ for tmp in LangSegment.getTexts(text):
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ elif language == "auto_yue":
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "zh":
+ tmp["lang"] = "yue"
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ else:
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "en":
+ langlist.append(tmp["lang"])
+ else:
+ # CJK han characters can't be told apart by language, so trust the user's choice
+ langlist.append(language)
+ textlist.append(tmp["text"])
+ print(textlist)
+ print(langlist)
+ phones_list = []
+ bert_list = []
+ norm_text_list = []
+ for i in range(len(textlist)):
+ lang = langlist[i]
+ phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
+ bert = get_bert_inf(phones, word2ph, norm_text, lang)
+ phones_list.append(phones)
+ norm_text_list.append(norm_text)
+ bert_list.append(bert)
+ bert = torch.cat(bert_list, dim=1)
+ phones = sum(phones_list, [])
+ norm_text = ''.join(norm_text_list)
+
+ if not final and len(phones) < 6:
+ return get_phones_and_bert("." + text,language,version,final=True)
+
+ return phones,bert.to(dtype),norm_text
+
+
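+# Greedily merge consecutive segments until each reaches `threshold`
+# characters; a short trailing remainder is appended to the previous chunk.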
+def merge_short_text_in_array(texts, threshold):
+ if (len(texts)) < 2:
+ return texts
+ result = []
+ text = ""
+ for ele in texts:
+ text += ele
+ if len(text) >= threshold:
+ result.append(text)
+ text = ""
+ if (len(text) > 0):
+ if len(result) == 0:
+ result.append(text)
+ else:
+ result[len(result) - 1] += text
+ return result
+
+## cache key: ref_wav_path+prompt_text+prompt_language+text(single segment)+text_language+top_k+top_p+temperature
+# cache_tokens={}# no eviction policy implemented yet
+cache= {}
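+# state picks the output file written under 生成记录: "generate" saves
+# main_<first 30 chars of text>.wav, "regenerate" saves <text>.wav.
+# pause_duration (seconds) controls the zero-padded silence between segments,
+# and if_freeze reuses the cached semantic tokens from the previous run so only
+# speed/timbre change.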
+def get_tts_wav(state,ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=123,pause_duration=0.5):
+ global cache
+ file_name = text[:30]
+ file_name = file_name.strip()
+ if ref_wav_path:pass
+ else:gr.Warning(i18n('请上传参考音频'))
+ if text:pass
+ else:gr.Warning(i18n('请填入推理文本'))
+ t = []
+ if prompt_text is None or len(prompt_text) == 0:
+ ref_free = True
+ t0 = ttime()
+ prompt_language = dict_language[prompt_language]
+ text_language = dict_language[text_language]
+
+
+ if not ref_free:
+ prompt_text = prompt_text.strip("\n")
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+ print(i18n("实际输入的参考文本:"), prompt_text)
+ text = text.strip("\n")
+ # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
+
+ print(i18n("实际输入的目标文本:"), text)
+ zero_wav = np.zeros(
+ int(hps.data.sampling_rate * 0.3),
+ dtype=np.float16 if is_half == True else np.float32,
+ )
+ # zero_wav1 = np.zeros(
+ # int(hps.data.sampling_rate * 0.5),
+ # dtype=np.float16 if is_half == True else np.float32,
+ # )
+ # (by-katana: build the zero-padding segment from the new pause_duration argument)
+ pause_samples = int(hps.data.sampling_rate * pause_duration)
+ zero_wav1 = np.zeros(pause_samples, dtype=np.float16 if is_half else np.float32)
+ if not ref_free:
+ with torch.no_grad():
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
+ gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
+ wav16k = torch.from_numpy(wav16k)
+ zero_wav_torch = torch.from_numpy(zero_wav)
+ if is_half == True:
+ wav16k = wav16k.half().to(device)
+ zero_wav_torch = zero_wav_torch.half().to(device)
+ else:
+ wav16k = wav16k.to(device)
+ zero_wav_torch = zero_wav_torch.to(device)
+ wav16k = torch.cat([wav16k, zero_wav_torch])
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
+ "last_hidden_state"
+ ].transpose(
+ 1, 2
+ ) # .float()
+ codes = vq_model.extract_latent(ssl_content)
+ prompt_semantic = codes[0, 0]
+ prompt = prompt_semantic.unsqueeze(0).to(device)
+
+ t1 = ttime()
+ t.append(t1-t0)
+
+ if (how_to_cut == i18n("凑四句一切")):
+ text = cut1(text)
+ elif (how_to_cut == i18n("凑50字一切")):
+ text = cut2(text)
+ elif (how_to_cut == i18n("按中文句号。切")):
+ text = cut3(text)
+ elif (how_to_cut == i18n("按英文句号.切")):
+ text = cut4(text)
+ elif (how_to_cut == i18n("按标点符号切")):
+ text = cut5(text)
+ while "\n\n" in text:
+ text = text.replace("\n\n", "\n")
+ print(i18n("实际输入的目标文本(切句后):"), text)
+ texts = text.split("\n")
+ texts = process_text(texts)
+ texts = merge_short_text_in_array(texts, 5)
+ audio_opt = []
+ if not ref_free:
+ phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)
+
+ for i_text,text in enumerate(texts):
+ # skip blank lines in the target text so they don't raise errors
+ if (len(text.strip()) == 0):
+ continue
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
+ print(i18n("实际输入的目标文本(每句):"), text)
+ phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
+ if not ref_free:
+ bert = torch.cat([bert1, bert2], 1)
+ all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
+ else:
+ bert = bert2
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
+
+ bert = bert.to(device).unsqueeze(0)
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
+
+ t2 = ttime()
+ # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
+ # print(cache.keys(),if_freeze)
+ if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
+ else:
+ with torch.no_grad():
+ pred_semantic, idx = t2s_model.model.infer_panel(
+ all_phoneme_ids,
+ all_phoneme_len,
+ None if ref_free else prompt,
+ bert,
+ # prompt_phone_len=ph_offset,
+ top_k=top_k,
+ top_p=top_p,
+ temperature=temperature,
+ early_stop_num=hz * max_sec,
+ )
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
+ cache[i_text]=pred_semantic
+ t3 = ttime()
+ refers=[]
+ if(inp_refs):
+ for path in inp_refs:
+ try:
+ refer = get_spepc(hps, path.name).to(dtype).to(device)
+ refers.append(refer)
+ except:
+ traceback.print_exc()
+ if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
+ audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed).detach().cpu().numpy()[0, 0])
+ max_audio=np.abs(audio).max()# simple guard against 16-bit clipping
+ if max_audio>1:audio/=max_audio
+ audio_opt.append(audio)
+ # (by - katana: append the configurable zero-padding segment)
+ audio_opt.append(zero_wav1)
+ t4 = ttime()
+ t.extend([t2 - t1,t3 - t2, t4 - t3])
+ t1 = ttime()
+ print("%.3f\t%.3f\t%.3f\t%.3f" %
+ (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
+ )
+ concatenated_audio = np.concatenate(audio_opt, 0)
+ if state=='generate':
+ file_path = os.path.join(output_dir, f"main_{file_name}.wav")
+ write(file_path, hps.data.sampling_rate, (concatenated_audio * 32768).astype(np.int16))
+ elif state=='regenerate':
+ file_path = os.path.join(output_dir, f"{file_name}.wav")
+ write(file_path, hps.data.sampling_rate, (concatenated_audio * 32768).astype(np.int16))
+ yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
+ np.int16
+ )
+
+
+
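+# Split text at any punctuation in `splits`, keeping each mark attached to the
+# segment that precedes it.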
+def split(todo_text):
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
+ if todo_text[-1] not in splits:
+ todo_text += "。"
+ i_split_head = i_split_tail = 0
+ len_text = len(todo_text)
+ todo_texts = []
+ while 1:
+ if i_split_head >= len_text:
+ break # the text is guaranteed to end with punctuation, so the last segment was already appended
+ if todo_text[i_split_head] in splits:
+ i_split_head += 1
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
+ i_split_tail = i_split_head
+ else:
+ i_split_head += 1
+ return todo_texts
+
+
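+# cut1..cut5 implement the "怎么切" dropdown options: every four sentences,
+# every ~50 characters, by Chinese full stop, by English period, and by any
+# punctuation (cut5 leaves decimal points inside numbers intact).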
+def cut1(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ split_idx = list(range(0, len(inps), 4))
+ split_idx[-1] = None
+ if len(split_idx) > 1:
+ opts = []
+ for idx in range(len(split_idx) - 1):
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
+ else:
+ opts = [inp]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+def cut2(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ if len(inps) < 2:
+ return inp
+ opts = []
+ summ = 0
+ tmp_str = ""
+ for i in range(len(inps)):
+ summ += len(inps[i])
+ tmp_str += inps[i]
+ if summ > 50:
+ summ = 0
+ opts.append(tmp_str)
+ tmp_str = ""
+ if tmp_str != "":
+ opts.append(tmp_str)
+ # print(opts)
+ if len(opts) > 1 and len(opts[-1]) < 50: ## if the last chunk is too short, merge it into the previous one
+ opts[-2] = opts[-2] + opts[-1]
+ opts = opts[:-1]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+def cut3(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip("。").split("。")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+def cut4(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip(".").split(".")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+# contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
+def cut5(inp):
+ inp = inp.strip("\n")
+ punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
+ mergeitems = []
+ items = []
+
+ for i, char in enumerate(inp):
+ if char in punds:
+ if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
+ items.append(char)
+ else:
+ items.append(char)
+ mergeitems.append("".join(items))
+ items = []
+ else:
+ items.append(char)
+
+ if items:
+ mergeitems.append("".join(items))
+
+ opt = [item for item in mergeitems if not set(item).issubset(punds)]
+ return "\n".join(opt)
+
+
+def custom_sort_key(s):
+ # split the string into digit and non-digit runs
+ parts = re.split(r'(\d+)', s)
+ # convert digit runs to ints so "file10" sorts after "file2"
+ parts = [int(part) if part.isdigit() else part for part in parts]
+ return parts
+
+def process_text(texts):
+ _text=[]
+ if all(text in [None, " ", "\n",""] for text in texts):
+ raise ValueError(i18n("请输入有效文本"))
+ for text in texts:
+ if text in [None, " ", ""]:
+ pass
+ else:
+ _text.append(text)
+ return _text
+
+
+def change_choices():
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
+
+
+SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
+GPT_weight_root=["GPT_weights_v2","GPT_weights"]
+for path in SoVITS_weight_root+GPT_weight_root:
+ os.makedirs(path,exist_ok=True)
+
+
+def get_weights_names(GPT_weight_root, SoVITS_weight_root):
+ SoVITS_names = [i for i in pretrained_sovits_name]
+ for path in SoVITS_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
+ GPT_names = [i for i in pretrained_gpt_name]
+ for path in GPT_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
+ return SoVITS_names, GPT_names
+
+
+SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+
+def html_center(text, label='p'):
+ return f"""<div style="text-align: center; margin: 100; padding: 50;">
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
+ </div>"""
+
+def html_left(text, label='p'):
+ return f"""<div style="text-align: left; margin: 0; padding: 0;">
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
+ </div>"""
+
+# (by - katana: reference-audio dropdown list, populated from ./refwavs)
+reference_wavs=["选择参考音频或自己上传"]
+os.makedirs("./refwavs",exist_ok=True)
+for name in os.listdir("./refwavs"):
+ reference_wavs.append(name)
+# refresh the dropdown choices (may not always be needed)
+def change_ref_choices():
+ reference_wavs = [i18n("Please select the reference audio or upload it yourself.")]
+ for name in os.listdir("./refwavs/"):
+ reference_wavs.append(name)
+ return {"choices":reference_wavs, "__type__": "update"}
+
+
+# (by - katana: strip bracketed [speaker] tags from reference filenames)
+def replace_speaker(text):
+ return re.sub(r"\[.*?\]", "", text, flags=re.UNICODE)
+
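+# Selecting a dropdown entry fills the audio path and derives the prompt text
+# from the filename, with the extension and bracketed [speaker] tags stripped.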
+def change_wav(audio_path):
+ text = audio_path.replace(".wav","").replace(".mp3","").replace(".WAV","")
+ text = replace_speaker(text)
+ return f"./refwavs/{audio_path}",text
+
+# (by - katana: split the text and send the pieces to the textboxes below)
+def split_text_and_fill_boxes(input_text, *textboxes):
+ sentences = []
+ # split each line at Chinese full stops
+ for line in input_text.split('\n'):
+ sentences.extend(line.split('。'))
+ # drop empty strings and re-append the full stop to each sentence
+ sentences = [sentence.strip()+'。' for sentence in sentences if sentence.strip()]
+
+ # update every textbox's content
+ updates = []
+ for i, textbox in enumerate(textboxes):
+ if i < len(sentences):
+ updates.append(gr.update(value=sentences[i]))
+ else:
+ updates.append(gr.update(value=""))
+ return updates
+# (by - katana: split by paragraph)
+def split_by_n(input_text, *textboxes):
+ # remove fullwidth parentheses and their contents
+ cleaned_text = re.sub(r"\(.*?\)", "", input_text)
+ # split the text at newlines
+ sentences = [line.strip() for line in cleaned_text.split('\n') if line.strip()]
+
+ # update every textbox's content
+ updates = []
+ for i, textbox in enumerate(textboxes):
+ if i < len(sentences):
+ updates.append(gr.update(value=sentences[i]))
+ else:
+ updates.append(gr.update(value=""))
+ return updates
+# (by - katana: hot-word replacement; expects GPT_SoVITS/hotwords.json to exist)
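+# e.g. with the shipped hotwords.json, "2024年" is rewritten to "二零二四年"
+# before synthesis so numerals are read out as intended.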
+def hot_replace(input_text):
+ with open('./GPT_SoVITS/hotwords.json', 'r', encoding='utf-8') as file:
+ hotwords_dict = json.load(file)
+ for key, value in hotwords_dict.items():
+ input_text = input_text.replace(key, value)
+ return gr.update(value=input_text)
+
+def open_folder_callback():
+ folder_path = "生成记录" # replace with your actual output folder if needed
+ subprocess.run(['explorer', folder_path]) # Windows-only: opens the folder in Explorer
+
+with gr.Blocks(title="GPT-SoVITS WebUI") as app:
+ state=gr.State("generate")
+ gr.Markdown(
+ value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
+ )
+ with gr.Tabs():
+ with gr.TabItem(i18n("语音生成")): # 提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
+ with gr.Group():
+ gr.Markdown(html_center(i18n("模型切换"),'h3'))
+ with gr.Row():
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, scale=14)
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, scale=14)
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
+ gr.Markdown(html_center(i18n("*请上传并填写参考信息"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=16):
+ inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
+ with gr.Row():
+ # (by - katana: reference-audio dropdown)
+ wavs_dropdown = gr.Dropdown(label="参考音频列表",choices=reference_wavs,value="选择参考音频或自己上传",interactive=True,scale=5)
+ refresh_ref_button=gr.Button("刷新",scale=1)
+ refresh_ref_button.click(fn=change_ref_choices,inputs=[],outputs=wavs_dropdown)
+ with gr.Column(scale=13):
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True, show_label=True,scale=1)
+ gr.Markdown(html_left(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开。<br>开启后无视填写的参考文本。")))
+ prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5,scale=1)
+ wavs_dropdown.change(change_wav,[wavs_dropdown],[inp_ref,prompt_text])
+ with gr.Column(scale=14):
+ prompt_language = gr.Dropdown(
+ label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文"),
+ )
+ inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"),file_count="multiple")
+ gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=13):
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
+ with gr.Column(scale=7):
+ text_language = gr.Dropdown(
+ label=i18n("需要合成的语种")+i18n(".限制范围越小判别效果越好。"), choices=list(dict_language.keys()), value=i18n("中文"), scale=1
+ )
+ how_to_cut = gr.Dropdown(
+ label=i18n("怎么切"),
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
+ value=i18n("按中文句号。切"),
+ interactive=True, scale=1
+ )
+ gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
+ if_freeze=gr.Checkbox(label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), value=False, interactive=True,show_label=True, scale=1)
+ speed = gr.Slider(minimum=0.6,maximum=1.65,step=0.05,label=i18n("语速"),value=1,interactive=True, scale=1)
+ pause_duration = gr.Slider(
+ minimum=0.01,
+ maximum=2,
+ step=0.01,
+ label=i18n("音频片段间隔时长(秒)"),
+ value=0.2,
+ interactive=True,
+ scale=1
+ )
+ gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k(采样率,越高语气越丰富)"),value=15,interactive=True, scale=1)
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p(建议不动)"),value=1,interactive=True, scale=1)
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature(越低越稳定,但是语气稍有平淡)"),value=1,interactive=True, scale=1)
+ # (by - katana: hot-word replacement button)
+ replace_hot_words = gr.Button("替换热词")
+ replace_hot_words.click(fn=hot_replace, inputs=text, outputs=text)
+ # with gr.Column():
+ # gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
+ # phoneme=gr.Textbox(label=i18n("音素框"), value="")
+ # get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
+ with gr.Row():
+ inference_button = gr.Button(i18n("合成语音"), variant="primary", size='lg', scale=25)
+ output = gr.Audio(label=i18n("输出的语音"), scale=14)
+ open_folder = gr.Button(i18n("打开生成文件夹"), variant="primary", size='lg', scale=25)
+ open_folder.click(open_folder_callback)
+
+ inference_button.click(
+ get_tts_wav,
+ [state,inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration],
+ [output],
+ )
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown,prompt_language,text_language], [prompt_language,text_language,prompt_text,prompt_language,text,text_language])
+ GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
+
+ # (by - katana: proofread & regenerate tab)
+ with gr.TabItem(i18n("校对生成")):
+ split_button = gr.Button(value="切分文本(前一页整段按句号切分,方便单独生成)", variant="primary", size='lg')
+ # (by - katana 按段落切分按钮)
+ split_button2 = gr.Button(value="按段落切分", variant="primary", size='lg')
+
+ with gr.Accordion("重新生成列表"):
+ textboxes = []
+ for i in range(100): # create 100 rows, one per split sentence
+ with gr.Row(): # each row is its own horizontal layout
+ text_input = gr.Textbox(label=f"切分文本 {i + 1}", scale=4)
+ button = gr.Button(value="重新生成", scale=1)
+ audio_output = gr.Audio(scale=2)
+ # bind the button to get_tts_wav, passing this row's textbox as the target text
+ button.click(fn=get_tts_wav, inputs=[gr.State("regenerate"),inp_ref, prompt_text, prompt_language, text_input, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration], outputs=audio_output)
+ textboxes.append(text_input)
+ split_button.click(fn=split_text_and_fill_boxes, inputs=[text, *textboxes], outputs=textboxes)
+ split_button2.click(fn=split_by_n, inputs=[text, *textboxes], outputs=textboxes)
+ # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
+ # with gr.Row():
+ # text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
+ # button1 = gr.Button(i18n("凑四句一切"), variant="primary")
+ # button2 = gr.Button(i18n("凑50字一切"), variant="primary")
+ # button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
+ # button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
+ # button5 = gr.Button(i18n("按标点符号切"), variant="primary")
+ # text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
+ # button1.click(cut1, [text_inp], [text_opt])
+ # button2.click(cut2, [text_inp], [text_opt])
+ # button3.click(cut3, [text_inp], [text_opt])
+ # button4.click(cut4, [text_inp], [text_opt])
+ # button5.click(cut5, [text_inp], [text_opt])
+ # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))
+
+if __name__ == '__main__':
+ app.queue().launch(#concurrency_count=511, max_size=1022
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=infer_ttswebui,
+ quiet=True,
+ )
diff --git a/GPT_SoVITS/inference_webui_checkpoint_backup.py b/GPT_SoVITS/inference_webui_checkpoint_backup.py
new file mode 100644
index 00000000..61bb9b29
--- /dev/null
+++ b/GPT_SoVITS/inference_webui_checkpoint_backup.py
@@ -0,0 +1,872 @@
+'''
+Mixed Chinese/English recognition
+Mixed Japanese/English recognition
+Multilingual: segment the input and detect each run's language
+Treat all text as Chinese
+Treat all text as English
+Treat all text as Japanese
+'''
+import logging
+import traceback
+
+logging.getLogger("markdown_it").setLevel(logging.ERROR)
+logging.getLogger("urllib3").setLevel(logging.ERROR)
+logging.getLogger("httpcore").setLevel(logging.ERROR)
+logging.getLogger("httpx").setLevel(logging.ERROR)
+logging.getLogger("asyncio").setLevel(logging.ERROR)
+logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
+logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
+logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
+import LangSegment, os, re, sys, json
+import pdb
+import torch
+
+version=os.environ.get("version","v2")
+pretrained_sovits_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
+pretrained_gpt_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
+
+_ =[[],[]]
+for i in range(2):
+ if os.path.exists(pretrained_gpt_name[i]):
+ _[0].append(pretrained_gpt_name[i])
+ if os.path.exists(pretrained_sovits_name[i]):
+ _[-1].append(pretrained_sovits_name[i])
+pretrained_gpt_name,pretrained_sovits_name = _
+
+
+
+if os.path.exists(f"./weight.json"):
+ pass
+else:
+ with open(f"./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)
+
+with open(f"./weight.json", 'r', encoding="utf-8") as file:
+ weight_data = file.read()
+ weight_data=json.loads(weight_data)
+ gpt_path = os.environ.get(
+ "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
+ sovits_path = os.environ.get(
+ "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
+ if isinstance(gpt_path,list):
+ gpt_path = gpt_path[0]
+ if isinstance(sovits_path,list):
+ sovits_path = sovits_path[0]
+
+# gpt_path = os.environ.get(
+# "gpt_path", pretrained_gpt_name
+# )
+# sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
+cnhubert_base_path = os.environ.get(
+ "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+)
+bert_path = os.environ.get(
+ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
+)
+infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
+infer_ttswebui = int(infer_ttswebui)
+is_share = os.environ.get("is_share", "False")
+is_share = eval(is_share)
+if "_CUDA_VISIBLE_DEVICES" in os.environ:
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
+is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
+punctuation = set(['!', '?', '…', ',', '.', '-'," "])
+import gradio as gr
+from transformers import AutoModelForMaskedLM, AutoTokenizer
+import numpy as np
+import librosa
+from feature_extractor import cnhubert
+
+cnhubert.cnhubert_base_path = cnhubert_base_path
+
+from module.models import SynthesizerTrn
+from AR.models.t2s_lightning_module import Text2SemanticLightningModule
+from text import cleaned_text_to_sequence
+from text.cleaner import clean_text
+from time import time as ttime
+from module.mel_processing import spectrogram_torch
+from tools.my_utils import load_audio
+from tools.i18n.i18n import I18nAuto, scan_language_list
+
+language=os.environ.get("language","Auto")
+language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
+i18n = I18nAuto(language=language)
+
+# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。
+
+if torch.cuda.is_available():
+ device = "cuda"
+else:
+ device = "cpu"
+
+dict_language_v1 = {
+ i18n("中文"): "all_zh",# treat all text as Chinese
+ i18n("英文"): "en",# treat all text as English (unchanged)
+ i18n("日文"): "all_ja",# treat all text as Japanese
+ i18n("中英混合"): "zh",# mixed Chinese/English (unchanged)
+ i18n("日英混合"): "ja",# mixed Japanese/English (unchanged)
+ i18n("多语种混合"): "auto",# multilingual: auto-segment and detect languages
+}
+dict_language_v2 = {
+ i18n("中文"): "all_zh",# treat all text as Chinese
+ i18n("英文"): "en",# treat all text as English (unchanged)
+ i18n("日文"): "all_ja",# treat all text as Japanese
+ i18n("粤语"): "all_yue",# treat all text as Cantonese
+ i18n("韩文"): "all_ko",# treat all text as Korean
+ i18n("中英混合"): "zh",# mixed Chinese/English (unchanged)
+ i18n("日英混合"): "ja",# mixed Japanese/English (unchanged)
+ i18n("粤英混合"): "yue",# mixed Cantonese/English (unchanged)
+ i18n("韩英混合"): "ko",# mixed Korean/English (unchanged)
+ i18n("多语种混合"): "auto",# multilingual: auto-segment and detect languages
+ i18n("多语种混合(粤语)"): "auto_yue",# multilingual with Cantonese for Chinese runs
+}
+dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+
+tokenizer = AutoTokenizer.from_pretrained(bert_path)
+bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
+if is_half == True:
+ bert_model = bert_model.half().to(device)
+else:
+ bert_model = bert_model.to(device)
+
+
+def get_bert_feature(text, word2ph):
+ with torch.no_grad():
+ inputs = tokenizer(text, return_tensors="pt")
+ for i in inputs:
+ inputs[i] = inputs[i].to(device)
+ res = bert_model(**inputs, output_hidden_states=True)
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
+ assert len(word2ph) == len(text)
+ phone_level_feature = []
+ for i in range(len(word2ph)):
+ repeat_feature = res[i].repeat(word2ph[i], 1)
+ phone_level_feature.append(repeat_feature)
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
+ return phone_level_feature.T
+
+
+class DictToAttrRecursive(dict):
+ def __init__(self, input_dict):
+ super().__init__(input_dict)
+ for key, value in input_dict.items():
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ self[key] = value
+ setattr(self, key, value)
+
+ def __getattr__(self, item):
+ try:
+ return self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+ def __setattr__(self, key, value):
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ super(DictToAttrRecursive, self).__setitem__(key, value)
+ super().__setattr__(key, value)
+
+ def __delattr__(self, item):
+ try:
+ del self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+
+ssl_model = cnhubert.get_model()
+if is_half == True:
+ ssl_model = ssl_model.half().to(device)
+else:
+ ssl_model = ssl_model.to(device)
+
+
+def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
+ global vq_model, hps, version, dict_language
+ dict_s2 = torch.load(sovits_path, map_location="cpu")
+ hps = dict_s2["config"]
+ hps = DictToAttrRecursive(hps)
+ hps.model.semantic_frame_rate = "25hz"
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
+ hps.model.version = "v1"
+ else:
+ hps.model.version = "v2"
+ version = hps.model.version
+ # print("sovits版本:",hps.model.version)
+ vq_model = SynthesizerTrn(
+ hps.data.filter_length // 2 + 1,
+ hps.train.segment_size // hps.data.hop_length,
+ n_speakers=hps.data.n_speakers,
+ **hps.model
+ )
+ if ("pretrained" not in sovits_path):
+ del vq_model.enc_q
+ if is_half == True:
+ vq_model = vq_model.half().to(device)
+ else:
+ vq_model = vq_model.to(device)
+ vq_model.eval()
+ print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["SoVITS"][version]=sovits_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+ if prompt_language is not None and text_language is not None:
+ if prompt_language in list(dict_language.keys()):
+ prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
+ else:
+ prompt_text_update = {'__type__':'update', 'value':''}
+ prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
+ if text_language in list(dict_language.keys()):
+ text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
+ else:
+ text_update = {'__type__':'update', 'value':''}
+ text_language_update = {'__type__':'update', 'value':i18n("中文")}
+ return {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update
+
+
+
+change_sovits_weights(sovits_path)
+
+
+def change_gpt_weights(gpt_path):
+ global hz, max_sec, t2s_model, config
+ hz = 50
+ dict_s1 = torch.load(gpt_path, map_location="cpu")
+ config = dict_s1["config"]
+ max_sec = config["data"]["max_sec"]
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
+ t2s_model.load_state_dict(dict_s1["weight"])
+ if is_half == True:
+ t2s_model = t2s_model.half()
+ t2s_model = t2s_model.to(device)
+ t2s_model.eval()
+ total = sum([param.nelement() for param in t2s_model.parameters()])
+ print("Number of parameter: %.2fM" % (total / 1e6))
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["GPT"][version]=gpt_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+
+
+change_gpt_weights(gpt_path)
+
+
+def get_spepc(hps, filename):
+ audio = load_audio(filename, int(hps.data.sampling_rate))
+ audio = torch.FloatTensor(audio)
+ maxx=audio.abs().max()
+ if(maxx>1):audio/=min(2,maxx)
+ audio_norm = audio
+ audio_norm = audio_norm.unsqueeze(0)
+ spec = spectrogram_torch(
+ audio_norm,
+ hps.data.filter_length,
+ hps.data.sampling_rate,
+ hps.data.hop_length,
+ hps.data.win_length,
+ center=False,
+ )
+ return spec
+
+def clean_text_inf(text, language, version):
+ phones, word2ph, norm_text = clean_text(text, language, version)
+ phones = cleaned_text_to_sequence(phones, version)
+ return phones, word2ph, norm_text
+
+dtype=torch.float16 if is_half == True else torch.float32
+def get_bert_inf(phones, word2ph, norm_text, language):
+ language=language.replace("all_","")
+ if language == "zh":
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
+ else:
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+
+ return bert
+
+
+splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
+
+
+def get_first(text):
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
+ text = re.split(pattern, text)[0].strip()
+ return text
+
+from text import chinese
+def get_phones_and_bert(text,language,version,final=False):
+ if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
+ language = language.replace("all_","")
+ if language == "en":
+ LangSegment.setfilters(["en"])
+ formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
+ else:
+ # 因无法区别中日韩文汉字,以用户输入为准
+ formattext = text
+ while " " in formattext:
+ formattext = formattext.replace(" ", " ")
+ if language == "zh":
+ if re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"zh",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = get_bert_feature(norm_text, word2ph).to(device)
+ elif language == "yue" and re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"yue",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+ elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+ textlist=[]
+ langlist=[]
+ LangSegment.setfilters(["zh","ja","en","ko"])
+ if language == "auto":
+ for tmp in LangSegment.getTexts(text):
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ elif language == "auto_yue":
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "zh":
+ tmp["lang"] = "yue"
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ else:
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "en":
+ langlist.append(tmp["lang"])
+ else:
+ # 因无法区别中日韩文汉字,以用户输入为准
+ langlist.append(language)
+ textlist.append(tmp["text"])
+ print(textlist)
+ print(langlist)
+ phones_list = []
+ bert_list = []
+ norm_text_list = []
+ for i in range(len(textlist)):
+ lang = langlist[i]
+ phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
+ bert = get_bert_inf(phones, word2ph, norm_text, lang)
+ phones_list.append(phones)
+ norm_text_list.append(norm_text)
+ bert_list.append(bert)
+ bert = torch.cat(bert_list, dim=1)
+ phones = sum(phones_list, [])
+ norm_text = ''.join(norm_text_list)
+
+ if not final and len(phones) < 6:
+ return get_phones_and_bert("." + text,language,version,final=True)
+
+ return phones,bert.to(dtype),norm_text
+
+
+def merge_short_text_in_array(texts, threshold):
+ if (len(texts)) < 2:
+ return texts
+ result = []
+ text = ""
+ for ele in texts:
+ text += ele
+ if len(text) >= threshold:
+ result.append(text)
+ text = ""
+ if (len(text) > 0):
+ if len(result) == 0:
+ result.append(text)
+ else:
+ result[len(result) - 1] += text
+ return result
+
+##ref_wav_path+prompt_text+prompt_language+text(单个)+text_language+top_k+top_p+temperature
+# cache_tokens={}#暂未实现清理机制
+cache= {}
+def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=123,pause_duration=0.5):
+ global cache
+ if ref_wav_path:pass
+ else:gr.Warning(i18n('请上传参考音频'))
+ if text:pass
+ else:gr.Warning(i18n('请填入推理文本'))
+ t = []
+ if prompt_text is None or len(prompt_text) == 0:
+ ref_free = True
+ t0 = ttime()
+ prompt_language = dict_language[prompt_language]
+ text_language = dict_language[text_language]
+
+
+ if not ref_free:
+ prompt_text = prompt_text.strip("\n")
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+ print(i18n("实际输入的参考文本:"), prompt_text)
+ text = text.strip("\n")
+ # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
+
+ print(i18n("实际输入的目标文本:"), text)
+ zero_wav = np.zeros(
+ int(hps.data.sampling_rate * 0.3),
+ dtype=np.float16 if is_half == True else np.float32,
+ )
+ # zero_wav1 = np.zeros(
+ # int(hps.data.sampling_rate * 0.5),
+ # dtype=np.float16 if is_half == True else np.float32,
+ # )
+ # (by-katana 重新生成0填充音频片段,传参加一个pause_duration)
+ pause_samples = int(hps.data.sampling_rate * pause_duration)
+ zero_wav1 = np.zeros(pause_samples, dtype=np.float16 if is_half else np.float32)
+ if not ref_free:
+ with torch.no_grad():
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
+ gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
+ wav16k = torch.from_numpy(wav16k)
+ zero_wav_torch = torch.from_numpy(zero_wav)
+ if is_half == True:
+ wav16k = wav16k.half().to(device)
+ zero_wav_torch = zero_wav_torch.half().to(device)
+ else:
+ wav16k = wav16k.to(device)
+ zero_wav_torch = zero_wav_torch.to(device)
+ wav16k = torch.cat([wav16k, zero_wav_torch])
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
+ "last_hidden_state"
+ ].transpose(
+ 1, 2
+ ) # .float()
+ codes = vq_model.extract_latent(ssl_content)
+ prompt_semantic = codes[0, 0]
+ prompt = prompt_semantic.unsqueeze(0).to(device)
+
+ t1 = ttime()
+ t.append(t1-t0)
+
+ if (how_to_cut == i18n("凑四句一切")):
+ text = cut1(text)
+ elif (how_to_cut == i18n("凑50字一切")):
+ text = cut2(text)
+ elif (how_to_cut == i18n("按中文句号。切")):
+ text = cut3(text)
+ elif (how_to_cut == i18n("按英文句号.切")):
+ text = cut4(text)
+ elif (how_to_cut == i18n("按标点符号切")):
+ text = cut5(text)
+ while "\n\n" in text:
+ text = text.replace("\n\n", "\n")
+ print(i18n("实际输入的目标文本(切句后):"), text)
+ texts = text.split("\n")
+ texts = process_text(texts)
+ texts = merge_short_text_in_array(texts, 5)
+ audio_opt = []
+ if not ref_free:
+ phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)
+
+ for i_text,text in enumerate(texts):
+ # 解决输入目标文本的空行导致报错的问题
+ if (len(text.strip()) == 0):
+ continue
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
+ print(i18n("实际输入的目标文本(每句):"), text)
+ phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
+ if not ref_free:
+ bert = torch.cat([bert1, bert2], 1)
+ all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
+ else:
+ bert = bert2
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
+
+ bert = bert.to(device).unsqueeze(0)
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
+
+ t2 = ttime()
+ # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
+ # print(cache.keys(),if_freeze)
+ if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
+ else:
+ with torch.no_grad():
+ pred_semantic, idx = t2s_model.model.infer_panel(
+ all_phoneme_ids,
+ all_phoneme_len,
+ None if ref_free else prompt,
+ bert,
+ # prompt_phone_len=ph_offset,
+ top_k=top_k,
+ top_p=top_p,
+ temperature=temperature,
+ early_stop_num=hz * max_sec,
+ )
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
+ cache[i_text]=pred_semantic
+ t3 = ttime()
+ refers=[]
+ if(inp_refs):
+ for path in inp_refs:
+ try:
+ refer = get_spepc(hps, path.name).to(dtype).to(device)
+ refers.append(refer)
+ except:
+ traceback.print_exc()
+ if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
+ audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed).detach().cpu().numpy()[0, 0])
+ max_audio=np.abs(audio).max()#简单防止16bit爆音
+ if max_audio>1:audio/=max_audio
+ audio_opt.append(audio)
+ # (by - katana,append新的zero_wave)
+ audio_opt.append(zero_wav1)
+ t4 = ttime()
+ t.extend([t2 - t1,t3 - t2, t4 - t3])
+ t1 = ttime()
+ print("%.3f\t%.3f\t%.3f\t%.3f" %
+ (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
+ )
+ yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
+ np.int16
+ )
+
+def split(todo_text):
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
+ if todo_text[-1] not in splits:
+ todo_text += "。"
+ i_split_head = i_split_tail = 0
+ len_text = len(todo_text)
+ todo_texts = []
+ while 1:
+ if i_split_head >= len_text:
+ break # 结尾一定有标点,所以直接跳出即可,最后一段在上次已加入
+ if todo_text[i_split_head] in splits:
+ i_split_head += 1
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
+ i_split_tail = i_split_head
+ else:
+ i_split_head += 1
+ return todo_texts
+
+
+def cut1(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ split_idx = list(range(0, len(inps), 4))
+ split_idx[-1] = None
+ if len(split_idx) > 1:
+ opts = []
+ for idx in range(len(split_idx) - 1):
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
+ else:
+ opts = [inp]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+def cut2(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ if len(inps) < 2:
+ return inp
+ opts = []
+ summ = 0
+ tmp_str = ""
+ for i in range(len(inps)):
+ summ += len(inps[i])
+ tmp_str += inps[i]
+ if summ > 50:
+ summ = 0
+ opts.append(tmp_str)
+ tmp_str = ""
+ if tmp_str != "":
+ opts.append(tmp_str)
+ # print(opts)
+ if len(opts) > 1 and len(opts[-1]) < 50: ##如果最后一个太短了,和前一个合一起
+ opts[-2] = opts[-2] + opts[-1]
+ opts = opts[:-1]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+def cut3(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip("。").split("。")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+def cut4(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip(".").split(".")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+# contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
+def cut5(inp):
+ inp = inp.strip("\n")
+ punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
+ mergeitems = []
+ items = []
+
+ for i, char in enumerate(inp):
+ if char in punds:
+ if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
+ items.append(char)
+ else:
+ items.append(char)
+ mergeitems.append("".join(items))
+ items = []
+ else:
+ items.append(char)
+
+ if items:
+ mergeitems.append("".join(items))
+
+ opt = [item for item in mergeitems if not set(item).issubset(punds)]
+ return "\n".join(opt)
+
+
+def custom_sort_key(s):
+ # split the string into digit and non-digit runs
+ parts = re.split(r'(\d+)', s)
+ # convert digit runs to ints so "file10" sorts after "file2"
+ parts = [int(part) if part.isdigit() else part for part in parts]
+ return parts
+
+def process_text(texts):
+ _text=[]
+ if all(text in [None, " ", "\n",""] for text in texts):
+ raise ValueError(i18n("请输入有效文本"))
+ for text in texts:
+ if text in [None, " ", ""]:
+ pass
+ else:
+ _text.append(text)
+ return _text
+
+
+def change_choices():
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
+
+
+SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
+GPT_weight_root=["GPT_weights_v2","GPT_weights"]
+for path in SoVITS_weight_root+GPT_weight_root:
+ os.makedirs(path,exist_ok=True)
+
+
+def get_weights_names(GPT_weight_root, SoVITS_weight_root):
+ SoVITS_names = [i for i in pretrained_sovits_name]
+ for path in SoVITS_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
+ GPT_names = [i for i in pretrained_gpt_name]
+ for path in GPT_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
+ return SoVITS_names, GPT_names
+
+
+SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+
+def html_center(text, label='p'):
+ return f"""<div style="text-align: center; margin: 100; padding: 50;">
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
+ </div>"""
+
+def html_left(text, label='p'):
+ return f"""<div style="text-align: left; margin: 0; padding: 0;">
+ <{label} style="margin: 0; padding: 0;">{text}</{label}>
+ </div>"""
+
+# (by - katana: reference-audio dropdown list, populated from ./refwavs)
+reference_wavs=["选择参考音频或自己上传"]
+os.makedirs("./refwavs",exist_ok=True)
+for name in os.listdir("./refwavs"):
+ reference_wavs.append(name)
+# refresh the dropdown choices (may not always be needed)
+def change_ref_choices():
+ reference_wavs = [i18n("Please select the reference audio or upload it yourself.")]
+ for name in os.listdir("./refwavs/"):
+ reference_wavs.append(name)
+ return {"choices":reference_wavs, "__type__": "update"}
+
+
+# (by - katana 参考音频列表移除[])
+def replace_speaker(text):
+ return re.sub(r"\[.*?\]", "", text, flags=re.UNICODE)
+
+def change_wav(audio_path):
+ text = audio_path.replace(".wav","").replace(".mp3","").replace(".WAV","")
+ text = replace_speaker(text)
+ return f"./refwavs/{audio_path}",text
+
+#(by - katana 切分文本发送到下面文字框中)
+def split_text_and_fill_boxes(input_text, *textboxes):
+ sentences = []
+ # 按句号切分文本
+ for line in input_text.split('\n'):
+ sentences.extend(line.split('。'))
+ # 去除空字符串
+ sentences = [sentence.strip()+'。' for sentence in sentences if sentence.strip()]
+
+ # 更新每个文本框的内容
+ updates = []
+ for i, textbox in enumerate(textboxes):
+ if i < len(sentences):
+ updates.append(gr.update(value=sentences[i]))
+ else:
+ updates.append(gr.update(value=""))
+ return updates
+# (by - katana 按照段落切分)
+def split_by_n(input_text, *textboxes):
+ # 去掉全角括号及括号里的内容
+ cleaned_text = re.sub(r"\(.*?\)", "", input_text)
+ # 按换行符切分文本
+ sentences = [line.strip() for line in cleaned_text.split('\n') if line.strip()]
+
+ # 更新每个文本框的内容
+ updates = []
+ for i, textbox in enumerate(textboxes):
+ if i < len(sentences):
+ updates.append(gr.update(value=sentences[i]))
+ else:
+ updates.append(gr.update(value=""))
+ return updates
+# (by - katana: hotword replacement; reads GPT_SoVITS/hotwords.json)
+def hot_replace(input_text):
+ with open('./GPT_SoVITS/hotwords.json', 'r', encoding='utf-8') as file:
+ hotwords_dict = json.load(file)
+ for key, value in hotwords_dict.items():
+ input_text = input_text.replace(key, value)
+ return gr.update(value=input_text)
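+# Usage sketch with a hypothetical entry (editor's note): if hotwords.json
+# contained {"Q3": "third quarter"}, hot_replace("Q3 report") would fill the
+# textbox with "third quarter report". Replacements run in dict order, so
+# longer keys should be listed before their substrings.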
+
+with gr.Blocks(title="GPT-SoVITS WebUI") as app:
+ gr.Markdown(
+ value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
+ )
+ with gr.Tabs():
+ with gr.TabItem(i18n("语音生成")): # 提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
+ with gr.Group():
+ gr.Markdown(html_center(i18n("模型切换"),'h3'))
+ with gr.Row():
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, scale=14)
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, scale=14)
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
+ gr.Markdown(html_center(i18n("*请上传并填写参考信息"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=16):
+ inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
+ with gr.Row():
+                        # (by - katana: reference-audio dropdown)
+                        wavs_dropdown = gr.Dropdown(label="参考音频列表",choices=reference_wavs,value="选择参考音频或自己上传",interactive=True,scale=5)
+ refresh_ref_button=gr.Button("刷新",scale=1)
+ refresh_ref_button.click(fn=change_ref_choices,inputs=[],outputs=wavs_dropdown)
+ with gr.Column(scale=13):
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True, show_label=True,scale=1)
+ gr.Markdown(html_left(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开。
开启后无视填写的参考文本。")))
+ prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5,scale=1)
+ wavs_dropdown.change(change_wav,[wavs_dropdown],[inp_ref,prompt_text])
+ with gr.Column(scale=14):
+ prompt_language = gr.Dropdown(
+ label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文"),
+ )
+ inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"),file_count="multiple")
+ gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=13):
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
+ with gr.Column(scale=7):
+ text_language = gr.Dropdown(
+ label=i18n("需要合成的语种")+i18n(".限制范围越小判别效果越好。"), choices=list(dict_language.keys()), value=i18n("中文"), scale=1
+ )
+ how_to_cut = gr.Dropdown(
+ label=i18n("怎么切"),
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
+ value=i18n("按中文句号。切"),
+ interactive=True, scale=1
+ )
+ gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
+ if_freeze=gr.Checkbox(label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), value=False, interactive=True,show_label=True, scale=1)
+ speed = gr.Slider(minimum=0.6,maximum=1.65,step=0.05,label=i18n("语速"),value=1,interactive=True, scale=1)
+ pause_duration = gr.Slider(
+ minimum=0.01,
+ maximum=2,
+ step=0.01,
+ label=i18n("音频片段间隔时长(秒)"),
+ value=0.2,
+ interactive=True,
+ scale=1
+ )
+ gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k(采样率,越高语气越丰富)"),value=15,interactive=True, scale=1)
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p(建议不动)"),value=1,interactive=True, scale=1)
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature(越低越稳定,但是语气稍有平淡)"),value=1,interactive=True, scale=1)
+                    # (by - katana: hotword-replacement button)
+ replace_hot_words = gr.Button("替换热词")
+ replace_hot_words.click(fn=hot_replace, inputs=text, outputs=text)
+ # with gr.Column():
+ # gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
+ # phoneme=gr.Textbox(label=i18n("音素框"), value="")
+ # get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
+ with gr.Row():
+ inference_button = gr.Button(i18n("合成语音"), variant="primary", size='lg', scale=25)
+ output = gr.Audio(label=i18n("输出的语音"), scale=14)
+
+ inference_button.click(
+ get_tts_wav,
+ [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration],
+ [output],
+ )
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown,prompt_language,text_language], [prompt_language,text_language,prompt_text,prompt_language,text,text_language])
+ GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
+
+        # (by - katana: proofreading tab)
+        with gr.TabItem(i18n("校对生成")):
+ split_button = gr.Button(value="切分文本(前一页整段按句号切分,方便单独生成)", variant="primary", size='lg')
+            # (by - katana: split-by-paragraph button)
+ split_button2 = gr.Button(value="按段落切分", variant="primary", size='lg')
+
+ with gr.Accordion("重新生成列表"):
+ textboxes = []
+                for i in range(100):  # create 100 rows, one per split segment
+                    with gr.Row():  # each iteration adds one horizontal row
+ text_input = gr.Textbox(label=f"切分文本 {i + 1}", scale=4)
+ button = gr.Button(value="重新生成", scale=1)
+ audio_output = gr.Audio(scale=2)
+                        # Bind the button to get_tts_wav, passing this textbox's text as input
+ button.click(fn=get_tts_wav, inputs=[inp_ref, prompt_text, prompt_language, text_input, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration], outputs=audio_output)
+ textboxes.append(text_input)
+ split_button.click(fn=split_text_and_fill_boxes, inputs=[text, *textboxes], outputs=textboxes)
+ split_button2.click(fn=split_by_n, inputs=[text, *textboxes], outputs=textboxes)
+ # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
+ # with gr.Row():
+ # text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
+ # button1 = gr.Button(i18n("凑四句一切"), variant="primary")
+ # button2 = gr.Button(i18n("凑50字一切"), variant="primary")
+ # button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
+ # button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
+ # button5 = gr.Button(i18n("按标点符号切"), variant="primary")
+ # text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
+ # button1.click(cut1, [text_inp], [text_opt])
+ # button2.click(cut2, [text_inp], [text_opt])
+ # button3.click(cut3, [text_inp], [text_opt])
+ # button4.click(cut4, [text_inp], [text_opt])
+ # button5.click(cut5, [text_inp], [text_opt])
+ # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))
+
+if __name__ == '__main__':
+ app.queue().launch(#concurrency_count=511, max_size=1022
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=infer_ttswebui,
+ quiet=True,
+ )
diff --git a/GPT_SoVITS/inference_webui_model_test.py b/GPT_SoVITS/inference_webui_model_test.py
new file mode 100644
index 00000000..a92f5060
--- /dev/null
+++ b/GPT_SoVITS/inference_webui_model_test.py
@@ -0,0 +1,1047 @@
+'''
+Mixed Chinese-English recognition
+Mixed Japanese-English recognition
+Multilingual: split the text and detect each segment's language
+Recognize all text as Chinese
+Recognize all text as English
+Recognize all text as Japanese
+'''
+import logging
+import traceback
+
+logging.getLogger("markdown_it").setLevel(logging.ERROR)
+logging.getLogger("urllib3").setLevel(logging.ERROR)
+logging.getLogger("httpcore").setLevel(logging.ERROR)
+logging.getLogger("httpx").setLevel(logging.ERROR)
+logging.getLogger("asyncio").setLevel(logging.ERROR)
+logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
+logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
+logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
+import LangSegment, os, re, sys, json
+import pdb
+import torch
+from scipy.io.wavfile import write
+import shutil
+
+output_dir='生成记录'
+os.makedirs(output_dir,exist_ok=True)
+
+version=os.environ.get("version","v2")
+pretrained_sovits_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
+pretrained_gpt_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
+
+_ =[[],[]]
+for i in range(2):
+ if os.path.exists(pretrained_gpt_name[i]):
+ _[0].append(pretrained_gpt_name[i])
+ if os.path.exists(pretrained_sovits_name[i]):
+ _[-1].append(pretrained_sovits_name[i])
+pretrained_gpt_name,pretrained_sovits_name = _
+
+
+
+if os.path.exists(f"./weight.json"):
+ pass
+else:
+ with open(f"./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)
+
+with open(f"./weight.json", 'r', encoding="utf-8") as file:
+ weight_data = file.read()
+ weight_data=json.loads(weight_data)
+ gpt_path = os.environ.get(
+ "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
+ sovits_path = os.environ.get(
+ "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
+ if isinstance(gpt_path,list):
+ gpt_path = gpt_path[0]
+ if isinstance(sovits_path,list):
+ sovits_path = sovits_path[0]
+
+# gpt_path = os.environ.get(
+# "gpt_path", pretrained_gpt_name
+# )
+# sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
+cnhubert_base_path = os.environ.get(
+ "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+)
+bert_path = os.environ.get(
+ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
+)
+infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
+infer_ttswebui = int(infer_ttswebui)
+is_share = os.environ.get("is_share", "False")
+is_share = eval(is_share)
+if "_CUDA_VISIBLE_DEVICES" in os.environ:
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
+is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
+punctuation = set(['!', '?', '…', ',', '.', '-'," "])
+import gradio as gr
+from transformers import AutoModelForMaskedLM, AutoTokenizer
+import numpy as np
+import librosa
+from feature_extractor import cnhubert
+
+cnhubert.cnhubert_base_path = cnhubert_base_path
+
+from module.models import SynthesizerTrn
+from AR.models.t2s_lightning_module import Text2SemanticLightningModule
+from text import cleaned_text_to_sequence
+from text.cleaner import clean_text
+from time import time as ttime
+from module.mel_processing import spectrogram_torch
+from tools.my_utils import load_audio
+from tools.i18n.i18n import I18nAuto, scan_language_list
+
+language=os.environ.get("language","Auto")
+language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
+i18n = I18nAuto(language=language)
+
+# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # ensure this is also set when the inference UI is launched directly
+
+if torch.cuda.is_available():
+ device = "cuda"
+else:
+ device = "cpu"
+
+dict_language_v1 = {
+    i18n("中文"): "all_zh",  # recognize all text as Chinese
+    i18n("英文"): "en",  # recognize all text as English (passes through unchanged)
+    i18n("日文"): "all_ja",  # recognize all text as Japanese
+    i18n("中英混合"): "zh",  # mixed Chinese-English recognition (unchanged)
+    i18n("日英混合"): "ja",  # mixed Japanese-English recognition (unchanged)
+    i18n("多语种混合"): "auto",  # multilingual: split the text and detect each language
+}
+dict_language_v2 = {
+    i18n("中文"): "all_zh",  # recognize all text as Chinese
+    i18n("英文"): "en",  # recognize all text as English (passes through unchanged)
+    i18n("日文"): "all_ja",  # recognize all text as Japanese
+    i18n("粤语"): "all_yue",  # recognize all text as Cantonese
+    i18n("韩文"): "all_ko",  # recognize all text as Korean
+    i18n("中英混合"): "zh",  # mixed Chinese-English recognition (unchanged)
+    i18n("日英混合"): "ja",  # mixed Japanese-English recognition (unchanged)
+    i18n("粤英混合"): "yue",  # mixed Cantonese-English recognition (unchanged)
+    i18n("韩英混合"): "ko",  # mixed Korean-English recognition (unchanged)
+    i18n("多语种混合"): "auto",  # multilingual: split the text and detect each language
+    i18n("多语种混合(粤语)"): "auto_yue",  # multilingual with Cantonese
+}
+dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+
+tokenizer = AutoTokenizer.from_pretrained(bert_path)
+bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
+if is_half == True:
+ bert_model = bert_model.half().to(device)
+else:
+ bert_model = bert_model.to(device)
+
+
+def get_bert_feature(text, word2ph):
+ with torch.no_grad():
+ inputs = tokenizer(text, return_tensors="pt")
+ for i in inputs:
+ inputs[i] = inputs[i].to(device)
+ res = bert_model(**inputs, output_hidden_states=True)
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
+ assert len(word2ph) == len(text)
+ phone_level_feature = []
+ for i in range(len(word2ph)):
+ repeat_feature = res[i].repeat(word2ph[i], 1)
+ phone_level_feature.append(repeat_feature)
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
+ return phone_level_feature.T
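+# Editor's note: the returned tensor has shape (1024, sum(word2ph)); each
+# character's BERT vector is repeated word2ph[i] times so the features align
+# one-to-one with the phoneme sequence.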
+
+
+class DictToAttrRecursive(dict):
+ def __init__(self, input_dict):
+ super().__init__(input_dict)
+ for key, value in input_dict.items():
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ self[key] = value
+ setattr(self, key, value)
+
+ def __getattr__(self, item):
+ try:
+ return self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+ def __setattr__(self, key, value):
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ super(DictToAttrRecursive, self).__setitem__(key, value)
+ super().__setattr__(key, value)
+
+ def __delattr__(self, item):
+ try:
+ del self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+
+ssl_model = cnhubert.get_model()
+if is_half == True:
+ ssl_model = ssl_model.half().to(device)
+else:
+ ssl_model = ssl_model.to(device)
+
+
+def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
+ global vq_model, hps, version, dict_language
+ dict_s2 = torch.load(sovits_path, map_location="cpu")
+ hps = dict_s2["config"]
+ hps = DictToAttrRecursive(hps)
+ hps.model.semantic_frame_rate = "25hz"
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
+ hps.model.version = "v1"
+ else:
+ hps.model.version = "v2"
+ version = hps.model.version
+ # print("sovits版本:",hps.model.version)
+ vq_model = SynthesizerTrn(
+ hps.data.filter_length // 2 + 1,
+ hps.train.segment_size // hps.data.hop_length,
+ n_speakers=hps.data.n_speakers,
+ **hps.model
+ )
+ if ("pretrained" not in sovits_path):
+ del vq_model.enc_q
+ if is_half == True:
+ vq_model = vq_model.half().to(device)
+ else:
+ vq_model = vq_model.to(device)
+ vq_model.eval()
+ print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["SoVITS"][version]=sovits_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+ if prompt_language is not None and text_language is not None:
+ if prompt_language in list(dict_language.keys()):
+ prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
+ else:
+ prompt_text_update = {'__type__':'update', 'value':''}
+ prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
+ if text_language in list(dict_language.keys()):
+ text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
+ else:
+ text_update = {'__type__':'update', 'value':''}
+ text_language_update = {'__type__':'update', 'value':i18n("中文")}
+ return {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update
+
+
+
+change_sovits_weights(sovits_path)
+
+
+def change_gpt_weights(gpt_path):
+ global hz, max_sec, t2s_model, config
+ hz = 50
+ dict_s1 = torch.load(gpt_path, map_location="cpu")
+ config = dict_s1["config"]
+ max_sec = config["data"]["max_sec"]
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
+ t2s_model.load_state_dict(dict_s1["weight"])
+ if is_half == True:
+ t2s_model = t2s_model.half()
+ t2s_model = t2s_model.to(device)
+ t2s_model.eval()
+ total = sum([param.nelement() for param in t2s_model.parameters()])
+ print("Number of parameter: %.2fM" % (total / 1e6))
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["GPT"][version]=gpt_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+
+
+change_gpt_weights(gpt_path)
+
+
+def get_spepc(hps, filename):
+ audio = load_audio(filename, int(hps.data.sampling_rate))
+ audio = torch.FloatTensor(audio)
+ maxx=audio.abs().max()
+ if(maxx>1):audio/=min(2,maxx)
+ audio_norm = audio
+ audio_norm = audio_norm.unsqueeze(0)
+ spec = spectrogram_torch(
+ audio_norm,
+ hps.data.filter_length,
+ hps.data.sampling_rate,
+ hps.data.hop_length,
+ hps.data.win_length,
+ center=False,
+ )
+ return spec
+
+def clean_text_inf(text, language, version):
+ phones, word2ph, norm_text = clean_text(text, language, version)
+ phones = cleaned_text_to_sequence(phones, version)
+ return phones, word2ph, norm_text
+
+dtype=torch.float16 if is_half == True else torch.float32
+def get_bert_inf(phones, word2ph, norm_text, language):
+ language=language.replace("all_","")
+ if language == "zh":
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
+ else:
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+
+ return bert
+
+
+splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
+
+
+def get_first(text):
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
+ text = re.split(pattern, text)[0].strip()
+ return text
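+# Illustrative sketch (editor's note): get_first("你好。世界。") returns "你好",
+# i.e. the text before the first splitting punctuation mark.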
+
+from text import chinese
+def get_phones_and_bert(text,language,version,final=False):
+ if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
+ language = language.replace("all_","")
+ if language == "en":
+ LangSegment.setfilters(["en"])
+ formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
+ else:
+            # CJK ideographs cannot be reliably told apart by language, so trust the user's selection
+ formattext = text
+ while " " in formattext:
+ formattext = formattext.replace(" ", " ")
+ if language == "zh":
+ if re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"zh",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = get_bert_feature(norm_text, word2ph).to(device)
+ elif language == "yue" and re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"yue",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+ elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+ textlist=[]
+ langlist=[]
+ LangSegment.setfilters(["zh","ja","en","ko"])
+ if language == "auto":
+ for tmp in LangSegment.getTexts(text):
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ elif language == "auto_yue":
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "zh":
+ tmp["lang"] = "yue"
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ else:
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "en":
+ langlist.append(tmp["lang"])
+ else:
+                    # CJK ideographs cannot be reliably told apart by language, so trust the user's selection
+ langlist.append(language)
+ textlist.append(tmp["text"])
+ print(textlist)
+ print(langlist)
+ phones_list = []
+ bert_list = []
+ norm_text_list = []
+ for i in range(len(textlist)):
+ lang = langlist[i]
+ phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
+ bert = get_bert_inf(phones, word2ph, norm_text, lang)
+ phones_list.append(phones)
+ norm_text_list.append(norm_text)
+ bert_list.append(bert)
+ bert = torch.cat(bert_list, dim=1)
+ phones = sum(phones_list, [])
+ norm_text = ''.join(norm_text_list)
+
+ if not final and len(phones) < 6:
+ return get_phones_and_bert("." + text,language,version,final=True)
+
+ return phones,bert.to(dtype),norm_text
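+# Editor's note: when a segment yields fewer than 6 phonemes, the text is
+# retried once with a leading "." so very short inputs still synthesize; the
+# final flag guards against infinite recursion.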
+
+
+def merge_short_text_in_array(texts, threshold):
+ if (len(texts)) < 2:
+ return texts
+ result = []
+ text = ""
+ for ele in texts:
+ text += ele
+ if len(text) >= threshold:
+ result.append(text)
+ text = ""
+ if (len(text) > 0):
+ if len(result) == 0:
+ result.append(text)
+ else:
+ result[len(result) - 1] += text
+ return result
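+# Illustrative sketch (editor's note):
+# merge_short_text_in_array(["abcd", "ef", "ghij"], 4) -> ["abcd", "efghij"];
+# pieces accumulate until they reach the threshold, and a short trailing
+# remainder is appended onto the last result.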
+
+##ref_wav_path+prompt_text+prompt_language+text(单个)+text_language+top_k+top_p+temperature
+# cache_tokens={}  # cache eviction not implemented yet
+cache= {}
+def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=123,pause_duration=0.5):
+ global cache
+ file_name = text[:30]
+ file_name = file_name.strip()
+ match = re.search(r'(\d+)(?=\.[^\.]*\.[^\.]*$)', ref_wav_path)
+ if match:
+        R_value = match.group(1)  # the digits just before the second-to-last dot
+ else:
+ R_value = "" # 如果没有找到标号,可以设置一个默认值
+ if ref_wav_path:pass
+ else:gr.Warning(i18n('请上传参考音频'))
+ if text:pass
+ else:gr.Warning(i18n('请填入推理文本'))
+ t = []
+ if prompt_text is None or len(prompt_text) == 0:
+ ref_free = True
+ t0 = ttime()
+ prompt_language = dict_language[prompt_language]
+ text_language = dict_language[text_language]
+
+
+ if not ref_free:
+ prompt_text = prompt_text.strip("\n")
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+ print(i18n("实际输入的参考文本:"), prompt_text)
+ text = text.strip("\n")
+ # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
+
+ print(i18n("实际输入的目标文本:"), text)
+ zero_wav = np.zeros(
+ int(hps.data.sampling_rate * 0.3),
+ dtype=np.float16 if is_half == True else np.float32,
+ )
+ # zero_wav1 = np.zeros(
+ # int(hps.data.sampling_rate * 0.5),
+ # dtype=np.float16 if is_half == True else np.float32,
+ # )
+    # (by-katana: rebuild the zero-filled pause segment from the pause_duration argument)
+ pause_samples = int(hps.data.sampling_rate * pause_duration)
+ zero_wav1 = np.zeros(pause_samples, dtype=np.float16 if is_half else np.float32)
+ if not ref_free:
+ with torch.no_grad():
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
+ gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
+ wav16k = torch.from_numpy(wav16k)
+ zero_wav_torch = torch.from_numpy(zero_wav)
+ if is_half == True:
+ wav16k = wav16k.half().to(device)
+ zero_wav_torch = zero_wav_torch.half().to(device)
+ else:
+ wav16k = wav16k.to(device)
+ zero_wav_torch = zero_wav_torch.to(device)
+ wav16k = torch.cat([wav16k, zero_wav_torch])
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
+ "last_hidden_state"
+ ].transpose(
+ 1, 2
+ ) # .float()
+ codes = vq_model.extract_latent(ssl_content)
+ prompt_semantic = codes[0, 0]
+ prompt = prompt_semantic.unsqueeze(0).to(device)
+
+ t1 = ttime()
+ t.append(t1-t0)
+
+ if (how_to_cut == i18n("凑四句一切")):
+ text = cut1(text)
+ elif (how_to_cut == i18n("凑50字一切")):
+ text = cut2(text)
+ elif (how_to_cut == i18n("按中文句号。切")):
+ text = cut3(text)
+ elif (how_to_cut == i18n("按英文句号.切")):
+ text = cut4(text)
+ elif (how_to_cut == i18n("按标点符号切")):
+ text = cut5(text)
+ while "\n\n" in text:
+ text = text.replace("\n\n", "\n")
+ print(i18n("实际输入的目标文本(切句后):"), text)
+ texts = text.split("\n")
+ texts = process_text(texts)
+ texts = merge_short_text_in_array(texts, 5)
+ audio_opt = []
+ if not ref_free:
+ phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)
+
+ for i_text,text in enumerate(texts):
+        # Skip empty lines in the target text to avoid errors
+ if (len(text.strip()) == 0):
+ continue
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
+ print(i18n("实际输入的目标文本(每句):"), text)
+ phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
+ if not ref_free:
+ bert = torch.cat([bert1, bert2], 1)
+ all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
+ else:
+ bert = bert2
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
+
+ bert = bert.to(device).unsqueeze(0)
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
+
+ t2 = ttime()
+ # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
+ # print(cache.keys(),if_freeze)
+ if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
+ else:
+ with torch.no_grad():
+ pred_semantic, idx = t2s_model.model.infer_panel(
+ all_phoneme_ids,
+ all_phoneme_len,
+ None if ref_free else prompt,
+ bert,
+ # prompt_phone_len=ph_offset,
+ top_k=top_k,
+ top_p=top_p,
+ temperature=temperature,
+ early_stop_num=hz * max_sec,
+ )
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
+ cache[i_text]=pred_semantic
+ t3 = ttime()
+ refers=[]
+ if(inp_refs):
+ for path in inp_refs:
+ try:
+ refer = get_spepc(hps, path.name).to(dtype).to(device)
+ refers.append(refer)
+ except:
+ traceback.print_exc()
+ if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
+ audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed).detach().cpu().numpy()[0, 0])
+        max_audio=np.abs(audio).max()  # simple guard against 16-bit clipping
+ if max_audio>1:audio/=max_audio
+ audio_opt.append(audio)
+        # (by - katana: append the new zero_wav1 pause segment)
+ audio_opt.append(zero_wav1)
+ t4 = ttime()
+ t.extend([t2 - t1,t3 - t2, t4 - t3])
+ t1 = ttime()
+ print("%.3f\t%.3f\t%.3f\t%.3f" %
+ (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
+ )
+ concatenated_audio = np.concatenate(audio_opt, 0)
+ file_path = os.path.join(output_dir, f"r{R_value}-{file_name}.wav")
+ write(file_path, hps.data.sampling_rate, (concatenated_audio * 32768).astype(np.int16))
+ yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
+ np.int16
+ )
+
+
+def get_tts_wav1(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20,
+ top_p=0.6, temperature=0.6, ref_free=False, speed=1, if_freeze=False, inp_refs=123, pause_duration=0.5):
+ global cache
+ file_name = text[:30]
+ file_name = file_name.strip()
+ # match = re.search(r'(\d+)(?=\.[^\.]*\.[^\.]*$)', ref_wav_path)
+ # if match:
+ # R_value = match.group(1) # 获取倒数第二个点号前的数字
+ # else:
+ # R_value = "" # 如果没有找到标号,可以设置一个默认值
+ if ref_wav_path:
+ pass
+ else:
+ gr.Warning(i18n('请上传参考音频'))
+ if text:
+ pass
+ else:
+ gr.Warning(i18n('请填入推理文本'))
+ t = []
+ if prompt_text is None or len(prompt_text) == 0:
+ ref_free = True
+ t0 = ttime()
+ prompt_language = dict_language[prompt_language]
+ text_language = dict_language[text_language]
+
+ if not ref_free:
+ prompt_text = prompt_text.strip("\n")
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+ print(i18n("实际输入的参考文本:"), prompt_text)
+ text = text.strip("\n")
+ # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
+
+ print(i18n("实际输入的目标文本:"), text)
+ zero_wav = np.zeros(
+ int(hps.data.sampling_rate * 0.3),
+ dtype=np.float16 if is_half == True else np.float32,
+ )
+ # zero_wav1 = np.zeros(
+ # int(hps.data.sampling_rate * 0.5),
+ # dtype=np.float16 if is_half == True else np.float32,
+ # )
+    # (by-katana: rebuild the zero-filled pause segment from the pause_duration argument)
+ pause_samples = int(hps.data.sampling_rate * pause_duration)
+ zero_wav1 = np.zeros(pause_samples, dtype=np.float16 if is_half else np.float32)
+ if not ref_free:
+ with torch.no_grad():
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
+ gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
+ wav16k = torch.from_numpy(wav16k)
+ zero_wav_torch = torch.from_numpy(zero_wav)
+ if is_half == True:
+ wav16k = wav16k.half().to(device)
+ zero_wav_torch = zero_wav_torch.half().to(device)
+ else:
+ wav16k = wav16k.to(device)
+ zero_wav_torch = zero_wav_torch.to(device)
+ wav16k = torch.cat([wav16k, zero_wav_torch])
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
+ "last_hidden_state"
+ ].transpose(
+ 1, 2
+ ) # .float()
+ codes = vq_model.extract_latent(ssl_content)
+ prompt_semantic = codes[0, 0]
+ prompt = prompt_semantic.unsqueeze(0).to(device)
+
+ t1 = ttime()
+ t.append(t1 - t0)
+
+ if (how_to_cut == i18n("凑四句一切")):
+ text = cut1(text)
+ elif (how_to_cut == i18n("凑50字一切")):
+ text = cut2(text)
+ elif (how_to_cut == i18n("按中文句号。切")):
+ text = cut3(text)
+ elif (how_to_cut == i18n("按英文句号.切")):
+ text = cut4(text)
+ elif (how_to_cut == i18n("按标点符号切")):
+ text = cut5(text)
+ while "\n\n" in text:
+ text = text.replace("\n\n", "\n")
+ print(i18n("实际输入的目标文本(切句后):"), text)
+ texts = text.split("\n")
+ texts = process_text(texts)
+ texts = merge_short_text_in_array(texts, 5)
+ audio_opt = []
+ if not ref_free:
+ phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language, version)
+
+ for i_text, text in enumerate(texts):
+        # Skip empty lines in the target text to avoid errors
+ if (len(text.strip()) == 0):
+ continue
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
+ print(i18n("实际输入的目标文本(每句):"), text)
+ phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language, version)
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
+ if not ref_free:
+ bert = torch.cat([bert1, bert2], 1)
+ all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
+ else:
+ bert = bert2
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
+
+ bert = bert.to(device).unsqueeze(0)
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
+
+ t2 = ttime()
+ # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
+ # print(cache.keys(),if_freeze)
+ if (i_text in cache and if_freeze == True):
+ pred_semantic = cache[i_text]
+ else:
+ with torch.no_grad():
+ pred_semantic, idx = t2s_model.model.infer_panel(
+ all_phoneme_ids,
+ all_phoneme_len,
+ None if ref_free else prompt,
+ bert,
+ # prompt_phone_len=ph_offset,
+ top_k=top_k,
+ top_p=top_p,
+ temperature=temperature,
+ early_stop_num=hz * max_sec,
+ )
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
+ cache[i_text] = pred_semantic
+ t3 = ttime()
+ refers = []
+ if (inp_refs):
+ for path in inp_refs:
+ try:
+ refer = get_spepc(hps, path.name).to(dtype).to(device)
+ refers.append(refer)
+ except:
+ traceback.print_exc()
+ if (len(refers) == 0): refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
+ audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,
+ speed=speed).detach().cpu().numpy()[0, 0])
+        max_audio = np.abs(audio).max()  # simple guard against 16-bit clipping
+ if max_audio > 1: audio /= max_audio
+ audio_opt.append(audio)
+        # (by - katana: append the new zero_wav1 pause segment)
+ audio_opt.append(zero_wav1)
+ t4 = ttime()
+ t.extend([t2 - t1, t3 - t2, t4 - t3])
+ t1 = ttime()
+ print("%.3f\t%.3f\t%.3f\t%.3f" %
+ (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
+ )
+ concatenated_audio = np.concatenate(audio_opt, 0)
+ file_path = os.path.join(output_dir, f"{file_name}.wav")
+ write(file_path, hps.data.sampling_rate, (concatenated_audio * 32768).astype(np.int16))
+ yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
+ np.int16
+ )
+
+def split(todo_text):
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
+ if todo_text[-1] not in splits:
+ todo_text += "。"
+ i_split_head = i_split_tail = 0
+ len_text = len(todo_text)
+ todo_texts = []
+ while 1:
+ if i_split_head >= len_text:
+            break  # the text always ends with punctuation, so the final segment was already appended
+ if todo_text[i_split_head] in splits:
+ i_split_head += 1
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
+ i_split_tail = i_split_head
+ else:
+ i_split_head += 1
+ return todo_texts
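+# Illustrative sketch (editor's note): split("你好。世界") returns
+# ["你好。", "世界。"]; a trailing full stop is appended first when the text
+# does not already end with one.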
+
+
+def cut1(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ split_idx = list(range(0, len(inps), 4))
+ split_idx[-1] = None
+ if len(split_idx) > 1:
+ opts = []
+ for idx in range(len(split_idx) - 1):
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
+ else:
+ opts = [inp]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+def cut2(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ if len(inps) < 2:
+ return inp
+ opts = []
+ summ = 0
+ tmp_str = ""
+ for i in range(len(inps)):
+ summ += len(inps[i])
+ tmp_str += inps[i]
+ if summ > 50:
+ summ = 0
+ opts.append(tmp_str)
+ tmp_str = ""
+ if tmp_str != "":
+ opts.append(tmp_str)
+ # print(opts)
+    if len(opts) > 1 and len(opts[-1]) < 50:  ## if the last chunk is too short, merge it into the previous one
+ opts[-2] = opts[-2] + opts[-1]
+ opts = opts[:-1]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+def cut3(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip("。").split("。")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+def cut4(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip(".").split(".")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+# contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
+def cut5(inp):
+ inp = inp.strip("\n")
+ punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
+ mergeitems = []
+ items = []
+
+ for i, char in enumerate(inp):
+ if char in punds:
+ if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
+ items.append(char)
+ else:
+ items.append(char)
+ mergeitems.append("".join(items))
+ items = []
+ else:
+ items.append(char)
+
+ if items:
+ mergeitems.append("".join(items))
+
+ opt = [item for item in mergeitems if not set(item).issubset(punds)]
+ return "\n".join(opt)
+
+
+def custom_sort_key(s):
+    # Use a regex to split the string into digit and non-digit parts
+    parts = re.split(r'(\d+)', s)
+    # Convert the digit parts to integers; leave the rest as strings
+    parts = [int(part) if part.isdigit() else part for part in parts]
+ return parts
+
+def process_text(texts):
+ _text=[]
+ if all(text in [None, " ", "\n",""] for text in texts):
+ raise ValueError(i18n("请输入有效文本"))
+ for text in texts:
+ if text in [None, " ", ""]:
+ pass
+ else:
+ _text.append(text)
+ return _text
+
+
+def change_choices():
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
+
+
+SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
+GPT_weight_root=["GPT_weights_v2","GPT_weights"]
+for path in SoVITS_weight_root+GPT_weight_root:
+ os.makedirs(path,exist_ok=True)
+
+
+def get_weights_names(GPT_weight_root, SoVITS_weight_root):
+ SoVITS_names = [i for i in pretrained_sovits_name]
+ for path in SoVITS_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
+ GPT_names = [i for i in pretrained_gpt_name]
+ for path in GPT_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
+ return SoVITS_names, GPT_names
+
+
+SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+
+def html_center(text, label='p'):
+ return f"""
+ <{label} style="margin: 0; padding: 0;">{text}{label}>
+
"""
+
+def html_left(text, label='p'):
+ return f"""
+ <{label} style="margin: 0; padding: 0;">{text}{label}>
+
"""
+
+# (by - katana: add a reference-audio dropdown list)
+reference_wavs=["选择参考音频或自己上传"]
+os.makedirs("./refwavs", exist_ok=True)  # make sure the folder exists before listing it
+for name in os.listdir("./refwavs"):
+    reference_wavs.append(name)
+# Possibly redundant: refresh the dropdown choices
+def change_ref_choices():
+ reference_wavs = [i18n("Please select the reference audio or upload it yourself.")]
+ for name in os.listdir("./refwavs/"):
+ reference_wavs.append(name)
+ return {"choices":reference_wavs, "__type__": "update"}
+
+
+# (by - katana: strip [speaker] tags from reference-audio filenames)
+def replace_speaker(text):
+ return re.sub(r"\[.*?\]", "", text, flags=re.UNICODE)
+
+def change_wav(audio_path):
+ text = audio_path.replace(".wav","").replace(".mp3","").replace(".WAV","")
+ text = replace_speaker(text)
+ return f"./refwavs/{audio_path}",text
+
+# (by - katana: split the text and send it to the textboxes below)
+def split_text_and_fill_boxes(input_text, *textboxes):
+ sentences = []
+    # Split the text on Chinese full stops (。)
+ for line in input_text.split('\n'):
+ sentences.extend(line.split('。'))
+    # Drop empty fragments and restore the trailing full stop
+ sentences = [sentence.strip()+'。' for sentence in sentences if sentence.strip()]
+
+    # Fill each textbox: one sentence per box, blank for the rest
+ updates = []
+ for i, textbox in enumerate(textboxes):
+ if i < len(sentences):
+ updates.append(gr.update(value=sentences[i]))
+ else:
+ updates.append(gr.update(value=""))
+ return updates
+# (by - katana: split by paragraph)
+def split_by_n(input_text, *textboxes):
+    # Remove fullwidth parentheses and their contents
+ cleaned_text = re.sub(r"\(.*?\)", "", input_text)
+    # Split the text on newlines
+ sentences = [line.strip() for line in cleaned_text.split('\n') if line.strip()]
+
+    # Fill each textbox: one paragraph per box, blank for the rest
+ updates = []
+ for i, textbox in enumerate(textboxes):
+ if i < len(sentences):
+ updates.append(gr.update(value=sentences[i]))
+ else:
+ updates.append(gr.update(value=""))
+ return updates
+# (by - katana: hotword replacement; reads GPT_SoVITS/hotwords.json)
+def hot_replace(input_text):
+ with open('./GPT_SoVITS/hotwords.json', 'r', encoding='utf-8') as file:
+ hotwords_dict = json.load(file)
+ for key, value in hotwords_dict.items():
+ input_text = input_text.replace(key, value)
+ return gr.update(value=input_text)
+
+with gr.Blocks(title="GPT-SoVITS WebUI") as app:
+ gr.Markdown(
+ value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
+ )
+ with gr.Tabs():
+ with gr.TabItem(i18n("语音生成")): # 提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
+ with gr.Group():
+ gr.Markdown(html_center(i18n("模型切换"),'h3'))
+ with gr.Row():
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, scale=14)
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, scale=14)
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
+ gr.Markdown(html_center(i18n("*请上传并填写参考信息"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=16):
+ inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
+ with gr.Row():
+                        # (by - katana: reference-audio dropdown)
+                        wavs_dropdown = gr.Dropdown(label="参考音频列表",choices=reference_wavs,value="选择参考音频或自己上传",interactive=True,scale=5)
+ refresh_ref_button=gr.Button("刷新",scale=1)
+ refresh_ref_button.click(fn=change_ref_choices,inputs=[],outputs=wavs_dropdown)
+ with gr.Column(scale=13):
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True, show_label=True,scale=1)
+ gr.Markdown(html_left(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开。
开启后无视填写的参考文本。")))
+ prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5,scale=1)
+ wavs_dropdown.change(change_wav,[wavs_dropdown],[inp_ref,prompt_text])
+ with gr.Column(scale=14):
+ prompt_language = gr.Dropdown(
+ label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文"),
+ )
+ inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"),file_count="multiple")
+ gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=13):
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
+ with gr.Column(scale=7):
+ text_language = gr.Dropdown(
+ label=i18n("需要合成的语种")+i18n(".限制范围越小判别效果越好。"), choices=list(dict_language.keys()), value=i18n("中文"), scale=1
+ )
+ how_to_cut = gr.Dropdown(
+ label=i18n("怎么切"),
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
+ value=i18n("按中文句号。切"),
+ interactive=True, scale=1
+ )
+ gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
+ if_freeze=gr.Checkbox(label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), value=False, interactive=True,show_label=True, scale=1)
+ speed = gr.Slider(minimum=0.6,maximum=1.65,step=0.05,label=i18n("语速"),value=1,interactive=True, scale=1)
+ pause_duration = gr.Slider(
+ minimum=0.01,
+ maximum=2,
+ step=0.01,
+ label=i18n("音频片段间隔时长(秒)"),
+ value=0.2,
+ interactive=True,
+ scale=1
+ )
+ gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k(采样率,越高语气越丰富)"),value=15,interactive=True, scale=1)
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p(建议不动)"),value=1,interactive=True, scale=1)
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature(越低越稳定,但是语气稍有平淡)"),value=1,interactive=True, scale=1)
+                    # (by - katana: hotword-replacement button)
+ replace_hot_words = gr.Button("替换热词")
+ replace_hot_words.click(fn=hot_replace, inputs=text, outputs=text)
+ # with gr.Column():
+ # gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
+ # phoneme=gr.Textbox(label=i18n("音素框"), value="")
+ # get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
+ with gr.Row():
+ inference_button = gr.Button(i18n("合成语音"), variant="primary", size='lg', scale=25)
+ output = gr.Audio(label=i18n("输出的语音"), scale=14)
+
+ inference_button.click(
+ get_tts_wav,
+ [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration],
+ [output],
+ )
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown,prompt_language,text_language], [prompt_language,text_language,prompt_text,prompt_language,text,text_language])
+ GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
+
+        # (by - katana: proofreading tab)
+        with gr.TabItem(i18n("校对生成")):
+ split_button = gr.Button(value="切分文本(前一页整段按句号切分,方便单独生成)", variant="primary", size='lg')
+            # (by - katana: split-by-paragraph button)
+ split_button2 = gr.Button(value="按段落切分", variant="primary", size='lg')
+
+ with gr.Accordion("重新生成列表"):
+ textboxes = []
+                for i in range(100):  # create 100 rows, one per split segment
+                    with gr.Row():  # each iteration adds one horizontal row
+ text_input = gr.Textbox(label=f"切分文本 {i + 1}", scale=4)
+ button = gr.Button(value="重新生成", scale=1)
+ audio_output = gr.Audio(scale=2)
+                        # Bind the button to get_tts_wav1, passing this textbox's text as input
+ button.click(fn=get_tts_wav1, inputs=[inp_ref, prompt_text, prompt_language, text_input, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration], outputs=audio_output)
+ textboxes.append(text_input)
+ split_button.click(fn=split_text_and_fill_boxes, inputs=[text, *textboxes], outputs=textboxes)
+ split_button2.click(fn=split_by_n, inputs=[text, *textboxes], outputs=textboxes)
+ # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
+ # with gr.Row():
+ # text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
+ # button1 = gr.Button(i18n("凑四句一切"), variant="primary")
+ # button2 = gr.Button(i18n("凑50字一切"), variant="primary")
+ # button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
+ # button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
+ # button5 = gr.Button(i18n("按标点符号切"), variant="primary")
+ # text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
+ # button1.click(cut1, [text_inp], [text_opt])
+ # button2.click(cut2, [text_inp], [text_opt])
+ # button3.click(cut3, [text_inp], [text_opt])
+ # button4.click(cut4, [text_inp], [text_opt])
+ # button5.click(cut5, [text_inp], [text_opt])
+ # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))
+
+if __name__ == '__main__':
+ app.queue().launch(#concurrency_count=511, max_size=1022
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=infer_ttswebui,
+ quiet=True,
+ )
diff --git a/GPT_SoVITS/inference_webui_pro.py b/GPT_SoVITS/inference_webui_pro.py
new file mode 100644
index 00000000..5f76cd23
--- /dev/null
+++ b/GPT_SoVITS/inference_webui_pro.py
@@ -0,0 +1,842 @@
+'''
+Mixed Chinese-English recognition
+Mixed Japanese-English recognition
+Multilingual: split the text and detect each segment's language
+Recognize all text as Chinese
+Recognize all text as English
+Recognize all text as Japanese
+'''
+import logging
+import traceback
+
+logging.getLogger("markdown_it").setLevel(logging.ERROR)
+logging.getLogger("urllib3").setLevel(logging.ERROR)
+logging.getLogger("httpcore").setLevel(logging.ERROR)
+logging.getLogger("httpx").setLevel(logging.ERROR)
+logging.getLogger("asyncio").setLevel(logging.ERROR)
+logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
+logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
+logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
+import LangSegment, os, re, sys, json
+import pdb
+import torch
+
+version=os.environ.get("version","v2")
+pretrained_sovits_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
+pretrained_gpt_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
+
+_ =[[],[]]
+for i in range(2):
+ if os.path.exists(pretrained_gpt_name[i]):
+ _[0].append(pretrained_gpt_name[i])
+ if os.path.exists(pretrained_sovits_name[i]):
+ _[-1].append(pretrained_sovits_name[i])
+pretrained_gpt_name,pretrained_sovits_name = _
+
+
+
+if os.path.exists(f"./weight.json"):
+ pass
+else:
+ with open(f"./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)
+
+with open(f"./weight.json", 'r', encoding="utf-8") as file:
+ weight_data = file.read()
+ weight_data=json.loads(weight_data)
+ gpt_path = os.environ.get(
+ "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
+ sovits_path = os.environ.get(
+ "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
+ if isinstance(gpt_path,list):
+ gpt_path = gpt_path[0]
+ if isinstance(sovits_path,list):
+ sovits_path = sovits_path[0]
+
+# gpt_path = os.environ.get(
+# "gpt_path", pretrained_gpt_name
+# )
+# sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
+cnhubert_base_path = os.environ.get(
+ "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
+)
+bert_path = os.environ.get(
+ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
+)
+infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
+infer_ttswebui = int(infer_ttswebui)
+is_share = os.environ.get("is_share", "False")
+is_share = eval(is_share)
+if "_CUDA_VISIBLE_DEVICES" in os.environ:
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
+is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
+punctuation = set(['!', '?', '…', ',', '.', '-'," "])
+import gradio as gr
+from transformers import AutoModelForMaskedLM, AutoTokenizer
+import numpy as np
+import librosa
+from feature_extractor import cnhubert
+
+cnhubert.cnhubert_base_path = cnhubert_base_path
+
+from module.models import SynthesizerTrn
+from AR.models.t2s_lightning_module import Text2SemanticLightningModule
+from text import cleaned_text_to_sequence
+from text.cleaner import clean_text
+from time import time as ttime
+from module.mel_processing import spectrogram_torch
+from tools.my_utils import load_audio
+from tools.i18n.i18n import I18nAuto, scan_language_list
+
+language=os.environ.get("language","Auto")
+language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
+i18n = I18nAuto(language=language)
+
+# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # ensure this is also set when the inference UI is launched directly
+
+if torch.cuda.is_available():
+ device = "cuda"
+else:
+ device = "cpu"
+
+dict_language_v1 = {
+    i18n("中文"): "all_zh",  # recognize all text as Chinese
+    i18n("英文"): "en",  # recognize all text as English (passes through unchanged)
+    i18n("日文"): "all_ja",  # recognize all text as Japanese
+    i18n("中英混合"): "zh",  # mixed Chinese-English recognition (unchanged)
+    i18n("日英混合"): "ja",  # mixed Japanese-English recognition (unchanged)
+    i18n("多语种混合"): "auto",  # multilingual: split the text and detect each language
+}
+dict_language_v2 = {
+    i18n("中文"): "all_zh",  # recognize all text as Chinese
+    i18n("英文"): "en",  # recognize all text as English (passes through unchanged)
+    i18n("日文"): "all_ja",  # recognize all text as Japanese
+    i18n("粤语"): "all_yue",  # recognize all text as Cantonese
+    i18n("韩文"): "all_ko",  # recognize all text as Korean
+    i18n("中英混合"): "zh",  # mixed Chinese-English recognition (unchanged)
+    i18n("日英混合"): "ja",  # mixed Japanese-English recognition (unchanged)
+    i18n("粤英混合"): "yue",  # mixed Cantonese-English recognition (unchanged)
+    i18n("韩英混合"): "ko",  # mixed Korean-English recognition (unchanged)
+    i18n("多语种混合"): "auto",  # multilingual: split the text and detect each language
+    i18n("多语种混合(粤语)"): "auto_yue",  # multilingual with Cantonese
+}
+dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+
+tokenizer = AutoTokenizer.from_pretrained(bert_path)
+bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
+if is_half == True:
+ bert_model = bert_model.half().to(device)
+else:
+ bert_model = bert_model.to(device)
+
+
+def get_bert_feature(text, word2ph):
+ with torch.no_grad():
+ inputs = tokenizer(text, return_tensors="pt")
+ for i in inputs:
+ inputs[i] = inputs[i].to(device)
+ res = bert_model(**inputs, output_hidden_states=True)
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
+ assert len(word2ph) == len(text)
+ phone_level_feature = []
+ for i in range(len(word2ph)):
+ repeat_feature = res[i].repeat(word2ph[i], 1)
+ phone_level_feature.append(repeat_feature)
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
+ return phone_level_feature.T
+
+
+class DictToAttrRecursive(dict):
+ def __init__(self, input_dict):
+ super().__init__(input_dict)
+ for key, value in input_dict.items():
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ self[key] = value
+ setattr(self, key, value)
+
+ def __getattr__(self, item):
+ try:
+ return self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+ def __setattr__(self, key, value):
+ if isinstance(value, dict):
+ value = DictToAttrRecursive(value)
+ super(DictToAttrRecursive, self).__setitem__(key, value)
+ super().__setattr__(key, value)
+
+ def __delattr__(self, item):
+ try:
+ del self[item]
+ except KeyError:
+ raise AttributeError(f"Attribute {item} not found")
+
+
+ssl_model = cnhubert.get_model()
+if is_half == True:
+ ssl_model = ssl_model.half().to(device)
+else:
+ ssl_model = ssl_model.to(device)
+
+
+def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
+ global vq_model, hps, version, dict_language
+ dict_s2 = torch.load(sovits_path, map_location="cpu")
+ hps = dict_s2["config"]
+ hps = DictToAttrRecursive(hps)
+ hps.model.semantic_frame_rate = "25hz"
+ if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
+ hps.model.version = "v1"
+ else:
+ hps.model.version = "v2"
+ version = hps.model.version
+ # print("sovits版本:",hps.model.version)
+ vq_model = SynthesizerTrn(
+ hps.data.filter_length // 2 + 1,
+ hps.train.segment_size // hps.data.hop_length,
+ n_speakers=hps.data.n_speakers,
+ **hps.model
+ )
+ if ("pretrained" not in sovits_path):
+ del vq_model.enc_q
+ if is_half == True:
+ vq_model = vq_model.half().to(device)
+ else:
+ vq_model = vq_model.to(device)
+ vq_model.eval()
+ print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
+ dict_language = dict_language_v1 if version =='v1' else dict_language_v2
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["SoVITS"][version]=sovits_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+ if prompt_language is not None and text_language is not None:
+ if prompt_language in list(dict_language.keys()):
+ prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
+ else:
+ prompt_text_update = {'__type__':'update', 'value':''}
+ prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
+ if text_language in list(dict_language.keys()):
+ text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
+ else:
+ text_update = {'__type__':'update', 'value':''}
+ text_language_update = {'__type__':'update', 'value':i18n("中文")}
+ return {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update
+
+
+
+change_sovits_weights(sovits_path)
+
+
+def change_gpt_weights(gpt_path):
+ global hz, max_sec, t2s_model, config
+ hz = 50
+ dict_s1 = torch.load(gpt_path, map_location="cpu")
+ config = dict_s1["config"]
+ max_sec = config["data"]["max_sec"]
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
+ t2s_model.load_state_dict(dict_s1["weight"])
+ if is_half == True:
+ t2s_model = t2s_model.half()
+ t2s_model = t2s_model.to(device)
+ t2s_model.eval()
+ total = sum([param.nelement() for param in t2s_model.parameters()])
+ print("Number of parameter: %.2fM" % (total / 1e6))
+ with open("./weight.json")as f:
+ data=f.read()
+ data=json.loads(data)
+ data["GPT"][version]=gpt_path
+ with open("./weight.json","w")as f:f.write(json.dumps(data))
+
+
+change_gpt_weights(gpt_path)
+
+
+def get_spepc(hps, filename):
+ audio = load_audio(filename, int(hps.data.sampling_rate))
+ audio = torch.FloatTensor(audio)
+ maxx=audio.abs().max()
+ if(maxx>1):audio/=min(2,maxx)
+ audio_norm = audio
+ audio_norm = audio_norm.unsqueeze(0)
+ spec = spectrogram_torch(
+ audio_norm,
+ hps.data.filter_length,
+ hps.data.sampling_rate,
+ hps.data.hop_length,
+ hps.data.win_length,
+ center=False,
+ )
+ return spec
+
+def clean_text_inf(text, language, version):
+ phones, word2ph, norm_text = clean_text(text, language, version)
+ phones = cleaned_text_to_sequence(phones, version)
+ return phones, word2ph, norm_text
+
+dtype=torch.float16 if is_half == True else torch.float32
+def get_bert_inf(phones, word2ph, norm_text, language):
+ language=language.replace("all_","")
+ if language == "zh":
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
+ else:
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+
+ return bert
+
+
+splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
+
+
+def get_first(text):
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
+ text = re.split(pattern, text)[0].strip()
+ return text
+
+from text import chinese
+def get_phones_and_bert(text,language,version,final=False):
+ if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
+ language = language.replace("all_","")
+ if language == "en":
+ LangSegment.setfilters(["en"])
+ formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
+ else:
+            # CJK ideographs cannot be reliably told apart by language, so trust the user's selection
+ formattext = text
+ while " " in formattext:
+ formattext = formattext.replace(" ", " ")
+ if language == "zh":
+ if re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"zh",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = get_bert_feature(norm_text, word2ph).to(device)
+ elif language == "yue" and re.search(r'[A-Za-z]', formattext):
+ formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
+ formattext = chinese.mix_text_normalize(formattext)
+ return get_phones_and_bert(formattext,"yue",version)
+ else:
+ phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
+ bert = torch.zeros(
+ (1024, len(phones)),
+ dtype=torch.float16 if is_half == True else torch.float32,
+ ).to(device)
+ elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
+ textlist=[]
+ langlist=[]
+ LangSegment.setfilters(["zh","ja","en","ko"])
+ if language == "auto":
+ for tmp in LangSegment.getTexts(text):
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ elif language == "auto_yue":
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "zh":
+ tmp["lang"] = "yue"
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ else:
+ for tmp in LangSegment.getTexts(text):
+ if tmp["lang"] == "en":
+ langlist.append(tmp["lang"])
+ else:
+                    # CJK ideographs cannot be reliably told apart by language, so trust the user's selection
+ langlist.append(language)
+ textlist.append(tmp["text"])
+ print(textlist)
+ print(langlist)
+ phones_list = []
+ bert_list = []
+ norm_text_list = []
+ for i in range(len(textlist)):
+ lang = langlist[i]
+ phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
+ bert = get_bert_inf(phones, word2ph, norm_text, lang)
+ phones_list.append(phones)
+ norm_text_list.append(norm_text)
+ bert_list.append(bert)
+ bert = torch.cat(bert_list, dim=1)
+ phones = sum(phones_list, [])
+ norm_text = ''.join(norm_text_list)
+
+ if not final and len(phones) < 6:
+ return get_phones_and_bert("." + text,language,version,final=True)
+
+ return phones,bert.to(dtype),norm_text
+
+
+def merge_short_text_in_array(texts, threshold):
+ if (len(texts)) < 2:
+ return texts
+ result = []
+ text = ""
+ for ele in texts:
+ text += ele
+ if len(text) >= threshold:
+ result.append(text)
+ text = ""
+ if (len(text) > 0):
+ if len(result) == 0:
+ result.append(text)
+ else:
+ result[len(result) - 1] += text
+ return result
+
+## ref_wav_path + prompt_text + prompt_language + text (single segment) + text_language + top_k + top_p + temperature
+# cache_tokens={}  # no cache-eviction mechanism implemented yet
+cache= {}
+def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=None,pause_duration=0.5):
+ global cache
+ if ref_wav_path:pass
+ else:gr.Warning(i18n('请上传参考音频'))
+ if text:pass
+ else:gr.Warning(i18n('请填入推理文本'))
+ t = []
+ if prompt_text is None or len(prompt_text) == 0:
+ ref_free = True
+ t0 = ttime()
+ prompt_language = dict_language[prompt_language]
+ text_language = dict_language[text_language]
+
+
+ if not ref_free:
+ prompt_text = prompt_text.strip("\n")
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+ print(i18n("实际输入的参考文本:"), prompt_text)
+ text = text.strip("\n")
+ # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
+
+ print(i18n("实际输入的目标文本:"), text)
+ zero_wav = np.zeros(
+ int(hps.data.sampling_rate * 0.3),
+ dtype=np.float16 if is_half == True else np.float32,
+ )
+ # zero_wav1 = np.zeros(
+ # int(hps.data.sampling_rate * 0.5),
+ # dtype=np.float16 if is_half == True else np.float32,
+ # )
+ pause_samples = int(hps.data.sampling_rate * pause_duration)
+ zero_wav1 = np.zeros(pause_samples, dtype=np.float16 if is_half else np.float32)
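+    # e.g. with a 32 kHz model (the common GPT-SoVITS output rate) and pause_duration=0.5,
+    # this yields 16000 zero samples, i.e. roughly half a second of silence between segments.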
+ if not ref_free:
+ with torch.no_grad():
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
+ if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
+ gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
+ raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
+ wav16k = torch.from_numpy(wav16k)
+ zero_wav_torch = torch.from_numpy(zero_wav)
+ if is_half == True:
+ wav16k = wav16k.half().to(device)
+ zero_wav_torch = zero_wav_torch.half().to(device)
+ else:
+ wav16k = wav16k.to(device)
+ zero_wav_torch = zero_wav_torch.to(device)
+ wav16k = torch.cat([wav16k, zero_wav_torch])
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
+ "last_hidden_state"
+ ].transpose(
+ 1, 2
+ ) # .float()
+ codes = vq_model.extract_latent(ssl_content)
+ prompt_semantic = codes[0, 0]
+ prompt = prompt_semantic.unsqueeze(0).to(device)
+
+ t1 = ttime()
+ t.append(t1-t0)
+
+ if (how_to_cut == i18n("凑四句一切")):
+ text = cut1(text)
+ elif (how_to_cut == i18n("凑50字一切")):
+ text = cut2(text)
+ elif (how_to_cut == i18n("按中文句号。切")):
+ text = cut3(text)
+ elif (how_to_cut == i18n("按英文句号.切")):
+ text = cut4(text)
+ elif (how_to_cut == i18n("按标点符号切")):
+ text = cut5(text)
+ while "\n\n" in text:
+ text = text.replace("\n\n", "\n")
+ print(i18n("实际输入的目标文本(切句后):"), text)
+ texts = text.split("\n")
+ texts = process_text(texts)
+ texts = merge_short_text_in_array(texts, 5)
+ audio_opt = []
+ if not ref_free:
+ phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)
+
+ for i_text,text in enumerate(texts):
+        # skip blank lines in the target text so they do not raise errors
+ if (len(text.strip()) == 0):
+ continue
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
+ print(i18n("实际输入的目标文本(每句):"), text)
+ phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
+ if not ref_free:
+ bert = torch.cat([bert1, bert2], 1)
+ all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
+ else:
+ bert = bert2
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
+
+ bert = bert.to(device).unsqueeze(0)
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
+
+ t2 = ttime()
+ # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
+ # print(cache.keys(),if_freeze)
+ if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
+ else:
+ with torch.no_grad():
+ pred_semantic, idx = t2s_model.model.infer_panel(
+ all_phoneme_ids,
+ all_phoneme_len,
+ None if ref_free else prompt,
+ bert,
+ # prompt_phone_len=ph_offset,
+ top_k=top_k,
+ top_p=top_p,
+ temperature=temperature,
+ early_stop_num=hz * max_sec,
+ )
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
+ cache[i_text]=pred_semantic
+ t3 = ttime()
+ refers=[]
+ if(inp_refs):
+ for path in inp_refs:
+ try:
+ refer = get_spepc(hps, path.name).to(dtype).to(device)
+ refers.append(refer)
+ except:
+ traceback.print_exc()
+ if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
+ audio = (vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed).detach().cpu().numpy()[0, 0])
+        max_audio=np.abs(audio).max()  # simple guard against 16-bit clipping
+ if max_audio>1:audio/=max_audio
+ audio_opt.append(audio)
+ audio_opt.append(zero_wav1)
+ t4 = ttime()
+ t.extend([t2 - t1,t3 - t2, t4 - t3])
+ t1 = ttime()
+ print("%.3f\t%.3f\t%.3f\t%.3f" %
+ (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3]))
+ )
+ yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
+ np.int16
+ )
+
+def split(todo_text):
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
+ if todo_text[-1] not in splits:
+ todo_text += "。"
+ i_split_head = i_split_tail = 0
+ len_text = len(todo_text)
+ todo_texts = []
+ while 1:
+ if i_split_head >= len_text:
+            break  # the text is guaranteed to end with punctuation, so the final segment was already appended; just exit
+ if todo_text[i_split_head] in splits:
+ i_split_head += 1
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
+ i_split_tail = i_split_head
+ else:
+ i_split_head += 1
+ return todo_texts
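+# Illustrative example (hypothetical input): split("今天天气不错,我们出去玩")
+# -> ["今天天气不错,", "我们出去玩。"]; a trailing "。" is appended first so the tail is never dropped.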
+
+
+def cut1(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ split_idx = list(range(0, len(inps), 4))
+ split_idx[-1] = None
+ if len(split_idx) > 1:
+ opts = []
+ for idx in range(len(split_idx) - 1):
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
+ else:
+ opts = [inp]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
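+# Note: the final line absorbs the remainder (4 to 7 pieces). Because the last start index is
+# overwritten with None, a piece count that is an exact multiple of four (eight or more)
+# leaves the last eight pieces on a single line.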
+
+
+def cut2(inp):
+ inp = inp.strip("\n")
+ inps = split(inp)
+ if len(inps) < 2:
+ return inp
+ opts = []
+ summ = 0
+ tmp_str = ""
+ for i in range(len(inps)):
+ summ += len(inps[i])
+ tmp_str += inps[i]
+ if summ > 50:
+ summ = 0
+ opts.append(tmp_str)
+ tmp_str = ""
+ if tmp_str != "":
+ opts.append(tmp_str)
+ # print(opts)
+    if len(opts) > 1 and len(opts[-1]) < 50:  ## if the last chunk is too short, merge it into the previous one
+ opts[-2] = opts[-2] + opts[-1]
+ opts = opts[:-1]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+def cut3(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip("。").split("。")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+def cut4(inp):
+ inp = inp.strip("\n")
+ opts = ["%s" % item for item in inp.strip(".").split(".")]
+ opts = [item for item in opts if not set(item).issubset(punctuation)]
+ return "\n".join(opts)
+
+
+# contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
+def cut5(inp):
+ inp = inp.strip("\n")
+ punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
+ mergeitems = []
+ items = []
+
+ for i, char in enumerate(inp):
+ if char in punds:
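+            # keep a '.' that sits between two digits (e.g. "3.14") in the current chunk instead of splitting on it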
+ if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
+ items.append(char)
+ else:
+ items.append(char)
+ mergeitems.append("".join(items))
+ items = []
+ else:
+ items.append(char)
+
+ if items:
+ mergeitems.append("".join(items))
+
+ opt = [item for item in mergeitems if not set(item).issubset(punds)]
+ return "\n".join(opt)
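+# Illustrative example (hypothetical input): cut5("圆周率约等于3.14!对吗?") returns
+# "圆周率约等于3.14!\n对吗?": the decimal point survives while sentence-final marks split
+# the text, and chunks made purely of punctuation are dropped.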
+
+
+def custom_sort_key(s):
+    # split the string into digit and non-digit runs with a regex
+    parts = re.split(r'(\d+)', s)
+    # turn digit runs into ints so they compare numerically; leave the rest as strings
+    parts = [int(part) if part.isdigit() else part for part in parts]
+ return parts
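+# Illustrative example (hypothetical names): sorted(["e10.pth", "e2.pth"], key=custom_sort_key)
+# -> ["e2.pth", "e10.pth"], i.e. natural numeric order rather than lexicographic "e10" < "e2".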
+
+def process_text(texts):
+ _text=[]
+ if all(text in [None, " ", "\n",""] for text in texts):
+ raise ValueError(i18n("请输入有效文本"))
+ for text in texts:
+ if text in [None, " ", ""]:
+ pass
+ else:
+ _text.append(text)
+ return _text
+
+
+def change_choices():
+ SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
+
+
+SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
+GPT_weight_root=["GPT_weights_v2","GPT_weights"]
+for path in SoVITS_weight_root+GPT_weight_root:
+ os.makedirs(path,exist_ok=True)
+
+
+def get_weights_names(GPT_weight_root, SoVITS_weight_root):
+ SoVITS_names = [i for i in pretrained_sovits_name]
+ for path in SoVITS_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
+ GPT_names = [i for i in pretrained_gpt_name]
+ for path in GPT_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
+ return SoVITS_names, GPT_names
+
+
+SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
+
+def html_center(text, label='p'):
+ return f"""
+ <{label} style="margin: 0; padding: 0;">{text}{label}>
+
"""
+
+def html_left(text, label='p'):
+ return f"""
+ <{label} style="margin: 0; padding: 0;">{text}{label}>
+
"""
+
+# build the reference-audio dropdown from the files shipped in ./refwavs
+reference_wavs=["选择参考音频或自己上传"]
+os.makedirs("./refwavs", exist_ok=True)  # avoid a crash on first run if the folder is missing
+for name in os.listdir("./refwavs"):
+    reference_wavs.append(name)
+# refresh the dropdown choices (usefulness depends on the Gradio version)
+def change_ref_choices():
+    reference_wavs = ["选择参考音频或自己上传"]
+    for name in os.listdir("./refwavs/"):
+        reference_wavs.append(name)
+    return {"choices":reference_wavs, "__type__": "update"}
+
+
+
+def replace_speaker(text):
+ return re.sub(r"\[.*?\]", "", text, flags=re.UNICODE)
+
+def change_wav(audio_path):
+ text = audio_path.replace(".wav","").replace(".mp3","").replace(".WAV","")
+ text = replace_speaker(text)
+ return f"./refwavs/{audio_path}",text
+
+# split the text and push the pieces into the per-sentence boxes below
+def split_text_and_fill_boxes(input_text, *textboxes):
+    sentences = []
+    # split each line on the Chinese full stop
+    for line in input_text.split('\n'):
+        sentences.extend(line.split('。'))
+    # drop empty pieces and put the full stop back on each sentence
+    sentences = [sentence.strip()+'。' for sentence in sentences if sentence.strip()]
+
+    # fill each textbox in order; clear any boxes beyond the last sentence
+ updates = []
+ for i, textbox in enumerate(textboxes):
+ if i < len(sentences):
+ updates.append(gr.update(value=sentences[i]))
+ else:
+ updates.append(gr.update(value=""))
+ return updates
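+# Illustrative example (hypothetical input): with input_text="你好。世界" and three boxes,
+# the boxes receive "你好。", "世界。" and "" respectively.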
+
+with gr.Blocks(title="GPT-SoVITS WebUI") as app:
+    gr.Markdown(
+        value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
+    )
+ with gr.Tabs():
+        with gr.TabItem(i18n("语音生成")):
+ with gr.Group():
+ gr.Markdown(html_center(i18n("模型切换"),'h3'))
+ with gr.Row():
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, scale=14)
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, scale=14)
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
+ gr.Markdown(html_center(i18n("*请上传并填写参考信息"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=16):
+ inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
+ with gr.Row():
+                        # reference-audio dropdown
+                        wavs_dropdown = gr.Dropdown(label="参考音频列表",choices=reference_wavs,value="选择参考音频或自己上传",interactive=True,scale=5)
+ refresh_ref_button=gr.Button("刷新",scale=1)
+ refresh_ref_button.click(fn=change_ref_choices,inputs=[],outputs=wavs_dropdown)
+ with gr.Column(scale=13):
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True, show_label=True,scale=1)
+                    gr.Markdown(html_left(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开。<br>开启后无视填写的参考文本。")))
+ prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5,scale=1)
+ wavs_dropdown.change(change_wav,[wavs_dropdown],[inp_ref,prompt_text])
+ with gr.Column(scale=14):
+ prompt_language = gr.Dropdown(
+ label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文"),
+ )
+ inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"),file_count="multiple")
+ gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"),'h3'))
+ with gr.Row():
+ with gr.Column(scale=13):
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
+ with gr.Column(scale=7):
+ text_language = gr.Dropdown(
+ label=i18n("需要合成的语种")+i18n(".限制范围越小判别效果越好。"), choices=list(dict_language.keys()), value=i18n("中文"), scale=1
+ )
+ how_to_cut = gr.Dropdown(
+ label=i18n("怎么切"),
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
+ value=i18n("按中文句号。切"),
+ interactive=True, scale=1
+ )
+ gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
+ if_freeze=gr.Checkbox(label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), value=False, interactive=True,show_label=True, scale=1)
+ speed = gr.Slider(minimum=0.6,maximum=1.65,step=0.05,label=i18n("语速"),value=1,interactive=True, scale=1)
+ pause_duration = gr.Slider(
+ minimum=0.01,
+ maximum=2,
+ step=0.01,
+ label=i18n("音频片段间隔时长(秒)"),
+ value=0.2,
+ interactive=True,
+ scale=1
+ )
+ gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k(采样率,越高语气越丰富)"),value=15,interactive=True, scale=1)
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p(建议不动)"),value=1,interactive=True, scale=1)
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature(越低越稳定,但是语气稍有平淡)"),value=1,interactive=True, scale=1)
+ # with gr.Column():
+ # gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
+ # phoneme=gr.Textbox(label=i18n("音素框"), value="")
+ # get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
+ with gr.Row():
+ inference_button = gr.Button(i18n("合成语音"), variant="primary", size='lg', scale=25)
+ output = gr.Audio(label=i18n("输出的语音"), scale=14)
+
+ inference_button.click(
+ get_tts_wav,
+ [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration],
+ [output],
+ )
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown,prompt_language,text_language], [prompt_language,text_language,prompt_text,prompt_language,text,text_language])
+ GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
+
+
+        with gr.TabItem(i18n("校对生成")):
+ split_button = gr.Button(value="切分文本(前一页整段按句号切分,方便单独生成)", variant="primary", size='lg')
+
+ with gr.Accordion("重新生成列表"):
+ textboxes = []
+                for i in range(100): # create 100 rows, one per split sentence
+                    with gr.Row(): # each row is its own horizontal layout
+                        text_input = gr.Textbox(label=f"切分文本 {i + 1}", scale=4)
+                        button = gr.Button(value="重新生成", scale=1)
+                        audio_output = gr.Audio(scale=2)
+                        # bind the button so this row's textbox is used as the synthesis text
+ button.click(fn=get_tts_wav, inputs=[inp_ref, prompt_text, prompt_language, text_input, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free,speed,if_freeze,inp_refs,pause_duration], outputs=audio_output)
+ textboxes.append(text_input)
+ split_button.click(fn=split_text_and_fill_boxes, inputs=[text, *textboxes], outputs=textboxes)
+ # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
+ # with gr.Row():
+ # text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
+ # button1 = gr.Button(i18n("凑四句一切"), variant="primary")
+ # button2 = gr.Button(i18n("凑50字一切"), variant="primary")
+ # button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
+ # button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
+ # button5 = gr.Button(i18n("按标点符号切"), variant="primary")
+ # text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
+ # button1.click(cut1, [text_inp], [text_opt])
+ # button2.click(cut2, [text_inp], [text_opt])
+ # button3.click(cut3, [text_inp], [text_opt])
+ # button4.click(cut4, [text_inp], [text_opt])
+ # button5.click(cut5, [text_inp], [text_opt])
+ # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))
+
+if __name__ == '__main__':
+ app.queue().launch(#concurrency_count=511, max_size=1022
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=infer_ttswebui,
+ quiet=True,
+ )
diff --git a/go-webui_pro.bat b/go-webui_pro.bat
new file mode 100644
index 00000000..9b64d972
--- /dev/null
+++ b/go-webui_pro.bat
@@ -0,0 +1,2 @@
+runtime\python.exe webui_pro.py zh_CN
+pause
diff --git a/go-webui_test.bat b/go-webui_test.bat
new file mode 100644
index 00000000..1cf917c2
--- /dev/null
+++ b/go-webui_test.bat
@@ -0,0 +1,2 @@
+runtime\python.exe webui_test.py zh_CN
+pause
diff --git a/webui_pro.py b/webui_pro.py
new file mode 100644
index 00000000..bcae73d2
--- /dev/null
+++ b/webui_pro.py
@@ -0,0 +1,1060 @@
+import os,sys
+if len(sys.argv)==1:sys.argv.append('v2')
+version="v1"if sys.argv[1]=="v1" else"v2"
+os.environ["version"]=version
+now_dir = os.getcwd()
+sys.path.insert(0, now_dir)
+import warnings
+warnings.filterwarnings("ignore")
+import json,yaml,torch,pdb,re,shutil
+import platform
+import psutil
+import signal
+torch.manual_seed(233333)
+tmp = os.path.join(now_dir, "TEMP")
+os.makedirs(tmp, exist_ok=True)
+os.environ["TEMP"] = tmp
+if(os.path.exists(tmp)):
+ for name in os.listdir(tmp):
+ if(name=="jieba.cache"):continue
+ path="%s/%s"%(tmp,name)
+ delete=os.remove if os.path.isfile(path) else shutil.rmtree
+ try:
+ delete(path)
+ except Exception as e:
+ print(str(e))
+ pass
+import site
+import traceback
+site_packages_roots = []
+for path in site.getsitepackages():
+ if "packages" in path:
+ site_packages_roots.append(path)
+if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir]
+#os.environ["OPENBLAS_NUM_THREADS"] = "4"
+os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
+os.environ["all_proxy"] = ""
+for site_packages_root in site_packages_roots:
+ if os.path.exists(site_packages_root):
+ try:
+ with open("%s/users.pth" % (site_packages_root), "w") as f:
+ f.write(
+ "%s\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
+ % (now_dir, now_dir, now_dir, now_dir, now_dir)
+ )
+ break
+ except PermissionError as e:
+ traceback.print_exc()
+from tools import my_utils
+import shutil
+import pdb
+from subprocess import Popen
+import signal
+from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share
+from tools.i18n.i18n import I18nAuto, scan_language_list
+language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto"
+os.environ["language"]=language
+i18n = I18nAuto(language=language)
+from scipy.io import wavfile
+from tools.my_utils import load_audio, check_for_existance, check_details
+from multiprocessing import cpu_count
+# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # fall back to CPU for ops MPS does not support
+try:
+ import gradio.analytics as analytics
+ analytics.version_check = lambda:None
+except:...
+import gradio as gr
+n_cpu=cpu_count()
+
+ngpu = torch.cuda.device_count()
+gpu_infos = []
+mem = []
+if_gpu_ok = False
+
+# check for an NVIDIA GPU usable for training and accelerated inference
+ok_gpu_keywords={"10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L4","4060","H"}
+set_gpu_numbers=set()
+if torch.cuda.is_available() or ngpu != 0:
+ for i in range(ngpu):
+ gpu_name = torch.cuda.get_device_name(i)
+        if any(value in gpu_name.upper() for value in ok_gpu_keywords):
+            # A10#A100#V100#A40#P40#M40#K80#A4500
+            if_gpu_ok = True  # at least one usable NVIDIA GPU
+ gpu_infos.append("%s\t%s" % (i, gpu_name))
+ set_gpu_numbers.add(i)
+ mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
+# # 判断是否支持mps加速
+# if torch.backends.mps.is_available():
+# if_gpu_ok = True
+# gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
+# mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # 实测使用系统内存作为显存不会爆显存
+
+if if_gpu_ok and len(gpu_infos) > 0:
+ gpu_info = "\n".join(gpu_infos)
+ default_batch_size = min(mem) // 2
+else:
+ gpu_info = ("%s\t%s" % ("0", "CPU"))
+ gpu_infos.append("%s\t%s" % ("0", "CPU"))
+ set_gpu_numbers.add(0)
+ default_batch_size = int(psutil.virtual_memory().total/ 1024 / 1024 / 1024 / 2)
+gpus = "-".join([i[0] for i in gpu_infos])
+default_gpu_numbers=str(sorted(list(set_gpu_numbers))[0])
+def fix_gpu_number(input):# clamp an out-of-range GPU number back into the valid set
+ try:
+ if(int(input)not in set_gpu_numbers):return default_gpu_numbers
+ except:return input
+ return input
+def fix_gpu_numbers(inputs):
+ output=[]
+ try:
+ for input in inputs.split(","):output.append(str(fix_gpu_number(input)))
+ return ",".join(output)
+ except:
+ return inputs
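+# Illustrative example: if only GPU 0 is available (set_gpu_numbers == {0}), then
+# fix_gpu_numbers("0,5") returns "0,0": the out-of-range card 5 is clamped to the default.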
+
+pretrained_sovits_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
+pretrained_gpt_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
+
+pretrained_model_list = (pretrained_sovits_name[-int(version[-1])+2],pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D"),pretrained_gpt_name[-int(version[-1])+2],"GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large","GPT_SoVITS/pretrained_models/chinese-hubert-base")
+
+_=''
+for i in pretrained_model_list:
+ if os.path.exists(i):...
+ else:_+=f'\n {i}'
+if _:
+ print("warning:",i18n('以下模型不存在:')+_)
+
+_ =[[],[]]
+for i in range(2):
+ if os.path.exists(pretrained_gpt_name[i]):_[0].append(pretrained_gpt_name[i])
+ else:_[0].append("")##没有下pretrained模型的,说不定他们是想自己从零训底模呢
+ if os.path.exists(pretrained_sovits_name[i]):_[-1].append(pretrained_sovits_name[i])
+ else:_[-1].append("")
+pretrained_gpt_name,pretrained_sovits_name = _
+
+SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
+GPT_weight_root=["GPT_weights_v2","GPT_weights"]
+for root in SoVITS_weight_root+GPT_weight_root:
+ os.makedirs(root,exist_ok=True)
+def get_weights_names():
+ SoVITS_names = [name for name in pretrained_sovits_name if name!=""]
+ for path in SoVITS_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
+ GPT_names = [name for name in pretrained_gpt_name if name!=""]
+ for path in GPT_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
+ return SoVITS_names, GPT_names
+
+SoVITS_names,GPT_names = get_weights_names()
+
+
+def custom_sort_key(s):
+    # split the string into digit and non-digit runs with a regex
+    parts = re.split(r'(\d+)', s)
+    # turn digit runs into ints so they compare numerically; leave the rest as strings
+    parts = [int(part) if part.isdigit() else part for part in parts]
+ return parts
+
+def change_choices():
+ SoVITS_names, GPT_names = get_weights_names()
+ return {"choices": sorted(SoVITS_names,key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names,key=custom_sort_key), "__type__": "update"}
+
+p_label=None
+p_uvr5=None
+p_asr=None
+p_denoise=None
+p_tts_inference=None
+
+def kill_proc_tree(pid, including_parent=True):
+ try:
+ parent = psutil.Process(pid)
+ except psutil.NoSuchProcess:
+ # Process already terminated
+ return
+
+ children = parent.children(recursive=True)
+ for child in children:
+ try:
+ os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL
+ except OSError:
+ pass
+ if including_parent:
+ try:
+ os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL
+ except OSError:
+ pass
+
+system=platform.system()
+def kill_process(pid):
+ if(system=="Windows"):
+ cmd = "taskkill /t /f /pid %s" % pid
+ os.system(cmd)
+ else:
+ kill_proc_tree(pid)
+
+
+# def change_label(path_list):
+# global p_label
+# if(p_label==None):
+# check_for_existance([path_list])
+# path_list=my_utils.clean_path(path_list)
+# cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share)
+# yield i18n("打标工具WebUI已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
+# print(cmd)
+# p_label = Popen(cmd, shell=True)
+# elif(p_label!=None):
+# kill_process(p_label.pid)
+# p_label=None
+# yield i18n("打标工具WebUI已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
+
+# def change_uvr5():
+# global p_uvr5
+# if(p_uvr5==None):
+# cmd = '"%s" tools/uvr5/webui.py "%s" %s %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5,is_share)
+# yield i18n("UVR5已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
+# print(cmd)
+# p_uvr5 = Popen(cmd, shell=True)
+# elif(p_uvr5!=None):
+# kill_process(p_uvr5.pid)
+# p_uvr5=None
+# yield i18n("UVR5已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
+
+def change_tts_inference(bert_path,cnhubert_base_path,gpu_number,gpt_path,sovits_path, batched_infer_enabled):
+ global p_tts_inference
+ if batched_infer_enabled:
+ cmd = '"%s" GPT_SoVITS/inference_webui_fast.py "%s"'%(python_exec, language)
+ else:
+ cmd = '"%s" GPT_SoVITS/inference_webui_checkpoint.py "%s"'%(python_exec, language)
+ if(p_tts_inference==None):
+ os.environ["gpt_path"]=gpt_path if "/" in gpt_path else "%s/%s"%(GPT_weight_root,gpt_path)
+ os.environ["sovits_path"]=sovits_path if "/"in sovits_path else "%s/%s"%(SoVITS_weight_root,sovits_path)
+ os.environ["cnhubert_base_path"]=cnhubert_base_path
+ os.environ["bert_path"]=bert_path
+ os.environ["_CUDA_VISIBLE_DEVICES"]=fix_gpu_number(gpu_number)
+ os.environ["is_half"]=str(is_half)
+ os.environ["infer_ttswebui"]=str(webui_port_infer_tts)
+ os.environ["is_share"]=str(is_share)
+ yield i18n("TTS推理进程已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
+ print(cmd)
+ p_tts_inference = Popen(cmd, shell=True)
+ elif(p_tts_inference!=None):
+ kill_process(p_tts_inference.pid)
+ p_tts_inference=None
+ yield i18n("TTS推理进程已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
+
+# from tools.asr.config import asr_dict
+# def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang, asr_precision):
+# global p_asr
+# if(p_asr==None):
+# asr_inp_dir=my_utils.clean_path(asr_inp_dir)
+# asr_opt_dir=my_utils.clean_path(asr_opt_dir)
+# check_for_existance([asr_inp_dir])
+# cmd = f'"{python_exec}" tools/asr/{asr_dict[asr_model]["path"]}'
+# cmd += f' -i "{asr_inp_dir}"'
+# cmd += f' -o "{asr_opt_dir}"'
+# cmd += f' -s {asr_model_size}'
+# cmd += f' -l {asr_lang}'
+# cmd += f" -p {asr_precision}"
+# output_file_name = os.path.basename(asr_inp_dir)
+# output_folder = asr_opt_dir or "output/asr_opt"
+# output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
+# yield "ASR任务开启:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}, {"__type__":"update"}
+# print(cmd)
+# p_asr = Popen(cmd, shell=True)
+# p_asr.wait()
+# p_asr=None
+# yield f"ASR任务完成, 查看终端进行下一步", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__":"update","value":output_file_path}, {"__type__":"update","value":output_file_path}, {"__type__":"update","value":asr_inp_dir}
+# else:
+# yield "已有正在进行的ASR任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}, {"__type__":"update"}
+# # return None
+
+# def close_asr():
+# global p_asr
+# if(p_asr!=None):
+# kill_process(p_asr.pid)
+# p_asr=None
+# return "已终止ASR进程", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# def open_denoise(denoise_inp_dir, denoise_opt_dir):
+# global p_denoise
+# if(p_denoise==None):
+# denoise_inp_dir=my_utils.clean_path(denoise_inp_dir)
+# denoise_opt_dir=my_utils.clean_path(denoise_opt_dir)
+# check_for_existance([denoise_inp_dir])
+# cmd = '"%s" tools/cmd-denoise.py -i "%s" -o "%s" -p %s'%(python_exec,denoise_inp_dir,denoise_opt_dir,"float16"if is_half==True else "float32")
+#
+# yield "语音降噪任务开启:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}
+# print(cmd)
+# p_denoise = Popen(cmd, shell=True)
+# p_denoise.wait()
+# p_denoise=None
+# yield f"语音降噪任务完成, 查看终端进行下一步", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__":"update","value":denoise_opt_dir}, {"__type__":"update","value":denoise_opt_dir}
+# else:
+# yield "已有正在进行的语音降噪任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}
+ # return None
+
+# def close_denoise():
+# global p_denoise
+# if(p_denoise!=None):
+# kill_process(p_denoise.pid)
+# p_denoise=None
+# return "已终止语音降噪进程", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+
+p_train_SoVITS=None
+# def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D):
+# global p_train_SoVITS
+# if(p_train_SoVITS==None):
+# with open("GPT_SoVITS/configs/s2.json")as f:
+# data=f.read()
+# data=json.loads(data)
+# s2_dir="%s/%s"%(exp_root,exp_name)
+# os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True)
+# if check_for_existance([s2_dir],is_train=True):
+# check_details([s2_dir],is_train=True)
+# if(is_half==False):
+# data["train"]["fp16_run"]=False
+# batch_size=max(1,batch_size//2)
+# data["train"]["batch_size"]=batch_size
+# data["train"]["epochs"]=total_epoch
+# data["train"]["text_low_lr_rate"]=text_low_lr_rate
+# data["train"]["pretrained_s2G"]=pretrained_s2G
+# data["train"]["pretrained_s2D"]=pretrained_s2D
+# data["train"]["if_save_latest"]=if_save_latest
+# data["train"]["if_save_every_weights"]=if_save_every_weights
+# data["train"]["save_every_epoch"]=save_every_epoch
+# data["train"]["gpu_numbers"]=gpu_numbers1Ba
+# data["model"]["version"]=version
+# data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir
+# data["save_weight_dir"]=SoVITS_weight_root[-int(version[-1])+2]
+# data["name"]=exp_name
+# data["version"]=version
+# tmp_config_path="%s/tmp_s2.json"%tmp
+# with open(tmp_config_path,"w")as f:f.write(json.dumps(data))
+#
+# cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path)
+# yield "SoVITS训练开始:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+# print(cmd)
+# p_train_SoVITS = Popen(cmd, shell=True)
+# p_train_SoVITS.wait()
+# p_train_SoVITS=None
+# yield "SoVITS训练完成", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+
+# def close1Ba():
+# global p_train_SoVITS
+# if(p_train_SoVITS!=None):
+# kill_process(p_train_SoVITS.pid)
+# p_train_SoVITS=None
+# return "已终止SoVITS训练", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+
+p_train_GPT=None
+# def open1Bb(batch_size,total_epoch,exp_name,if_dpo,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1):
+# global p_train_GPT
+# if(p_train_GPT==None):
+# with open("GPT_SoVITS/configs/s1longer.yaml"if version=="v1"else "GPT_SoVITS/configs/s1longer-v2.yaml")as f:
+# data=f.read()
+# data=yaml.load(data, Loader=yaml.FullLoader)
+# s1_dir="%s/%s"%(exp_root,exp_name)
+# os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True)
+# if check_for_existance([s1_dir],is_train=True):
+# check_details([s1_dir],is_train=True)
+# if(is_half==False):
+# data["train"]["precision"]="32"
+# batch_size = max(1, batch_size // 2)
+# data["train"]["batch_size"]=batch_size
+# data["train"]["epochs"]=total_epoch
+# data["pretrained_s1"]=pretrained_s1
+# data["train"]["save_every_n_epoch"]=save_every_epoch
+# data["train"]["if_save_every_weights"]=if_save_every_weights
+# data["train"]["if_save_latest"]=if_save_latest
+# data["train"]["if_dpo"]=if_dpo
+# data["train"]["half_weights_save_dir"]=GPT_weight_root[-int(version[-1])+2]
+# data["train"]["exp_name"]=exp_name
+# data["train_semantic_path"]="%s/6-name2semantic.tsv"%s1_dir
+# data["train_phoneme_path"]="%s/2-name2text.txt"%s1_dir
+# data["output_dir"]="%s/logs_s1"%s1_dir
+# # data["version"]=version
+#
+# os.environ["_CUDA_VISIBLE_DEVICES"]=fix_gpu_numbers(gpu_numbers.replace("-",","))
+# os.environ["hz"]="25hz"
+# tmp_config_path="%s/tmp_s1.yaml"%tmp
+# with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False))
+# # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir)
+# cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path)
+# yield "GPT训练开始:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+# print(cmd)
+# p_train_GPT = Popen(cmd, shell=True)
+# p_train_GPT.wait()
+# p_train_GPT=None
+# yield "GPT训练完成", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+#
+# def close1Bb():
+# global p_train_GPT
+# if(p_train_GPT!=None):
+# kill_process(p_train_GPT.pid)
+# p_train_GPT=None
+# return "已终止GPT训练", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+
+# ps_slice=[]
+# def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts):
+# global ps_slice
+# inp = my_utils.clean_path(inp)
+# opt_root = my_utils.clean_path(opt_root)
+# check_for_existance([inp])
+# if(os.path.exists(inp)==False):
+# yield "输入路径不存在", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+# return
+# if os.path.isfile(inp):n_parts=1
+# elif os.path.isdir(inp):pass
+# else:
+# yield "输入路径存在但既不是文件也不是文件夹", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+# return
+# if (ps_slice == []):
+# for i_part in range(n_parts):
+# cmd = '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s''' % (python_exec,inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts)
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps_slice.append(p)
+# yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+# for p in ps_slice:
+# p.wait()
+# ps_slice=[]
+# yield "切割结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update", "value":opt_root}, {"__type__": "update", "value":opt_root}, {"__type__": "update", "value":opt_root}
+# else:
+# yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+#
+# def close_slice():
+# global ps_slice
+# if (ps_slice != []):
+# for p_slice in ps_slice:
+# try:
+# kill_process(p_slice.pid)
+# except:
+# traceback.print_exc()
+# ps_slice=[]
+# return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+
+# ps1a=[]
+# def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
+# global ps1a
+# inp_text = my_utils.clean_path(inp_text)
+# inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+# if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+# check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+# if (ps1a == []):
+# opt_dir="%s/%s"%(exp_root,exp_name)
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "bert_pretrained_dir":bert_pretrained_dir,
+# }
+# gpu_names=gpu_numbers.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# "is_half": str(is_half)
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1a.append(p)
+# yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1a:
+# p.wait()
+# opt = []
+# for i_part in range(all_parts):
+# txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
+# with open(txt_path, "r", encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(txt_path)
+# path_text = "%s/2-name2text.txt" % opt_dir
+# with open(path_text, "w", encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# ps1a=[]
+# if len("".join(opt)) > 0:
+# yield "文本进程成功", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# else:
+# yield "文本进程失败", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# else:
+# yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+
+# def close1a():
+# global ps1a
+# if (ps1a != []):
+# for p1a in ps1a:
+# try:
+# kill_process(p1a.pid)
+# except:
+# traceback.print_exc()
+# ps1a=[]
+# return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+#
+# ps1b=[]
+# def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir):
+# global ps1b
+# inp_text = my_utils.clean_path(inp_text)
+# inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+# if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+# check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+# if (ps1b == []):
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":"%s/%s"%(exp_root,exp_name),
+# "cnhubert_base_dir":ssl_pretrained_dir,
+# "is_half": str(is_half)
+# }
+# gpu_names=gpu_numbers.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1b.append(p)
+# yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1b:
+# p.wait()
+# ps1b=[]
+# yield "SSL提取进程结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+#
+# def close1b():
+# global ps1b
+# if (ps1b != []):
+# for p1b in ps1b:
+# try:
+# kill_process(p1b.pid)
+# except:
+# traceback.print_exc()
+# ps1b=[]
+# return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+
+# ps1c=[]
+# def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path):
+# global ps1c
+# inp_text = my_utils.clean_path(inp_text)
+# if check_for_existance([inp_text,''], is_dataset_processing=True):
+# check_details([inp_text,''], is_dataset_processing=True)
+# if (ps1c == []):
+# opt_dir="%s/%s"%(exp_root,exp_name)
+# config={
+# "inp_text":inp_text,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "pretrained_s2G":pretrained_s2G_path,
+# "s2config_path":"GPT_SoVITS/configs/s2.json",
+# "is_half": str(is_half)
+# }
+# gpu_names=gpu_numbers.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1c.append(p)
+# yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1c:
+# p.wait()
+# opt = ["item_name\tsemantic_audio"]
+# path_semantic = "%s/6-name2semantic.tsv" % opt_dir
+# for i_part in range(all_parts):
+# semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
+# with open(semantic_path, "r", encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(semantic_path)
+# with open(path_semantic, "w", encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# ps1c=[]
+# yield "语义token提取进程结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+
+# def close1c():
+# global ps1c
+# if (ps1c != []):
+# for p1c in ps1c:
+# try:
+# kill_process(p1c.pid)
+# except:
+# traceback.print_exc()
+# ps1c=[]
+# return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+#####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G
+# ps1abc=[]
+# def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path):
+# global ps1abc
+# inp_text = my_utils.clean_path(inp_text)
+# inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+# if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+# check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+# if (ps1abc == []):
+# opt_dir="%s/%s"%(exp_root,exp_name)
+# try:
+# #############################1a
+# path_text="%s/2-name2text.txt" % opt_dir
+# if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and len(open(path_text,"r",encoding="utf8").read().strip("\n").split("\n"))<2)):
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "bert_pretrained_dir":bert_pretrained_dir,
+# "is_half": str(is_half)
+# }
+# gpu_names=gpu_numbers1a.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1abc.append(p)
+# yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1abc:p.wait()
+#
+# opt = []
+# for i_part in range(all_parts):#txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part)
+# txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
+# with open(txt_path, "r",encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(txt_path)
+# with open(path_text, "w",encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# assert len("".join(opt)) > 0, "1Aa-文本获取进程失败"
+# yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# ps1abc=[]
+# #############################1b
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "cnhubert_base_dir":ssl_pretrained_dir,
+# }
+# gpu_names=gpu_numbers1Ba.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1abc.append(p)
+# yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1abc:p.wait()
+# yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# ps1abc=[]
+# #############################1c
+# path_semantic = "%s/6-name2semantic.tsv" % opt_dir
+# if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and os.path.getsize(path_semantic)<31)):
+# config={
+# "inp_text":inp_text,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "pretrained_s2G":pretrained_s2G_path,
+# "s2config_path":"GPT_SoVITS/configs/s2.json",
+# }
+# gpu_names=gpu_numbers1c.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1abc.append(p)
+# yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1abc:p.wait()
+#
+# opt = ["item_name\tsemantic_audio"]
+# for i_part in range(all_parts):
+# semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
+# with open(semantic_path, "r",encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(semantic_path)
+# with open(path_semantic, "w",encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# ps1abc = []
+# yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# except:
+# traceback.print_exc()
+# close1abc()
+# yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# else:
+# yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+
+# def close1abc():
+# global ps1abc
+# if (ps1abc != []):
+# for p1abc in ps1abc:
+# try:
+# kill_process(p1abc.pid)
+# except:
+# traceback.print_exc()
+# ps1abc=[]
+# return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+
+def switch_version(version_):
+ os.environ['version']=version_
+ global version
+ version = version_
+ if pretrained_sovits_name[-int(version[-1])+2] !='' and pretrained_gpt_name[-int(version[-1])+2] !='':...
+ else:
+ gr.Warning(i18n(f'未下载{version.upper()}模型'))
+ return {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D")}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}
+
+if os.path.exists('GPT_SoVITS/text/G2PWModel'):...
+else:
+ cmd = '"%s" GPT_SoVITS/download.py'%python_exec
+ p = Popen(cmd, shell=True)
+ p.wait()
+
+def sync(text):
+ return {'__type__':'update','value':text}
+with gr.Blocks(title="GPT-SoVITS WebUI") as app:
+    gr.Markdown(
+        value=
+            i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
+    )
+ # gr.Markdown(
+ # value=
+ # i18n("中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e")
+ # )
+
+ with gr.Tabs():
+ # with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
+ # gr.Markdown(value=i18n("0a-UVR5人声伴奏分离&去混响去延迟工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # uvr5_info = gr.Textbox(label=i18n("UVR5进程输出信息"))
+ # open_uvr5 = gr.Button(value=i18n("开启UVR5-WebUI"),variant="primary",visible=True)
+ # close_uvr5 = gr.Button(value=i18n("关闭UVR5-WebUI"),variant="primary",visible=False)
+ # gr.Markdown(value=i18n("0b-语音切分工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # slice_inp_path=gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"),value="")
+ # slice_opt_root=gr.Textbox(label=i18n("切分后的子音频的输出根目录"),value="output/slicer_opt")
+ # with gr.Row():
+ # threshold=gr.Textbox(label=i18n("threshold:音量小于这个值视作静音的备选切割点"),value="-34")
+ # min_length=gr.Textbox(label=i18n("min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值"),value="4000")
+ # min_interval=gr.Textbox(label=i18n("min_interval:最短切割间隔"),value="300")
+ # hop_size=gr.Textbox(label=i18n("hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)"),value="10")
+ # max_sil_kept=gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"),value="500")
+ # with gr.Row():
+ # _max=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("max:归一化后最大值多少"),value=0.9,interactive=True)
+ # alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("alpha_mix:混多少比例归一化后音频进来"),value=0.25,interactive=True)
+ # with gr.Row():
+ # n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label=i18n("切割使用的进程数"),value=4,interactive=True)
+ # slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息"))
+ # open_slicer_button=gr.Button(i18n("开启语音切割"), variant="primary",visible=True)
+ # close_slicer_button=gr.Button(i18n("终止语音切割"), variant="primary",visible=False)
+ # gr.Markdown(value=i18n("0bb-语音降噪工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # denoise_input_dir=gr.Textbox(label=i18n("降噪音频文件输入文件夹"),value="")
+ # denoise_output_dir=gr.Textbox(label=i18n("降噪结果输出文件夹"),value="output/denoise_opt")
+ # with gr.Row():
+ # denoise_info = gr.Textbox(label=i18n("语音降噪进程输出信息"))
+ # open_denoise_button = gr.Button(i18n("开启语音降噪"), variant="primary",visible=True)
+ # close_denoise_button = gr.Button(i18n("终止语音降噪进程"), variant="primary",visible=False)
+ # gr.Markdown(value=i18n("0c-中文批量离线ASR工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # asr_inp_dir = gr.Textbox(
+ # label=i18n("输入文件夹路径"),
+ # value="D:\\GPT-SoVITS\\raw\\xxx",
+ # interactive=True,
+ # )
+ # asr_opt_dir = gr.Textbox(
+ # label = i18n("输出文件夹路径"),
+ # value = "output/asr_opt",
+ # interactive = True,
+ # )
+ # with gr.Row():
+ # asr_model = gr.Dropdown(
+ # label = i18n("ASR 模型"),
+ # choices = list(asr_dict.keys()),
+ # interactive = True,
+ # value="达摩 ASR (中文)"
+ # )
+ # asr_size = gr.Dropdown(
+ # label = i18n("ASR 模型尺寸"),
+ # choices = ["large"],
+ # interactive = True,
+ # value="large"
+ # )
+ # asr_lang = gr.Dropdown(
+ # label = i18n("ASR 语言设置"),
+ # choices = ["zh","yue"],
+ # interactive = True,
+ # value="zh"
+ # )
+ # asr_precision = gr.Dropdown(
+ # label = i18n("数据类型精度"),
+ # choices = ["float32"],
+ # interactive = True,
+ # value="float32"
+ # )
+ # with gr.Row():
+ # asr_info = gr.Textbox(label=i18n("ASR进程输出信息"))
+ # open_asr_button = gr.Button(i18n("开启离线批量ASR"), variant="primary",visible=True)
+ # close_asr_button = gr.Button(i18n("终止ASR进程"), variant="primary",visible=False)
+ #
+ # def change_lang_choices(key): #根据选择的模型修改可选的语言
+ # # return gr.Dropdown(choices=asr_dict[key]['lang'])
+ # return {"__type__": "update", "choices": asr_dict[key]['lang'],"value":asr_dict[key]['lang'][0]}
+ # def change_size_choices(key): # 根据选择的模型修改可选的模型尺寸
+ # # return gr.Dropdown(choices=asr_dict[key]['size'])
+ # return {"__type__": "update", "choices": asr_dict[key]['size'],"value":asr_dict[key]['size'][-1]}
+ # def change_precision_choices(key): #根据选择的模型修改可选的语言
+ # if key =="Faster Whisper (多语种)":
+ # if default_batch_size <= 4:
+ # precision = 'int8'
+ # elif is_half:
+ # precision = 'float16'
+ # else:
+ # precision = 'float32'
+ # else:
+ # precision = 'float32'
+ # # return gr.Dropdown(choices=asr_dict[key]['precision'])
+ # return {"__type__": "update", "choices": asr_dict[key]['precision'],"value":precision}
+ # asr_model.change(change_lang_choices, [asr_model], [asr_lang])
+ # asr_model.change(change_size_choices, [asr_model], [asr_size])
+ # asr_model.change(change_precision_choices, [asr_model], [asr_precision])
+ #
+ #
+ # gr.Markdown(value=i18n("0d-语音文本校对标注工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # path_list = gr.Textbox(
+ # label=i18n(".list标注文件的路径"),
+ # value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list",
+ # interactive=True,
+ # )
+ # label_info = gr.Textbox(label=i18n("打标工具进程输出信息"))
+ #
+ # open_label = gr.Button(value=i18n("开启打标WebUI"),variant="primary",visible=True)
+ # close_label = gr.Button(value=i18n("关闭打标WebUI"),variant="primary",visible=False)
+ # open_label.click(change_label, [path_list], [label_info,open_label,close_label])
+ # close_label.click(change_label, [path_list], [label_info,open_label,close_label])
+ # open_uvr5.click(change_uvr5, [], [uvr5_info,open_uvr5,close_uvr5])
+ # close_uvr5.click(change_uvr5, [], [uvr5_info,open_uvr5,close_uvr5])
+
+ with gr.TabItem(i18n("1-GPT-SoVITS-TTS")):
+ with gr.Row():
+ with gr.Row():
+ # exp_name = gr.Textbox(label=i18n("*实验/模型名"), value="xxx", interactive=True)
+ gpu_info = gr.Textbox(label=i18n("显卡信息"), value=gpu_info, visible=True, interactive=False)
+ version_checkbox = gr.Radio(label=i18n("版本"),value=version,choices=['v1','v2'])
+ with gr.Row():
+ pretrained_s2G = gr.Textbox(label=i18n("预训练的SoVITS-G模型路径"), value=pretrained_sovits_name[-int(version[-1])+2], interactive=True, lines=2, max_lines=3,scale=9)
+ pretrained_s2D = gr.Textbox(label=i18n("预训练的SoVITS-D模型路径"), value=pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D"), interactive=True, lines=2, max_lines=3,scale=9)
+ pretrained_s1 = gr.Textbox(label=i18n("预训练的GPT模型路径"), value=pretrained_gpt_name[-int(version[-1])+2], interactive=True, lines=2, max_lines=3,scale=10)
+ bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),
+ value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
+ interactive=False, lines=2)
+ cnhubert_base_dir = gr.Textbox(label=i18n("预训练的SSL模型路径"),
+ value="GPT_SoVITS/pretrained_models/chinese-hubert-base",
+ interactive=False, lines=2)
+ # with gr.TabItem(i18n("1A-训练集格式化工具")):
+ # gr.Markdown(value=i18n("输出logs/实验名目录下应有23456开头的文件和文件夹"))
+ # with gr.Row():
+ # with gr.Row():
+ # inp_text = gr.Textbox(label=i18n("*文本标注文件"),value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list",interactive=True,scale=10)
+ # with gr.Row():
+ # inp_wav_dir = gr.Textbox(
+ # label=i18n("*训练集音频文件目录"),
+ # # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",
+ # interactive=True,
+ # placeholder=i18n("填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。"), scale=10
+ # )
+ # gr.Markdown(value=i18n("1Aa-文本内容"))
+ # with gr.Row():
+ # with gr.Row():
+ # gpu_numbers1a = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
+ # with gr.Row():
+ # bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False,lines=2)
+ # with gr.Row():
+ # button1a_open = gr.Button(i18n("开启文本获取"), variant="primary",visible=True)
+ # button1a_close = gr.Button(i18n("终止文本获取进程"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1a=gr.Textbox(label=i18n("文本进程输出信息"))
+ # gr.Markdown(value=i18n("1Ab-SSL自监督特征提取"))
+ # with gr.Row():
+ # with gr.Row():
+ # gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
+ # with gr.Row():
+ # cnhubert_base_dir = gr.Textbox(label=i18n("预训练的SSL模型路径"),value="GPT_SoVITS/pretrained_models/chinese-hubert-base",interactive=False,lines=2)
+ # with gr.Row():
+ # button1b_open = gr.Button(i18n("开启SSL提取"), variant="primary",visible=True)
+ # button1b_close = gr.Button(i18n("终止SSL提取进程"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1b=gr.Textbox(label=i18n("SSL进程输出信息"))
+ # gr.Markdown(value=i18n("1Ac-语义token提取"))
+ # with gr.Row():
+ # with gr.Row():
+ # gpu_numbers1c = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
+ # with gr.Row():
+ # pretrained_s2G_ = gr.Textbox(label=i18n("预训练的SoVITS-G模型路径"), value=pretrained_sovits_name[-int(version[-1])+2], interactive=False,lines=2)
+ # with gr.Row():
+ # button1c_open = gr.Button(i18n("开启语义token提取"), variant="primary",visible=True)
+ # button1c_close = gr.Button(i18n("终止语义token提取进程"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1c=gr.Textbox(label=i18n("语义token提取进程输出信息"))
+ # gr.Markdown(value=i18n("1Aabc-训练集格式化一键三连"))
+ # with gr.Row():
+ # with gr.Row():
+ # button1abc_open = gr.Button(i18n("开启一键三连"), variant="primary",visible=True)
+ # button1abc_close = gr.Button(i18n("终止一键三连"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1abc=gr.Textbox(label=i18n("一键三连进程输出信息"))
+ #
+ # pretrained_s2G.change(sync,[pretrained_s2G],[pretrained_s2G_])
+ # open_asr_button.click(open_asr, [asr_inp_dir, asr_opt_dir, asr_model, asr_size, asr_lang, asr_precision], [asr_info,open_asr_button,close_asr_button,path_list,inp_text,inp_wav_dir])
+ # close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button])
+ # open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button,asr_inp_dir,denoise_input_dir,inp_wav_dir])
+ # close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button])
+ # open_denoise_button.click(open_denoise, [denoise_input_dir,denoise_output_dir], [denoise_info,open_denoise_button,close_denoise_button,asr_inp_dir,inp_wav_dir])
+ # close_denoise_button.click(close_denoise, [], [denoise_info,open_denoise_button,close_denoise_button])
+ #
+ # button1a_open.click(open1a, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,bert_pretrained_dir], [info1a,button1a_open,button1a_close])
+ # button1a_close.click(close1a, [], [info1a,button1a_open,button1a_close])
+ # button1b_open.click(open1b, [inp_text,inp_wav_dir,exp_name,gpu_numbers1Ba,cnhubert_base_dir], [info1b,button1b_open,button1b_close])
+ # button1b_close.click(close1b, [], [info1b,button1b_open,button1b_close])
+ # button1c_open.click(open1c, [inp_text,exp_name,gpu_numbers1c,pretrained_s2G], [info1c,button1c_open,button1c_close])
+ # button1c_close.click(close1c, [], [info1c,button1c_open,button1c_close])
+ # button1abc_open.click(open1abc, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G], [info1abc,button1abc_open,button1abc_close])
+ # button1abc_close.click(close1abc, [], [info1abc,button1abc_open,button1abc_close])
+ # with gr.TabItem(i18n("1B-微调训练")):
+ # gr.Markdown(value=i18n("1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。"))
+ # with gr.Row():
+ # with gr.Column():
+ # with gr.Row():
+ # batch_size = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True)
+ # total_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True)
+ # with gr.Row():
+ # text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True)
+ # save_every_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True)
+ # with gr.Column():
+ # with gr.Column():
+ # if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)
+ # if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True)
+ # with gr.Row():
+ # gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True)
+ # with gr.Row():
+ # with gr.Row():
+ # button1Ba_open = gr.Button(i18n("开启SoVITS训练"), variant="primary",visible=True)
+ # button1Ba_close = gr.Button(i18n("终止SoVITS训练"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1Ba=gr.Textbox(label=i18n("SoVITS训练进程输出信息"))
+ # gr.Markdown(value=i18n("1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。"))
+ # with gr.Row():
+ # with gr.Column():
+ # with gr.Row():
+ # batch_size1Bb = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True)
+ # total_epoch1Bb = gr.Slider(minimum=2,maximum=50,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True)
+ # with gr.Row():
+ # save_every_epoch1Bb = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=5,interactive=True)
+ # if_dpo = gr.Checkbox(label=i18n("是否开启dpo训练选项(实验性)"), value=False, interactive=True, show_label=True)
+ # with gr.Column():
+ # with gr.Column():
+ # if_save_latest1Bb = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)
+ # if_save_every_weights1Bb = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True)
+ # with gr.Row():
+ # gpu_numbers1Bb = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True)
+ # with gr.Row():
+ # with gr.Row():
+ # button1Bb_open = gr.Button(i18n("开启GPT训练"), variant="primary",visible=True)
+ # button1Bb_close = gr.Button(i18n("终止GPT训练"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1Bb=gr.Textbox(label=i18n("GPT训练进程输出信息"))
+ # button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Ba,button1Ba_open,button1Ba_close])
+ # button1Ba_close.click(close1Ba, [], [info1Ba,button1Ba_open,button1Ba_close])
+ # button1Bb_open.click(open1Bb, [batch_size1Bb,total_epoch1Bb,exp_name,if_dpo,if_save_latest1Bb,if_save_every_weights1Bb,save_every_epoch1Bb,gpu_numbers1Bb,pretrained_s1], [info1Bb,button1Bb_open,button1Bb_close])
+ # button1Bb_close.click(close1Bb, [], [info1Bb,button1Bb_open,button1Bb_close])
+ with gr.TabItem(i18n("推理")):
+ gr.Markdown(value=i18n("选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。"))
+ with gr.Row():
+ with gr.Row():
+ GPT_dropdown = gr.Dropdown(label=i18n("*GPT模型列表"), choices=sorted(GPT_names,key=custom_sort_key),value=pretrained_gpt_name[0],interactive=True)
+ SoVITS_dropdown = gr.Dropdown(label=i18n("*SoVITS模型列表"), choices=sorted(SoVITS_names,key=custom_sort_key),value=pretrained_sovits_name[0],interactive=True)
+ with gr.Row():
+ gpu_number_1C=gr.Textbox(label=i18n("GPU卡号,只能填1个整数"), value=gpus, interactive=True)
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
+ refresh_button.click(fn=change_choices,inputs=[],outputs=[SoVITS_dropdown,GPT_dropdown])
+ with gr.Row():
+ with gr.Row():
+ batched_infer_enabled = gr.Checkbox(label=i18n("启用并行推理版本(推理速度更快)"), value=False, interactive=True, show_label=True)
+ with gr.Row():
+ open_tts = gr.Button(value=i18n("开启TTS推理WebUI"),variant='primary',visible=True)
+ close_tts = gr.Button(value=i18n("关闭TTS推理WebUI"),variant='primary',visible=False)
+ with gr.Row():
+ tts_info = gr.Textbox(label=i18n("TTS推理WebUI进程输出信息"))
+ open_tts.click(change_tts_inference, [bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown, batched_infer_enabled], [tts_info,open_tts,close_tts])
+ close_tts.click(change_tts_inference, [bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown, batched_infer_enabled], [tts_info,open_tts,close_tts])
+ version_checkbox.change(switch_version,[version_checkbox],[pretrained_s2G,pretrained_s2D,pretrained_s1,GPT_dropdown,SoVITS_dropdown])
+ # with gr.TabItem(i18n("2-GPT-SoVITS-变声")):gr.Markdown(value=i18n("施工中,请静候佳音"))
+ app.queue().launch(#concurrency_count=511, max_size=1022
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=webui_port_main,
+ quiet=True,
+ )
diff --git a/webui_test.py b/webui_test.py
new file mode 100644
index 00000000..261a8a92
--- /dev/null
+++ b/webui_test.py
@@ -0,0 +1,1060 @@
+import os,sys
+if len(sys.argv)==1:sys.argv.append('v2')
+version="v1"if sys.argv[1]=="v1" else"v2"
+os.environ["version"]=version
+now_dir = os.getcwd()
+sys.path.insert(0, now_dir)
+import warnings
+warnings.filterwarnings("ignore")
+import json,yaml,torch,pdb,re,shutil
+import platform
+import psutil
+import signal
+torch.manual_seed(233333)
+tmp = os.path.join(now_dir, "TEMP")
+os.makedirs(tmp, exist_ok=True)
+os.environ["TEMP"] = tmp
+if(os.path.exists(tmp)):
+ for name in os.listdir(tmp):
+ if(name=="jieba.cache"):continue
+ path="%s/%s"%(tmp,name)
+ delete=os.remove if os.path.isfile(path) else shutil.rmtree
+ try:
+ delete(path)
+ except Exception as e:
+ print(str(e))
+ pass
+import site
+import traceback
+site_packages_roots = []
+for path in site.getsitepackages():
+ if "packages" in path:
+ site_packages_roots.append(path)
+if(site_packages_roots==[]):site_packages_roots=["%s/runtime/Lib/site-packages" % now_dir]
+#os.environ["OPENBLAS_NUM_THREADS"] = "4"
+os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
+os.environ["all_proxy"] = ""
+for site_packages_root in site_packages_roots:
+ if os.path.exists(site_packages_root):
+ try:
+ with open("%s/users.pth" % (site_packages_root), "w") as f:
+ f.write(
+ "%s\n%s/tools\n%s/tools/asr\n%s/GPT_SoVITS\n%s/tools/uvr5"
+ % (now_dir, now_dir, now_dir, now_dir, now_dir)
+ )
+ break
+ except PermissionError as e:
+ traceback.print_exc()
+from tools import my_utils
+import shutil
+import pdb
+from subprocess import Popen
+import signal
+from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share
+from tools.i18n.i18n import I18nAuto, scan_language_list
+language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else "Auto"
+os.environ["language"]=language
+i18n = I18nAuto(language=language)
+from scipy.io import wavfile
+from tools.my_utils import load_audio, check_for_existance, check_details
+from multiprocessing import cpu_count
+# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # fall back to CPU for steps MPS does not support
+try:
+ import gradio.analytics as analytics
+ analytics.version_check = lambda:None
+except:...
+import gradio as gr
+n_cpu=cpu_count()
+
+ngpu = torch.cuda.device_count()
+gpu_infos = []
+mem = []
+if_gpu_ok = False
+
+# Check whether an NVIDIA GPU usable for training and accelerated inference is present
+ok_gpu_keywords={"10","16","20","30","40","A2","A3","A4","P4","A50","500","A60","70","80","90","M4","T4","TITAN","L4","4060","H"}
+set_gpu_numbers=set()
+if torch.cuda.is_available() or ngpu != 0:
+ for i in range(ngpu):
+ gpu_name = torch.cuda.get_device_name(i)
+ if any(value in gpu_name.upper()for value in ok_gpu_keywords):
+ # A10#A100#V100#A40#P40#M40#K80#A4500
+            if_gpu_ok = True # at least one usable NVIDIA GPU
+ gpu_infos.append("%s\t%s" % (i, gpu_name))
+ set_gpu_numbers.add(i)
+            mem.append(int(torch.cuda.get_device_properties(i).total_memory / 1024 / 1024 / 1024 + 0.4))  # total VRAM in GiB, with a +0.4 rounding bias
+# # Check whether MPS acceleration is supported
+# if torch.backends.mps.is_available():
+#     if_gpu_ok = True
+#     gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
+#     mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # in practice, using system RAM as VRAM does not run out of memory
+
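+# Heuristic defaults: with a usable GPU, batch_size is half of the smallest card's VRAM in GiB;
+# on CPU, it is half of the system RAM in GiB.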
+if if_gpu_ok and len(gpu_infos) > 0:
+ gpu_info = "\n".join(gpu_infos)
+ default_batch_size = min(mem) // 2
+else:
+ gpu_info = ("%s\t%s" % ("0", "CPU"))
+ gpu_infos.append("%s\t%s" % ("0", "CPU"))
+ set_gpu_numbers.add(0)
+ default_batch_size = int(psutil.virtual_memory().total/ 1024 / 1024 / 1024 / 2)
+gpus = "-".join([i[0] for i in gpu_infos])
+default_gpu_numbers=str(sorted(list(set_gpu_numbers))[0])
+def fix_gpu_number(input):  # clamp an out-of-range GPU number back to a valid one
+ try:
+ if(int(input)not in set_gpu_numbers):return default_gpu_numbers
+ except:return input
+ return input
+def fix_gpu_numbers(inputs):
+ output=[]
+ try:
+ for input in inputs.split(","):output.append(str(fix_gpu_number(input)))
+ return ",".join(output)
+ except:
+ return inputs
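+# e.g. with only GPU 0 present, fix_gpu_number("5") -> "0" and fix_gpu_numbers("0,5") -> "0,0"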
+
+pretrained_sovits_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
+pretrained_gpt_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
+
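+# Index trick used below: -int(version[-1])+2 maps "v2" -> 0 (the v2 entry) and "v1" -> 1 (the v1 entry) in the path lists.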
+pretrained_model_list = (pretrained_sovits_name[-int(version[-1])+2],pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D"),pretrained_gpt_name[-int(version[-1])+2],"GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large","GPT_SoVITS/pretrained_models/chinese-hubert-base")
+
+_=''
+for i in pretrained_model_list:
+ if os.path.exists(i):...
+ else:_+=f'\n {i}'
+if _:
+ print("warning:",i18n('以下模型不存在:')+_)
+
+_ =[[],[]]
+for i in range(2):
+ if os.path.exists(pretrained_gpt_name[i]):_[0].append(pretrained_gpt_name[i])
+ else:_[0].append("")##没有下pretrained模型的,说不定他们是想自己从零训底模呢
+ if os.path.exists(pretrained_sovits_name[i]):_[-1].append(pretrained_sovits_name[i])
+ else:_[-1].append("")
+pretrained_gpt_name,pretrained_sovits_name = _
+
+SoVITS_weight_root=["SoVITS_weights_v2","SoVITS_weights"]
+GPT_weight_root=["GPT_weights_v2","GPT_weights"]
+for root in SoVITS_weight_root+GPT_weight_root:
+ os.makedirs(root,exist_ok=True)
+def get_weights_names():
+ SoVITS_names = [name for name in pretrained_sovits_name if name!=""]
+ for path in SoVITS_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
+ GPT_names = [name for name in pretrained_gpt_name if name!=""]
+ for path in GPT_weight_root:
+ for name in os.listdir(path):
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
+ return SoVITS_names, GPT_names
+
+SoVITS_names,GPT_names = get_weights_names()
+for path in SoVITS_weight_root+GPT_weight_root:
+ os.makedirs(path,exist_ok=True)
+
+
+def custom_sort_key(s):
+    # use a regex to split the string into digit and non-digit runs
+    parts = re.split(r'(\d+)', s)
+    # convert the digit runs to integers; leave the non-digit parts unchanged
+    parts = [int(part) if part.isdigit() else part for part in parts]
+ return parts
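+# e.g. custom_sort_key("model_e8_s24.pth") -> ['model_e', 8, '_s', 24, '.pth'],
+# so "e10" sorts after "e9" rather than between "e1" and "e2"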
+
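+# Rescan the weight folders and push the refreshed choices to both dropdowns via Gradio dict-style updates.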
+def change_choices():
+ SoVITS_names, GPT_names = get_weights_names()
+ return {"choices": sorted(SoVITS_names,key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names,key=custom_sort_key), "__type__": "update"}
+
+p_label=None
+p_uvr5=None
+p_asr=None
+p_denoise=None
+p_tts_inference=None
+
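+# Terminate a process together with all of its children (the spawned WebUIs run as separate processes).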
+def kill_proc_tree(pid, including_parent=True):
+ try:
+ parent = psutil.Process(pid)
+ except psutil.NoSuchProcess:
+ # Process already terminated
+ return
+
+ children = parent.children(recursive=True)
+ for child in children:
+ try:
+ os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL
+ except OSError:
+ pass
+ if including_parent:
+ try:
+ os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL
+ except OSError:
+ pass
+
+system=platform.system()
+def kill_process(pid):
+ if(system=="Windows"):
+ cmd = "taskkill /t /f /pid %s" % pid
+ os.system(cmd)
+ else:
+ kill_proc_tree(pid)
+
+
+# def change_label(path_list):
+# global p_label
+# if(p_label==None):
+# check_for_existance([path_list])
+# path_list=my_utils.clean_path(path_list)
+# cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share)
+# yield i18n("打标工具WebUI已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
+# print(cmd)
+# p_label = Popen(cmd, shell=True)
+# elif(p_label!=None):
+# kill_process(p_label.pid)
+# p_label=None
+# yield i18n("打标工具WebUI已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
+
+# def change_uvr5():
+# global p_uvr5
+# if(p_uvr5==None):
+# cmd = '"%s" tools/uvr5/webui.py "%s" %s %s %s'%(python_exec,infer_device,is_half,webui_port_uvr5,is_share)
+# yield i18n("UVR5已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
+# print(cmd)
+# p_uvr5 = Popen(cmd, shell=True)
+# elif(p_uvr5!=None):
+# kill_process(p_uvr5.pid)
+# p_uvr5=None
+# yield i18n("UVR5已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
+
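+# Toggle the TTS inference WebUI: the first click configures the subprocess via environment variables
+# and spawns it; the next click kills it and flips the open/close buttons back.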
+def change_tts_inference(bert_path,cnhubert_base_path,gpu_number,gpt_path,sovits_path, batched_infer_enabled):
+ global p_tts_inference
+ if batched_infer_enabled:
+ cmd = '"%s" GPT_SoVITS/inference_webui_fast.py "%s"'%(python_exec, language)
+ else:
+ cmd = '"%s" GPT_SoVITS/inference_webui_test.py "%s"'%(python_exec, language)
+ if(p_tts_inference==None):
+ os.environ["gpt_path"]=gpt_path if "/" in gpt_path else "%s/%s"%(GPT_weight_root,gpt_path)
+ os.environ["sovits_path"]=sovits_path if "/"in sovits_path else "%s/%s"%(SoVITS_weight_root,sovits_path)
+ os.environ["cnhubert_base_path"]=cnhubert_base_path
+ os.environ["bert_path"]=bert_path
+ os.environ["_CUDA_VISIBLE_DEVICES"]=fix_gpu_number(gpu_number)
+ os.environ["is_half"]=str(is_half)
+ os.environ["infer_ttswebui"]=str(webui_port_infer_tts)
+ os.environ["is_share"]=str(is_share)
+ yield i18n("TTS推理进程已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
+ print(cmd)
+ p_tts_inference = Popen(cmd, shell=True)
+ elif(p_tts_inference!=None):
+ kill_process(p_tts_inference.pid)
+ p_tts_inference=None
+ yield i18n("TTS推理进程已关闭"), {'__type__':'update','visible':True}, {'__type__':'update','visible':False}
+
+# from tools.asr.config import asr_dict
+# def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang, asr_precision):
+# global p_asr
+# if(p_asr==None):
+# asr_inp_dir=my_utils.clean_path(asr_inp_dir)
+# asr_opt_dir=my_utils.clean_path(asr_opt_dir)
+# check_for_existance([asr_inp_dir])
+# cmd = f'"{python_exec}" tools/asr/{asr_dict[asr_model]["path"]}'
+# cmd += f' -i "{asr_inp_dir}"'
+# cmd += f' -o "{asr_opt_dir}"'
+# cmd += f' -s {asr_model_size}'
+# cmd += f' -l {asr_lang}'
+# cmd += f" -p {asr_precision}"
+# output_file_name = os.path.basename(asr_inp_dir)
+# output_folder = asr_opt_dir or "output/asr_opt"
+# output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
+# yield "ASR任务开启:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}, {"__type__":"update"}
+# print(cmd)
+# p_asr = Popen(cmd, shell=True)
+# p_asr.wait()
+# p_asr=None
+# yield f"ASR任务完成, 查看终端进行下一步", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__":"update","value":output_file_path}, {"__type__":"update","value":output_file_path}, {"__type__":"update","value":asr_inp_dir}
+# else:
+# yield "已有正在进行的ASR任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}, {"__type__":"update"}
+# # return None
+
+# def close_asr():
+# global p_asr
+# if(p_asr!=None):
+# kill_process(p_asr.pid)
+# p_asr=None
+# return "已终止ASR进程", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# def open_denoise(denoise_inp_dir, denoise_opt_dir):
+# global p_denoise
+# if(p_denoise==None):
+# denoise_inp_dir=my_utils.clean_path(denoise_inp_dir)
+# denoise_opt_dir=my_utils.clean_path(denoise_opt_dir)
+# check_for_existance([denoise_inp_dir])
+# cmd = '"%s" tools/cmd-denoise.py -i "%s" -o "%s" -p %s'%(python_exec,denoise_inp_dir,denoise_opt_dir,"float16"if is_half==True else "float32")
+#
+# yield "语音降噪任务开启:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}
+# print(cmd)
+# p_denoise = Popen(cmd, shell=True)
+# p_denoise.wait()
+# p_denoise=None
+# yield f"语音降噪任务完成, 查看终端进行下一步", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__":"update","value":denoise_opt_dir}, {"__type__":"update","value":denoise_opt_dir}
+# else:
+# yield "已有正在进行的语音降噪任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}
+ # return None
+
+# def close_denoise():
+# global p_denoise
+# if(p_denoise!=None):
+# kill_process(p_denoise.pid)
+# p_denoise=None
+# return "已终止语音降噪进程", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+
+p_train_SoVITS=None
+# def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D):
+# global p_train_SoVITS
+# if(p_train_SoVITS==None):
+# with open("GPT_SoVITS/configs/s2.json")as f:
+# data=f.read()
+# data=json.loads(data)
+# s2_dir="%s/%s"%(exp_root,exp_name)
+# os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True)
+# if check_for_existance([s2_dir],is_train=True):
+# check_details([s2_dir],is_train=True)
+# if(is_half==False):
+# data["train"]["fp16_run"]=False
+# batch_size=max(1,batch_size//2)
+# data["train"]["batch_size"]=batch_size
+# data["train"]["epochs"]=total_epoch
+# data["train"]["text_low_lr_rate"]=text_low_lr_rate
+# data["train"]["pretrained_s2G"]=pretrained_s2G
+# data["train"]["pretrained_s2D"]=pretrained_s2D
+# data["train"]["if_save_latest"]=if_save_latest
+# data["train"]["if_save_every_weights"]=if_save_every_weights
+# data["train"]["save_every_epoch"]=save_every_epoch
+# data["train"]["gpu_numbers"]=gpu_numbers1Ba
+# data["model"]["version"]=version
+# data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir
+# data["save_weight_dir"]=SoVITS_weight_root[-int(version[-1])+2]
+# data["name"]=exp_name
+# data["version"]=version
+# tmp_config_path="%s/tmp_s2.json"%tmp
+# with open(tmp_config_path,"w")as f:f.write(json.dumps(data))
+#
+# cmd = '"%s" GPT_SoVITS/s2_train.py --config "%s"'%(python_exec,tmp_config_path)
+# yield "SoVITS训练开始:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+# print(cmd)
+# p_train_SoVITS = Popen(cmd, shell=True)
+# p_train_SoVITS.wait()
+# p_train_SoVITS=None
+# yield "SoVITS训练完成", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的SoVITS训练任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+
+# def close1Ba():
+# global p_train_SoVITS
+# if(p_train_SoVITS!=None):
+# kill_process(p_train_SoVITS.pid)
+# p_train_SoVITS=None
+# return "已终止SoVITS训练", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+
+p_train_GPT=None
+# def open1Bb(batch_size,total_epoch,exp_name,if_dpo,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers,pretrained_s1):
+# global p_train_GPT
+# if(p_train_GPT==None):
+# with open("GPT_SoVITS/configs/s1longer.yaml"if version=="v1"else "GPT_SoVITS/configs/s1longer-v2.yaml")as f:
+# data=f.read()
+# data=yaml.load(data, Loader=yaml.FullLoader)
+# s1_dir="%s/%s"%(exp_root,exp_name)
+# os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True)
+# if check_for_existance([s1_dir],is_train=True):
+# check_details([s1_dir],is_train=True)
+# if(is_half==False):
+# data["train"]["precision"]="32"
+# batch_size = max(1, batch_size // 2)
+# data["train"]["batch_size"]=batch_size
+# data["train"]["epochs"]=total_epoch
+# data["pretrained_s1"]=pretrained_s1
+# data["train"]["save_every_n_epoch"]=save_every_epoch
+# data["train"]["if_save_every_weights"]=if_save_every_weights
+# data["train"]["if_save_latest"]=if_save_latest
+# data["train"]["if_dpo"]=if_dpo
+# data["train"]["half_weights_save_dir"]=GPT_weight_root[-int(version[-1])+2]
+# data["train"]["exp_name"]=exp_name
+# data["train_semantic_path"]="%s/6-name2semantic.tsv"%s1_dir
+# data["train_phoneme_path"]="%s/2-name2text.txt"%s1_dir
+# data["output_dir"]="%s/logs_s1"%s1_dir
+# # data["version"]=version
+#
+# os.environ["_CUDA_VISIBLE_DEVICES"]=fix_gpu_numbers(gpu_numbers.replace("-",","))
+# os.environ["hz"]="25hz"
+# tmp_config_path="%s/tmp_s1.yaml"%tmp
+# with open(tmp_config_path, "w") as f:f.write(yaml.dump(data, default_flow_style=False))
+# # cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" --train_semantic_path "%s/6-name2semantic.tsv" --train_phoneme_path "%s/2-name2text.txt" --output_dir "%s/logs_s1"'%(python_exec,tmp_config_path,s1_dir,s1_dir,s1_dir)
+# cmd = '"%s" GPT_SoVITS/s1_train.py --config_file "%s" '%(python_exec,tmp_config_path)
+# yield "GPT训练开始:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+# print(cmd)
+# p_train_GPT = Popen(cmd, shell=True)
+# p_train_GPT.wait()
+# p_train_GPT=None
+# yield "GPT训练完成", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的GPT训练任务,需先终止才能开启下一次任务", {"__type__":"update","visible":False}, {"__type__":"update","visible":True}
+#
+# def close1Bb():
+# global p_train_GPT
+# if(p_train_GPT!=None):
+# kill_process(p_train_GPT.pid)
+# p_train_GPT=None
+# return "已终止GPT训练", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+
+# ps_slice=[]
+# def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_parts):
+# global ps_slice
+# inp = my_utils.clean_path(inp)
+# opt_root = my_utils.clean_path(opt_root)
+# check_for_existance([inp])
+# if(os.path.exists(inp)==False):
+# yield "输入路径不存在", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+# return
+# if os.path.isfile(inp):n_parts=1
+# elif os.path.isdir(inp):pass
+# else:
+# yield "输入路径存在但既不是文件也不是文件夹", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+# return
+# if (ps_slice == []):
+# for i_part in range(n_parts):
+# cmd = '"%s" tools/slice_audio.py "%s" "%s" %s %s %s %s %s %s %s %s %s''' % (python_exec,inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, n_parts)
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps_slice.append(p)
+# yield "切割执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+# for p in ps_slice:
+# p.wait()
+# ps_slice=[]
+# yield "切割结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update", "value":opt_root}, {"__type__": "update", "value":opt_root}, {"__type__": "update", "value":opt_root}
+# else:
+# yield "已有正在进行的切割任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+#
+# def close_slice():
+# global ps_slice
+# if (ps_slice != []):
+# for p_slice in ps_slice:
+# try:
+# kill_process(p_slice.pid)
+# except:
+# traceback.print_exc()
+# ps_slice=[]
+# return "已终止所有切割进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+
+# ps1a=[]
+# def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
+# global ps1a
+# inp_text = my_utils.clean_path(inp_text)
+# inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+# if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+# check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+# if (ps1a == []):
+# opt_dir="%s/%s"%(exp_root,exp_name)
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "bert_pretrained_dir":bert_pretrained_dir,
+# }
+# gpu_names=gpu_numbers.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# "is_half": str(is_half)
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1a.append(p)
+# yield "文本进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1a:
+# p.wait()
+# opt = []
+# for i_part in range(all_parts):
+# txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
+# with open(txt_path, "r", encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(txt_path)
+# path_text = "%s/2-name2text.txt" % opt_dir
+# with open(path_text, "w", encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# ps1a=[]
+# if len("".join(opt)) > 0:
+# yield "文本进程成功", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# else:
+# yield "文本进程失败", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# else:
+# yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+
+# def close1a():
+# global ps1a
+# if (ps1a != []):
+# for p1a in ps1a:
+# try:
+# kill_process(p1a.pid)
+# except:
+# traceback.print_exc()
+# ps1a=[]
+# return "已终止所有1a进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+#
+# ps1b=[]
+# def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir):
+# global ps1b
+# inp_text = my_utils.clean_path(inp_text)
+# inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+# if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+# check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+# if (ps1b == []):
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":"%s/%s"%(exp_root,exp_name),
+# "cnhubert_base_dir":ssl_pretrained_dir,
+# "is_half": str(is_half)
+# }
+# gpu_names=gpu_numbers.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1b.append(p)
+# yield "SSL提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1b:
+# p.wait()
+# ps1b=[]
+# yield "SSL提取进程结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的SSL提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+#
+# def close1b():
+# global ps1b
+# if (ps1b != []):
+# for p1b in ps1b:
+# try:
+# kill_process(p1b.pid)
+# except:
+# traceback.print_exc()
+# ps1b=[]
+# return "已终止所有1b进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+
+# ps1c=[]
+# def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path):
+# global ps1c
+# inp_text = my_utils.clean_path(inp_text)
+# if check_for_existance([inp_text,''], is_dataset_processing=True):
+# check_details([inp_text,''], is_dataset_processing=True)
+# if (ps1c == []):
+# opt_dir="%s/%s"%(exp_root,exp_name)
+# config={
+# "inp_text":inp_text,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "pretrained_s2G":pretrained_s2G_path,
+# "s2config_path":"GPT_SoVITS/configs/s2.json",
+# "is_half": str(is_half)
+# }
+# gpu_names=gpu_numbers.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1c.append(p)
+# yield "语义token提取进程执行中", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1c:
+# p.wait()
+# opt = ["item_name\tsemantic_audio"]
+# path_semantic = "%s/6-name2semantic.tsv" % opt_dir
+# for i_part in range(all_parts):
+# semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
+# with open(semantic_path, "r", encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(semantic_path)
+# with open(path_semantic, "w", encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# ps1c=[]
+# yield "语义token提取进程结束", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}
+# else:
+# yield "已有正在进行的语义token提取任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+
+# def close1c():
+# global ps1c
+# if (ps1c != []):
+# for p1c in ps1c:
+# try:
+# kill_process(p1c.pid)
+# except:
+# traceback.print_exc()
+# ps1c=[]
+# return "已终止所有语义token进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+#####inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G
+# ps1abc=[]
+# def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,ssl_pretrained_dir,pretrained_s2G_path):
+# global ps1abc
+# inp_text = my_utils.clean_path(inp_text)
+# inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+# if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+# check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+# if (ps1abc == []):
+# opt_dir="%s/%s"%(exp_root,exp_name)
+# try:
+# #############################1a
+# path_text="%s/2-name2text.txt" % opt_dir
+# if(os.path.exists(path_text)==False or (os.path.exists(path_text)==True and len(open(path_text,"r",encoding="utf8").read().strip("\n").split("\n"))<2)):
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "bert_pretrained_dir":bert_pretrained_dir,
+# "is_half": str(is_half)
+# }
+# gpu_names=gpu_numbers1a.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/1-get-text.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1abc.append(p)
+# yield "进度:1a-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1abc:p.wait()
+#
+# opt = []
+# for i_part in range(all_parts):#txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part)
+# txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
+# with open(txt_path, "r",encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(txt_path)
+# with open(path_text, "w",encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# assert len("".join(opt)) > 0, "1Aa-文本获取进程失败"
+# yield "进度:1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# ps1abc=[]
+# #############################1b
+# config={
+# "inp_text":inp_text,
+# "inp_wav_dir":inp_wav_dir,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "cnhubert_base_dir":ssl_pretrained_dir,
+# }
+# gpu_names=gpu_numbers1Ba.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1abc.append(p)
+# yield "进度:1a-done, 1b-ing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1abc:p.wait()
+# yield "进度:1a1b-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# ps1abc=[]
+# #############################1c
+# path_semantic = "%s/6-name2semantic.tsv" % opt_dir
+# if(os.path.exists(path_semantic)==False or (os.path.exists(path_semantic)==True and os.path.getsize(path_semantic)<31)):
+# config={
+# "inp_text":inp_text,
+# "exp_name":exp_name,
+# "opt_dir":opt_dir,
+# "pretrained_s2G":pretrained_s2G_path,
+# "s2config_path":"GPT_SoVITS/configs/s2.json",
+# }
+# gpu_names=gpu_numbers1c.split("-")
+# all_parts=len(gpu_names)
+# for i_part in range(all_parts):
+# config.update(
+# {
+# "i_part": str(i_part),
+# "all_parts": str(all_parts),
+# "_CUDA_VISIBLE_DEVICES": fix_gpu_number(gpu_names[i_part]),
+# }
+# )
+# os.environ.update(config)
+# cmd = '"%s" GPT_SoVITS/prepare_datasets/3-get-semantic.py'%python_exec
+# print(cmd)
+# p = Popen(cmd, shell=True)
+# ps1abc.append(p)
+# yield "进度:1a1b-done, 1cing", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# for p in ps1abc:p.wait()
+#
+# opt = ["item_name\tsemantic_audio"]
+# for i_part in range(all_parts):
+# semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
+# with open(semantic_path, "r",encoding="utf8") as f:
+# opt += f.read().strip("\n").split("\n")
+# os.remove(semantic_path)
+# with open(path_semantic, "w",encoding="utf8") as f:
+# f.write("\n".join(opt) + "\n")
+# yield "进度:all-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+# ps1abc = []
+# yield "一键三连进程结束", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# except:
+# traceback.print_exc()
+# close1abc()
+# yield "一键三连中途报错", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+# else:
+# yield "已有正在进行的一键三连任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
+
+# def close1abc():
+# global ps1abc
+# if (ps1abc != []):
+# for p1abc in ps1abc:
+# try:
+# kill_process(p1abc.pid)
+# except:
+# traceback.print_exc()
+# ps1abc=[]
+# return "已终止所有一键三连进程", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
+
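+# Sync the UI to the selected model version: warn if that version's pretrained weights are missing, then
+# update the five wired components (pretrained_s2G, pretrained_s2D, pretrained_s1, GPT and SoVITS dropdowns).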
+def switch_version(version_):
+ os.environ['version']=version_
+ global version
+ version = version_
+ if pretrained_sovits_name[-int(version[-1])+2] !='' and pretrained_gpt_name[-int(version[-1])+2] !='':...
+ else:
+ gr.Warning(i18n(f'未下载{version.upper()}模型'))
+ return {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D")}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}
+
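+# First-run bootstrap: if the G2PW text-frontend model is missing, run the project's download script to fetch it.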
+if os.path.exists('GPT_SoVITS/text/G2PWModel'):...
+else:
+ cmd = '"%s" GPT_SoVITS/download.py'%python_exec
+ p = Popen(cmd, shell=True)
+ p.wait()
+
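+# Mirror a textbox value into another component; kept for the commented-out pretrained_s2G.change wiring.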
+def sync(text):
+ return {'__type__':'update','value':text}
+with gr.Blocks(title="GPT-SoVITS WebUI") as app:
+ gr.Markdown(
+ value=
+ i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
+ )
+ # gr.Markdown(
+ # value=
+ # i18n("中文教程文档:https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e")
+ # )
+
+ with gr.Tabs():
+ # with gr.TabItem(i18n("0-前置数据集获取工具")):#提前随机切片防止uvr5爆内存->uvr5->slicer->asr->打标
+ # gr.Markdown(value=i18n("0a-UVR5人声伴奏分离&去混响去延迟工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # uvr5_info = gr.Textbox(label=i18n("UVR5进程输出信息"))
+ # open_uvr5 = gr.Button(value=i18n("开启UVR5-WebUI"),variant="primary",visible=True)
+ # close_uvr5 = gr.Button(value=i18n("关闭UVR5-WebUI"),variant="primary",visible=False)
+ # gr.Markdown(value=i18n("0b-语音切分工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # slice_inp_path=gr.Textbox(label=i18n("音频自动切分输入路径,可文件可文件夹"),value="")
+ # slice_opt_root=gr.Textbox(label=i18n("切分后的子音频的输出根目录"),value="output/slicer_opt")
+ # with gr.Row():
+ # threshold=gr.Textbox(label=i18n("threshold:音量小于这个值视作静音的备选切割点"),value="-34")
+ # min_length=gr.Textbox(label=i18n("min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值"),value="4000")
+ # min_interval=gr.Textbox(label=i18n("min_interval:最短切割间隔"),value="300")
+ # hop_size=gr.Textbox(label=i18n("hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)"),value="10")
+ # max_sil_kept=gr.Textbox(label=i18n("max_sil_kept:切完后静音最多留多长"),value="500")
+ # with gr.Row():
+ # _max=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("max:归一化后最大值多少"),value=0.9,interactive=True)
+ # alpha=gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("alpha_mix:混多少比例归一化后音频进来"),value=0.25,interactive=True)
+ # with gr.Row():
+ # n_process=gr.Slider(minimum=1,maximum=n_cpu,step=1,label=i18n("切割使用的进程数"),value=4,interactive=True)
+ # slicer_info = gr.Textbox(label=i18n("语音切割进程输出信息"))
+ # open_slicer_button=gr.Button(i18n("开启语音切割"), variant="primary",visible=True)
+ # close_slicer_button=gr.Button(i18n("终止语音切割"), variant="primary",visible=False)
+ # gr.Markdown(value=i18n("0bb-语音降噪工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # denoise_input_dir=gr.Textbox(label=i18n("降噪音频文件输入文件夹"),value="")
+ # denoise_output_dir=gr.Textbox(label=i18n("降噪结果输出文件夹"),value="output/denoise_opt")
+ # with gr.Row():
+ # denoise_info = gr.Textbox(label=i18n("语音降噪进程输出信息"))
+ # open_denoise_button = gr.Button(i18n("开启语音降噪"), variant="primary",visible=True)
+ # close_denoise_button = gr.Button(i18n("终止语音降噪进程"), variant="primary",visible=False)
+ # gr.Markdown(value=i18n("0c-中文批量离线ASR工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # asr_inp_dir = gr.Textbox(
+ # label=i18n("输入文件夹路径"),
+ # value="D:\\GPT-SoVITS\\raw\\xxx",
+ # interactive=True,
+ # )
+ # asr_opt_dir = gr.Textbox(
+ # label = i18n("输出文件夹路径"),
+ # value = "output/asr_opt",
+ # interactive = True,
+ # )
+ # with gr.Row():
+ # asr_model = gr.Dropdown(
+ # label = i18n("ASR 模型"),
+ # choices = list(asr_dict.keys()),
+ # interactive = True,
+ # value="达摩 ASR (中文)"
+ # )
+ # asr_size = gr.Dropdown(
+ # label = i18n("ASR 模型尺寸"),
+ # choices = ["large"],
+ # interactive = True,
+ # value="large"
+ # )
+ # asr_lang = gr.Dropdown(
+ # label = i18n("ASR 语言设置"),
+ # choices = ["zh","yue"],
+ # interactive = True,
+ # value="zh"
+ # )
+ # asr_precision = gr.Dropdown(
+ # label = i18n("数据类型精度"),
+ # choices = ["float32"],
+ # interactive = True,
+ # value="float32"
+ # )
+ # with gr.Row():
+ # asr_info = gr.Textbox(label=i18n("ASR进程输出信息"))
+ # open_asr_button = gr.Button(i18n("开启离线批量ASR"), variant="primary",visible=True)
+ # close_asr_button = gr.Button(i18n("终止ASR进程"), variant="primary",visible=False)
+ #
+        # def change_lang_choices(key): # adjust the selectable languages based on the chosen model
+ # # return gr.Dropdown(choices=asr_dict[key]['lang'])
+ # return {"__type__": "update", "choices": asr_dict[key]['lang'],"value":asr_dict[key]['lang'][0]}
+        # def change_size_choices(key): # adjust the selectable model sizes based on the chosen model
+ # # return gr.Dropdown(choices=asr_dict[key]['size'])
+ # return {"__type__": "update", "choices": asr_dict[key]['size'],"value":asr_dict[key]['size'][-1]}
+        # def change_precision_choices(key): # adjust the selectable precisions based on the chosen model
+ # if key =="Faster Whisper (多语种)":
+ # if default_batch_size <= 4:
+ # precision = 'int8'
+ # elif is_half:
+ # precision = 'float16'
+ # else:
+ # precision = 'float32'
+ # else:
+ # precision = 'float32'
+ # # return gr.Dropdown(choices=asr_dict[key]['precision'])
+ # return {"__type__": "update", "choices": asr_dict[key]['precision'],"value":precision}
+ # asr_model.change(change_lang_choices, [asr_model], [asr_lang])
+ # asr_model.change(change_size_choices, [asr_model], [asr_size])
+ # asr_model.change(change_precision_choices, [asr_model], [asr_precision])
+ #
+ #
+ # gr.Markdown(value=i18n("0d-语音文本校对标注工具"))
+ # with gr.Row():
+ # with gr.Column(scale=3):
+ # with gr.Row():
+ # path_list = gr.Textbox(
+ # label=i18n(".list标注文件的路径"),
+ # value="D:\\RVC1006\\GPT-SoVITS\\raw\\xxx.list",
+ # interactive=True,
+ # )
+ # label_info = gr.Textbox(label=i18n("打标工具进程输出信息"))
+ #
+ # open_label = gr.Button(value=i18n("开启打标WebUI"),variant="primary",visible=True)
+ # close_label = gr.Button(value=i18n("关闭打标WebUI"),variant="primary",visible=False)
+ # open_label.click(change_label, [path_list], [label_info,open_label,close_label])
+ # close_label.click(change_label, [path_list], [label_info,open_label,close_label])
+ # open_uvr5.click(change_uvr5, [], [uvr5_info,open_uvr5,close_uvr5])
+ # close_uvr5.click(change_uvr5, [], [uvr5_info,open_uvr5,close_uvr5])
+
+ with gr.TabItem(i18n("1-GPT-SoVITS-TTS")):
+ with gr.Row():
+ with gr.Row():
+ # exp_name = gr.Textbox(label=i18n("*实验/模型名"), value="xxx", interactive=True)
+ gpu_info = gr.Textbox(label=i18n("显卡信息"), value=gpu_info, visible=True, interactive=False)
+ version_checkbox = gr.Radio(label=i18n("版本"),value=version,choices=['v1','v2'])
+ with gr.Row():
+ pretrained_s2G = gr.Textbox(label=i18n("预训练的SoVITS-G模型路径"), value=pretrained_sovits_name[-int(version[-1])+2], interactive=True, lines=2, max_lines=3,scale=9)
+ pretrained_s2D = gr.Textbox(label=i18n("预训练的SoVITS-D模型路径"), value=pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D"), interactive=True, lines=2, max_lines=3,scale=9)
+ pretrained_s1 = gr.Textbox(label=i18n("预训练的GPT模型路径"), value=pretrained_gpt_name[-int(version[-1])+2], interactive=True, lines=2, max_lines=3,scale=10)
+ bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),
+ value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",
+ interactive=False, lines=2)
+ cnhubert_base_dir = gr.Textbox(label=i18n("预训练的SSL模型路径"),
+ value="GPT_SoVITS/pretrained_models/chinese-hubert-base",
+ interactive=False, lines=2)
+ # with gr.TabItem(i18n("1A-训练集格式化工具")):
+ # gr.Markdown(value=i18n("输出logs/实验名目录下应有23456开头的文件和文件夹"))
+ # with gr.Row():
+ # with gr.Row():
+ # inp_text = gr.Textbox(label=i18n("*文本标注文件"),value=r"D:\RVC1006\GPT-SoVITS\raw\xxx.list",interactive=True,scale=10)
+ # with gr.Row():
+ # inp_wav_dir = gr.Textbox(
+ # label=i18n("*训练集音频文件目录"),
+ # # value=r"D:\RVC1006\GPT-SoVITS\raw\xxx",
+ # interactive=True,
+ # placeholder=i18n("填切割后音频所在目录!读取的音频文件完整路径=该目录-拼接-list文件里波形对应的文件名(不是全路径)。如果留空则使用.list文件里的绝对全路径。"), scale=10
+ # )
+ # gr.Markdown(value=i18n("1Aa-文本内容"))
+ # with gr.Row():
+ # with gr.Row():
+ # gpu_numbers1a = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
+ # with gr.Row():
+ # bert_pretrained_dir = gr.Textbox(label=i18n("预训练的中文BERT模型路径"),value="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",interactive=False,lines=2)
+ # with gr.Row():
+ # button1a_open = gr.Button(i18n("开启文本获取"), variant="primary",visible=True)
+ # button1a_close = gr.Button(i18n("终止文本获取进程"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1a=gr.Textbox(label=i18n("文本进程输出信息"))
+ # gr.Markdown(value=i18n("1Ab-SSL自监督特征提取"))
+ # with gr.Row():
+ # with gr.Row():
+ # gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
+ # with gr.Row():
+ # cnhubert_base_dir = gr.Textbox(label=i18n("预训练的SSL模型路径"),value="GPT_SoVITS/pretrained_models/chinese-hubert-base",interactive=False,lines=2)
+ # with gr.Row():
+ # button1b_open = gr.Button(i18n("开启SSL提取"), variant="primary",visible=True)
+ # button1b_close = gr.Button(i18n("终止SSL提取进程"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1b=gr.Textbox(label=i18n("SSL进程输出信息"))
+ # gr.Markdown(value=i18n("1Ac-语义token提取"))
+ # with gr.Row():
+ # with gr.Row():
+ # gpu_numbers1c = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"),value="%s-%s"%(gpus,gpus),interactive=True)
+ # with gr.Row():
+ # pretrained_s2G_ = gr.Textbox(label=i18n("预训练的SoVITS-G模型路径"), value=pretrained_sovits_name[-int(version[-1])+2], interactive=False,lines=2)
+ # with gr.Row():
+ # button1c_open = gr.Button(i18n("开启语义token提取"), variant="primary",visible=True)
+ # button1c_close = gr.Button(i18n("终止语义token提取进程"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1c=gr.Textbox(label=i18n("语义token提取进程输出信息"))
+ # gr.Markdown(value=i18n("1Aabc-训练集格式化一键三连"))
+ # with gr.Row():
+ # with gr.Row():
+ # button1abc_open = gr.Button(i18n("开启一键三连"), variant="primary",visible=True)
+ # button1abc_close = gr.Button(i18n("终止一键三连"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1abc=gr.Textbox(label=i18n("一键三连进程输出信息"))
+ #
+ # pretrained_s2G.change(sync,[pretrained_s2G],[pretrained_s2G_])
+ # open_asr_button.click(open_asr, [asr_inp_dir, asr_opt_dir, asr_model, asr_size, asr_lang, asr_precision], [asr_info,open_asr_button,close_asr_button,path_list,inp_text,inp_wav_dir])
+ # close_asr_button.click(close_asr, [], [asr_info,open_asr_button,close_asr_button])
+ # open_slicer_button.click(open_slice, [slice_inp_path,slice_opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,n_process], [slicer_info,open_slicer_button,close_slicer_button,asr_inp_dir,denoise_input_dir,inp_wav_dir])
+ # close_slicer_button.click(close_slice, [], [slicer_info,open_slicer_button,close_slicer_button])
+ # open_denoise_button.click(open_denoise, [denoise_input_dir,denoise_output_dir], [denoise_info,open_denoise_button,close_denoise_button,asr_inp_dir,inp_wav_dir])
+ # close_denoise_button.click(close_denoise, [], [denoise_info,open_denoise_button,close_denoise_button])
+ #
+ # button1a_open.click(open1a, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,bert_pretrained_dir], [info1a,button1a_open,button1a_close])
+ # button1a_close.click(close1a, [], [info1a,button1a_open,button1a_close])
+ # button1b_open.click(open1b, [inp_text,inp_wav_dir,exp_name,gpu_numbers1Ba,cnhubert_base_dir], [info1b,button1b_open,button1b_close])
+ # button1b_close.click(close1b, [], [info1b,button1b_open,button1b_close])
+ # button1c_open.click(open1c, [inp_text,exp_name,gpu_numbers1c,pretrained_s2G], [info1c,button1c_open,button1c_close])
+ # button1c_close.click(close1c, [], [info1c,button1c_open,button1c_close])
+ # button1abc_open.click(open1abc, [inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numbers1c,bert_pretrained_dir,cnhubert_base_dir,pretrained_s2G], [info1abc,button1abc_open,button1abc_close])
+ # button1abc_close.click(close1abc, [], [info1abc,button1abc_open,button1abc_close])
+ # with gr.TabItem(i18n("1B-微调训练")):
+ # gr.Markdown(value=i18n("1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。"))
+ # with gr.Row():
+ # with gr.Column():
+ # with gr.Row():
+ # batch_size = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True)
+ # total_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True)
+ # with gr.Row():
+ # text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True)
+ # save_every_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True)
+ # with gr.Column():
+ # with gr.Column():
+ # if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)
+ # if_save_every_weights = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True)
+ # with gr.Row():
+ # gpu_numbers1Ba = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True)
+ # with gr.Row():
+ # with gr.Row():
+ # button1Ba_open = gr.Button(i18n("开启SoVITS训练"), variant="primary",visible=True)
+ # button1Ba_close = gr.Button(i18n("终止SoVITS训练"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1Ba=gr.Textbox(label=i18n("SoVITS训练进程输出信息"))
+ # gr.Markdown(value=i18n("1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。"))
+ # with gr.Row():
+ # with gr.Column():
+ # with gr.Row():
+ # batch_size1Bb = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True)
+ # total_epoch1Bb = gr.Slider(minimum=2,maximum=50,step=1,label=i18n("总训练轮数total_epoch"),value=15,interactive=True)
+ # with gr.Row():
+ # save_every_epoch1Bb = gr.Slider(minimum=1,maximum=50,step=1,label=i18n("保存频率save_every_epoch"),value=5,interactive=True)
+ # if_dpo = gr.Checkbox(label=i18n("是否开启dpo训练选项(实验性)"), value=False, interactive=True, show_label=True)
+ # with gr.Column():
+ # with gr.Column():
+ # if_save_latest1Bb = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)
+ # if_save_every_weights1Bb = gr.Checkbox(label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"), value=True, interactive=True, show_label=True)
+ # with gr.Row():
+ # gpu_numbers1Bb = gr.Textbox(label=i18n("GPU卡号以-分割,每个卡号一个进程"), value="%s" % (gpus), interactive=True)
+ # with gr.Row():
+ # with gr.Row():
+ # button1Bb_open = gr.Button(i18n("开启GPT训练"), variant="primary",visible=True)
+ # button1Bb_close = gr.Button(i18n("终止GPT训练"), variant="primary",visible=False)
+ # with gr.Row():
+ # info1Bb=gr.Textbox(label=i18n("GPT训练进程输出信息"))
+ # button1Ba_open.click(open1Ba, [batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_save_every_weights,save_every_epoch,gpu_numbers1Ba,pretrained_s2G,pretrained_s2D], [info1Ba,button1Ba_open,button1Ba_close])
+ # button1Ba_close.click(close1Ba, [], [info1Ba,button1Ba_open,button1Ba_close])
+ # button1Bb_open.click(open1Bb, [batch_size1Bb,total_epoch1Bb,exp_name,if_dpo,if_save_latest1Bb,if_save_every_weights1Bb,save_every_epoch1Bb,gpu_numbers1Bb,pretrained_s1], [info1Bb,button1Bb_open,button1Bb_close])
+ # button1Bb_close.click(close1Bb, [], [info1Bb,button1Bb_open,button1Bb_close])
+ with gr.TabItem(i18n("推理")):
+ gr.Markdown(value=i18n("选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模,体验5秒Zero Shot TTS用。"))
+ with gr.Row():
+ with gr.Row():
+ GPT_dropdown = gr.Dropdown(label=i18n("*GPT模型列表"), choices=sorted(GPT_names,key=custom_sort_key),value=pretrained_gpt_name[0],interactive=True)
+ SoVITS_dropdown = gr.Dropdown(label=i18n("*SoVITS模型列表"), choices=sorted(SoVITS_names,key=custom_sort_key),value=pretrained_sovits_name[0],interactive=True)
+ with gr.Row():
+ gpu_number_1C=gr.Textbox(label=i18n("GPU卡号,只能填1个整数"), value=gpus, interactive=True)
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
+ refresh_button.click(fn=change_choices,inputs=[],outputs=[SoVITS_dropdown,GPT_dropdown])
+ with gr.Row():
+ with gr.Row():
+ batched_infer_enabled = gr.Checkbox(label=i18n("启用并行推理版本(推理速度更快)"), value=False, interactive=True, show_label=True)
+ with gr.Row():
+ open_tts = gr.Button(value=i18n("开启TTS推理WebUI"),variant='primary',visible=True)
+ close_tts = gr.Button(value=i18n("关闭TTS推理WebUI"),variant='primary',visible=False)
+ with gr.Row():
+ tts_info = gr.Textbox(label=i18n("TTS推理WebUI进程输出信息"))
+ open_tts.click(change_tts_inference, [bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown, batched_infer_enabled], [tts_info,open_tts,close_tts])
+ close_tts.click(change_tts_inference, [bert_pretrained_dir,cnhubert_base_dir,gpu_number_1C,GPT_dropdown,SoVITS_dropdown, batched_infer_enabled], [tts_info,open_tts,close_tts])
+ version_checkbox.change(switch_version,[version_checkbox],[pretrained_s2G,pretrained_s2D,pretrained_s1,GPT_dropdown,SoVITS_dropdown])
+ # with gr.TabItem(i18n("2-GPT-SoVITS-变声")):gr.Markdown(value=i18n("施工中,请静候佳音"))
+ app.queue().launch(#concurrency_count=511, max_size=1022
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=webui_port_main,
+ quiet=True,
+ )