From bf494b6b2d849ff650b0c8494b8f32c9eb5c9fbd Mon Sep 17 00:00:00 2001
From: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com>
Date: Thu, 1 Feb 2024 20:14:09 +0800
Subject: [PATCH 1/8] Update inference_gui.py
---
GPT_SoVITS/inference_gui.py | 469 +-----------------------------------
1 file changed, 1 insertion(+), 468 deletions(-)
diff --git a/GPT_SoVITS/inference_gui.py b/GPT_SoVITS/inference_gui.py
index fd2dae86..d5238049 100644
--- a/GPT_SoVITS/inference_gui.py
+++ b/GPT_SoVITS/inference_gui.py
@@ -1,480 +1,13 @@
-import os,re,logging
-logging.getLogger("markdown_it").setLevel(logging.ERROR)
-logging.getLogger("urllib3").setLevel(logging.ERROR)
-logging.getLogger("httpcore").setLevel(logging.ERROR)
-logging.getLogger("httpx").setLevel(logging.ERROR)
-logging.getLogger("asyncio").setLevel(logging.ERROR)
-
-logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
-logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
-import pdb
-
-if os.path.exists("./gweight.txt"):
- with open("./gweight.txt", 'r',encoding="utf-8") as file:
- gweight_data = file.read()
- gpt_path = os.environ.get(
- "gpt_path", gweight_data)
-else:
- gpt_path = os.environ.get(
- "gpt_path", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt")
-
-if os.path.exists("./sweight.txt"):
- with open("./sweight.txt", 'r',encoding="utf-8") as file:
- sweight_data = file.read()
- sovits_path = os.environ.get("sovits_path", sweight_data)
-else:
- sovits_path = os.environ.get("sovits_path", "GPT_SoVITS/pretrained_models/s2G488k.pth")
-# gpt_path = os.environ.get(
-# "gpt_path", "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
-# )
-# sovits_path = os.environ.get("sovits_path", "pretrained_models/s2G488k.pth")
-cnhubert_base_path = os.environ.get(
- "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
-)
-bert_path = os.environ.get(
- "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
-)
-infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
-infer_ttswebui = int(infer_ttswebui)
-is_share = os.environ.get("is_share", "False")
-is_share=eval(is_share)
-if "_CUDA_VISIBLE_DEVICES" in os.environ:
- os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
-is_half = eval(os.environ.get("is_half", "True"))
-import gradio as gr
-from transformers import AutoModelForMaskedLM, AutoTokenizer
-import numpy as np
-import librosa,torch
-from feature_extractor import cnhubert
-cnhubert.cnhubert_base_path=cnhubert_base_path
-
import sys
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit
from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox
import soundfile as sf
-from module.models import SynthesizerTrn
-from AR.models.t2s_lightning_module import Text2SemanticLightningModule
-from text import cleaned_text_to_sequence
-from text.cleaner import clean_text
-from time import time as ttime
-from module.mel_processing import spectrogram_torch
-from my_utils import load_audio
from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
-os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # ensure this is also set when the inference UI is launched directly.
-
-if torch.cuda.is_available():
- device = "cuda"
-elif torch.backends.mps.is_available():
- device = "mps"
-else:
- device = "cpu"
-
-tokenizer = AutoTokenizer.from_pretrained(bert_path)
-bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
-if is_half == True:
- bert_model = bert_model.half().to(device)
-else:
- bert_model = bert_model.to(device)
-
-def get_bert_feature(text, word2ph):
- with torch.no_grad():
- inputs = tokenizer(text, return_tensors="pt")
- for i in inputs:
- inputs[i] = inputs[i].to(device)
- res = bert_model(**inputs, output_hidden_states=True)
- res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
- assert len(word2ph) == len(text)
- phone_level_feature = []
- for i in range(len(word2ph)):
- repeat_feature = res[i].repeat(word2ph[i], 1)
- phone_level_feature.append(repeat_feature)
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
- return phone_level_feature.T
-
-class DictToAttrRecursive(dict):
- def __init__(self, input_dict):
- super().__init__(input_dict)
- for key, value in input_dict.items():
- if isinstance(value, dict):
- value = DictToAttrRecursive(value)
- self[key] = value
- setattr(self, key, value)
-
- def __getattr__(self, item):
- try:
- return self[item]
- except KeyError:
- raise AttributeError(f"Attribute {item} not found")
-
- def __setattr__(self, key, value):
- if isinstance(value, dict):
- value = DictToAttrRecursive(value)
- super(DictToAttrRecursive, self).__setitem__(key, value)
- super().__setattr__(key, value)
-
- def __delattr__(self, item):
- try:
- del self[item]
- except KeyError:
- raise AttributeError(f"Attribute {item} not found")
-
-
-ssl_model = cnhubert.get_model()
-if is_half == True:
- ssl_model = ssl_model.half().to(device)
-else:
- ssl_model = ssl_model.to(device)
-
-def change_sovits_weights(sovits_path):
- global vq_model,hps
- dict_s2=torch.load(sovits_path,map_location="cpu")
- hps=dict_s2["config"]
- hps = DictToAttrRecursive(hps)
- hps.model.semantic_frame_rate = "25hz"
- vq_model = SynthesizerTrn(
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- n_speakers=hps.data.n_speakers,
- **hps.model
- )
- if("pretrained"not in sovits_path):
- del vq_model.enc_q
- if is_half == True:
- vq_model = vq_model.half().to(device)
- else:
- vq_model = vq_model.to(device)
- vq_model.eval()
- print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
- with open("./sweight.txt","w",encoding="utf-8")as f:f.write(sovits_path)
-change_sovits_weights(sovits_path)
-
-def change_gpt_weights(gpt_path):
- global hz,max_sec,t2s_model,config
- hz = 50
- dict_s1 = torch.load(gpt_path, map_location="cpu")
- config = dict_s1["config"]
- max_sec = config["data"]["max_sec"]
- t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
- t2s_model.load_state_dict(dict_s1["weight"])
- if is_half == True:
- t2s_model = t2s_model.half()
- t2s_model = t2s_model.to(device)
- t2s_model.eval()
- total = sum([param.nelement() for param in t2s_model.parameters()])
- print("Number of parameter: %.2fM" % (total / 1e6))
- with open("./gweight.txt","w",encoding="utf-8")as f:f.write(gpt_path)
-change_gpt_weights(gpt_path)
-
-def get_spepc(hps, filename):
- audio = load_audio(filename, int(hps.data.sampling_rate))
- audio = torch.FloatTensor(audio)
- audio_norm = audio
- audio_norm = audio_norm.unsqueeze(0)
- spec = spectrogram_torch(
- audio_norm,
- hps.data.filter_length,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- center=False,
- )
- return spec
-
-
-dict_language={
- i18n("中文"):"zh",
- i18n("英文"):"en",
- i18n("日文"):"ja"
-}
-
-
-def splite_en_inf(sentence, language):
- pattern = re.compile(r'[a-zA-Z. ]+')
- textlist = []
- langlist = []
- pos = 0
- for match in pattern.finditer(sentence):
- start, end = match.span()
- if start > pos:
- textlist.append(sentence[pos:start])
- langlist.append(language)
- textlist.append(sentence[start:end])
- langlist.append("en")
- pos = end
- if pos < len(sentence):
- textlist.append(sentence[pos:])
- langlist.append(language)
-
- return textlist, langlist
-
-
-def clean_text_inf(text, language):
- phones, word2ph, norm_text = clean_text(text, language)
- phones = cleaned_text_to_sequence(phones)
-
- return phones, word2ph, norm_text
-
-
-def get_bert_inf(phones, word2ph, norm_text, language):
- if language == "zh":
- bert = get_bert_feature(norm_text, word2ph).to(device)
- else:
- bert = torch.zeros(
- (1024, len(phones)),
- dtype=torch.float16 if is_half == True else torch.float32,
- ).to(device)
-
- return bert
-
-
-def nonen_clean_text_inf(text, language):
- textlist, langlist = splite_en_inf(text, language)
- phones_list = []
- word2ph_list = []
- norm_text_list = []
- for i in range(len(textlist)):
- lang = langlist[i]
- phones, word2ph, norm_text = clean_text_inf(textlist[i], lang)
- phones_list.append(phones)
- if lang == "en" or "ja":
- pass
- else:
- word2ph_list.append(word2ph)
- norm_text_list.append(norm_text)
- print(word2ph_list)
- phones = sum(phones_list, [])
- word2ph = sum(word2ph_list, [])
- norm_text = ' '.join(norm_text_list)
-
- return phones, word2ph, norm_text
-
-
-def nonen_get_bert_inf(text, language):
- textlist, langlist = splite_en_inf(text, language)
- print(textlist)
- print(langlist)
- bert_list = []
- for i in range(len(textlist)):
- text = textlist[i]
- lang = langlist[i]
- phones, word2ph, norm_text = clean_text_inf(text, lang)
- bert = get_bert_inf(phones, word2ph, norm_text, lang)
- bert_list.append(bert)
- bert = torch.cat(bert_list, dim=1)
-
- return bert
-
-splits = {",","。","?","!",",",".","?","!","~",":",":","—","…",}
-def get_first(text):
- pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
- text = re.split(pattern, text)[0].strip()
- return text
-
-def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,how_to_cut=i18n("不切")):
- t0 = ttime()
- prompt_text = prompt_text.strip("\n")
- if(prompt_text[-1]not in splits):prompt_text+="。"if prompt_text!="en"else "."
- text = text.strip("\n")
- if(len(get_first(text))<4):text+="。"if text!="en"else "."
- zero_wav = np.zeros(
- int(hps.data.sampling_rate * 0.3),
- dtype=np.float16 if is_half == True else np.float32,
- )
- with torch.no_grad():
- wav16k, sr = librosa.load(ref_wav_path, sr=16000)
- if(wav16k.shape[0]>160000 or wav16k.shape[0]<48000):
- raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
- wav16k = torch.from_numpy(wav16k)
- zero_wav_torch = torch.from_numpy(zero_wav)
- if is_half == True:
- wav16k = wav16k.half().to(device)
- zero_wav_torch = zero_wav_torch.half().to(device)
- else:
- wav16k = wav16k.to(device)
- zero_wav_torch = zero_wav_torch.to(device)
- wav16k=torch.cat([wav16k,zero_wav_torch])
- ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
- "last_hidden_state"
- ].transpose(
- 1, 2
- ) # .float()
- codes = vq_model.extract_latent(ssl_content)
- prompt_semantic = codes[0, 0]
- t1 = ttime()
- prompt_language = dict_language[prompt_language]
- text_language = dict_language[text_language]
-
- if prompt_language == "en":
- phones1, word2ph1, norm_text1 = clean_text_inf(prompt_text, prompt_language)
- else:
- phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language)
- if(how_to_cut==i18n("凑四句一切")):text=cut1(text)
- elif(how_to_cut==i18n("凑50字一切")):text=cut2(text)
- elif(how_to_cut==i18n("按中文句号。切")):text=cut3(text)
- elif(how_to_cut==i18n("按英文句号.切")):text=cut4(text)
- text = text.replace("\n\n","\n").replace("\n\n","\n").replace("\n\n","\n")
- if(text[-1]not in splits):text+="。"if text_language!="en"else "."
- texts=text.split("\n")
- audio_opt = []
- if prompt_language == "en":
- bert1 = get_bert_inf(phones1, word2ph1, norm_text1, prompt_language)
- else:
- bert1 = nonen_get_bert_inf(prompt_text, prompt_language)
-
- for text in texts:
- # skip blank lines in the target text; they used to raise an error
- if (len(text.strip()) == 0):
- continue
- if text_language == "en":
- phones2, word2ph2, norm_text2 = clean_text_inf(text, text_language)
- else:
- phones2, word2ph2, norm_text2 = nonen_clean_text_inf(text, text_language)
-
- if text_language == "en":
- bert2 = get_bert_inf(phones2, word2ph2, norm_text2, text_language)
- else:
- bert2 = nonen_get_bert_inf(text, text_language)
-
- bert = torch.cat([bert1, bert2], 1)
-
- all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
- bert = bert.to(device).unsqueeze(0)
- all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
- prompt = prompt_semantic.unsqueeze(0).to(device)
- t2 = ttime()
- with torch.no_grad():
- # pred_semantic = t2s_model.model.infer(
- pred_semantic, idx = t2s_model.model.infer_panel(
- all_phoneme_ids,
- all_phoneme_len,
- prompt,
- bert,
- # prompt_phone_len=ph_offset,
- top_k=config["inference"]["top_k"],
- early_stop_num=hz * max_sec,
- )
- t3 = ttime()
- # print(pred_semantic.shape,idx)
- pred_semantic = pred_semantic[:, -idx:].unsqueeze(
- 0
- ) # .unsqueeze(0)  # mq needs one extra unsqueeze
- refer = get_spepc(hps, ref_wav_path) # .to(device)
- if is_half == True:
- refer = refer.half().to(device)
- else:
- refer = refer.to(device)
- # audio = vq_model.decode(pred_semantic, all_phoneme_ids, refer).detach().cpu().numpy()[0, 0]
- audio = (
- vq_model.decode(
- pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refer
- )
- .detach()
- .cpu()
- .numpy()[0, 0]
- ) ### try reconstructing without the prompt part
- audio_opt.append(audio)
- audio_opt.append(zero_wav)
- t4 = ttime()
- print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
- yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
- np.int16
- )
-
-def split(todo_text):
- todo_text = todo_text.replace("……", "。").replace("——", ",")
- if todo_text[-1] not in splits:
- todo_text += "。"
- i_split_head = i_split_tail = 0
- len_text = len(todo_text)
- todo_texts = []
- while 1:
- if i_split_head >= len_text:
- break # the text is guaranteed to end with punctuation, so just break; the last segment was appended in the previous iteration
- if todo_text[i_split_head] in splits:
- i_split_head += 1
- todo_texts.append(todo_text[i_split_tail:i_split_head])
- i_split_tail = i_split_head
- else:
- i_split_head += 1
- return todo_texts
-
-
-def cut1(inp):
- inp = inp.strip("\n")
- inps = split(inp)
- split_idx = list(range(0, len(inps), 4))
- split_idx[-1] = None
- if len(split_idx) > 1:
- opts = []
- for idx in range(len(split_idx) - 1):
- opts.append("".join(inps[split_idx[idx] : split_idx[idx + 1]]))
- else:
- opts = [inp]
- return "\n".join(opts)
-
-
-def cut2(inp):
- inp = inp.strip("\n")
- inps = split(inp)
- if len(inps) < 2:
- return inp
- opts = []
- summ = 0
- tmp_str = ""
- for i in range(len(inps)):
- summ += len(inps[i])
- tmp_str += inps[i]
- if summ > 50:
- summ = 0
- opts.append(tmp_str)
- tmp_str = ""
- if tmp_str != "":
- opts.append(tmp_str)
- # print(opts)
- if len(opts)>1 and len(opts[-1]) < 50: ## if the last chunk is too short, merge it with the previous one
- opts[-2] = opts[-2] + opts[-1]
- opts = opts[:-1]
- return "\n".join(opts)
-
-
-def cut3(inp):
- inp = inp.strip("\n")
- return "\n".join(["%s。" % item for item in inp.strip("。").split("。")])
-def cut4(inp):
- inp = inp.strip("\n")
- return "\n".join(["%s." % item for item in inp.strip(".").split(".")])
-
-def custom_sort_key(s):
- # use a regex to split the string into numeric and non-numeric parts
- parts = re.split('(\d+)', s)
- # convert the numeric parts to integers; leave the rest unchanged
- parts = [int(part) if part.isdigit() else part for part in parts]
- return parts
-
-def change_choices():
- SoVITS_names, GPT_names = get_weights_names()
- return {"choices": sorted(SoVITS_names,key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names,key=custom_sort_key), "__type__": "update"}
-
-pretrained_sovits_name="GPT_SoVITS/pretrained_models/s2G488k.pth"
-pretrained_gpt_name="GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
-SoVITS_weight_root="SoVITS_weights"
-GPT_weight_root="GPT_weights"
-os.makedirs(SoVITS_weight_root,exist_ok=True)
-os.makedirs(GPT_weight_root,exist_ok=True)
-
-def get_weights_names():
- SoVITS_names = [pretrained_sovits_name]
- for name in os.listdir(SoVITS_weight_root):
- if name.endswith(".pth"):SoVITS_names.append("%s/%s"%(SoVITS_weight_root,name))
- GPT_names = [pretrained_gpt_name]
- for name in os.listdir(GPT_weight_root):
- if name.endswith(".ckpt"): GPT_names.append("%s/%s"%(GPT_weight_root,name))
- return SoVITS_names,GPT_names
-SoVITS_names,GPT_names = get_weights_names()
+from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav
class GPTSoVITSGUI(QMainWindow):
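[Editor's note] This patch removes inference_gui.py's duplicated ~470-line inference pipeline and re-imports it from inference_webui, so the Qt GUI and the Gradio UI share one implementation. A minimal sketch of how the re-exported functions compose; the weight paths, output file, and reference texts are illustrative, and calling with the raw "中文" label assumes a locale where i18n returns its key unchanged:

    # Hedged sketch, not part of the patch: driving the shared pipeline directly.
    import soundfile as sf
    from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav

    change_gpt_weights("GPT_weights/example-e15.ckpt")      # hypothetical GPT checkpoint
    change_sovits_weights("SoVITS_weights/example_e8.pth")  # hypothetical SoVITS weights

    # get_tts_wav is a generator that yields (sampling_rate, int16 audio array).
    synth = get_tts_wav("ref.wav", "参考音频的文本", "中文", "要合成的文本", "中文")
    for sampling_rate, audio in synth:
        sf.write("output.wav", audio, sampling_rate)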
From 45f73519cc41cd17cf816d8b997a9dcb0bee04b6 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Thu, 1 Feb 2024 23:47:13 +0800
Subject: [PATCH 2/8] Update cmd-asr.py
---
tools/damo_asr/cmd-asr.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/damo_asr/cmd-asr.py b/tools/damo_asr/cmd-asr.py
index 70dd4d8f..d5c07bb6 100644
--- a/tools/damo_asr/cmd-asr.py
+++ b/tools/damo_asr/cmd-asr.py
@@ -4,7 +4,7 @@ import sys,os,traceback
from funasr import AutoModel
-dir=sys.argv[1]
+dir=sys.argv[1].strip("/")
# opt_name=dir.split("\\")[-1].split("/")[-1]
opt_name=os.path.basename(dir)
From dc8ac29a5083fb7a89fba7233096db02b813f35e Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Fri, 2 Feb 2024 10:16:43 +0800
Subject: [PATCH 3/8] Update cmd-asr.py
---
tools/damo_asr/cmd-asr.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tools/damo_asr/cmd-asr.py b/tools/damo_asr/cmd-asr.py
index d5c07bb6..9a107972 100644
--- a/tools/damo_asr/cmd-asr.py
+++ b/tools/damo_asr/cmd-asr.py
@@ -4,7 +4,8 @@ import sys,os,traceback
from funasr import AutoModel
-dir=sys.argv[1].strip("/")
+dir=sys.argv[1]
+if(dir[-1]=="/"):dir=dir[:-1]
# opt_name=dir.split("\\")[-1].split("/")[-1]
opt_name=os.path.basename(dir)
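[Editor's note] Patch 3 reverts patch 2 because str.strip removes the given characters from both ends, so an absolute path loses its leading slash; the trailing slash still has to go, because os.path.basename of a path ending in "/" is an empty string. A quick illustration:

    import os

    d = "/data/audio/"
    print(d.strip("/"))     # 'data/audio'  -- leading slash stripped too, path broken
    print(d.rstrip("/"))    # '/data/audio' -- what patch 2 presumably intended
    if d[-1] == "/":        # the form patch 3 settles on
        d = d[:-1]
    print(os.path.basename("/data/audio/"))  # ''      -- why the trailing slash matters
    print(os.path.basename("/data/audio"))   # 'audio'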
From 3c14a37f8bdda61d4cf0a5523183127479ccdc77 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Fri, 2 Feb 2024 22:38:32 +0800
Subject: [PATCH 4/8] Add files via upload
---
GPT_SoVITS/text/japanese.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/GPT_SoVITS/text/japanese.py b/GPT_SoVITS/text/japanese.py
index 68112b96..a571467c 100644
--- a/GPT_SoVITS/text/japanese.py
+++ b/GPT_SoVITS/text/japanese.py
@@ -97,7 +97,7 @@ def text_normalize(text):
return text
# Copied from espnet https://github.com/espnet/espnet/blob/master/espnet2/text/phoneme_tokenizer.py
-def pyopenjtalk_g2p_prosody(text: str, drop_unvoiced_vowels: bool = True) -> list[str]:
+def pyopenjtalk_g2p_prosody(text, drop_unvoiced_vowels=True):
"""Extract phoneme + prosoody symbol sequence from input full-context labels.
The algorithm is based on `Prosodic features control by symbols as input of
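[Editor's note] The dropped annotation used list[str], the builtin generic syntax that is only accepted at runtime on Python 3.9+; on 3.8 the def line itself raises TypeError ('type' object is not subscriptable), so removing the hints restores compatibility. If annotations were wanted, a backward-compatible sketch:

    from typing import List

    # typing.List works on Python 3.5+, unlike the 3.9+ builtin generic list[str].
    def pyopenjtalk_g2p_prosody(text: str, drop_unvoiced_vowels: bool = True) -> List[str]:
        ...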
From dba1a74ccb0cf19a1b4eb93faf11d4ec2b1fc5d7 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sat, 3 Feb 2024 00:01:26 +0800
Subject: [PATCH 5/8] Fix the uvr5 error raised when format detection fails. Do not modify this file unless you can enumerate bad cases it fails to recognize.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Fix the uvr5 error raised when format detection fails. Do not modify this file unless you can enumerate bad cases it fails to recognize.
---
tools/webui.py | 178 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 178 insertions(+)
create mode 100644 tools/webui.py
diff --git a/tools/webui.py b/tools/webui.py
new file mode 100644
index 00000000..41ec5887
--- /dev/null
+++ b/tools/webui.py
@@ -0,0 +1,178 @@
+import os
+import traceback,gradio as gr
+import logging
+from tools.i18n.i18n import I18nAuto
+i18n = I18nAuto()
+
+logger = logging.getLogger(__name__)
+import librosa,ffmpeg
+import soundfile as sf
+import torch
+import sys
+from mdxnet import MDXNetDereverb
+from vr import AudioPre, AudioPreDeEcho
+
+weight_uvr5_root = "tools/uvr5/uvr5_weights"
+uvr5_names = []
+for name in os.listdir(weight_uvr5_root):
+ if name.endswith(".pth") or "onnx" in name:
+ uvr5_names.append(name.replace(".pth", ""))
+
+device=sys.argv[1]
+is_half=eval(sys.argv[2])  # parse "True"/"False"; the raw string "False" would be truthy
+webui_port_uvr5=int(sys.argv[3])
+is_share=eval(sys.argv[4])
+
+def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
+ infos = []
+ try:
+ inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+ save_root_vocal = (
+ save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+ )
+ save_root_ins = (
+ save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+ )
+ if model_name == "onnx_dereverb_By_FoxJoy":
+ from mdxnet import MDXNetDereverb  # match the module-level import's casing
+
+ pre_fun = MDXNetDereverb(15)
+ else:
+ func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho
+ pre_fun = func(
+ agg=int(agg),
+ model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
+ device=device,
+ is_half=is_half,
+ )
+ if inp_root != "":
+ paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
+ else:
+ paths = [path.name for path in paths]
+ for path in paths:
+ inp_path = os.path.join(inp_root, path)
+ if(os.path.isfile(inp_path)==False):continue
+ need_reformat = 1
+ done = 0
+ try:
+ info = ffmpeg.probe(inp_path, cmd="ffprobe")
+ if (
+ info["streams"][0]["channels"] == 2
+ and info["streams"][0]["sample_rate"] == "44100"
+ ):
+ need_reformat = 0
+ pre_fun._path_audio_(
+ inp_path, save_root_ins, save_root_vocal, format0
+ )
+ done = 1
+ except:
+ need_reformat = 1
+ traceback.print_exc()
+ if need_reformat == 1:
+ tmp_path = "%s/%s.reformatted.wav" % (
+ os.path.join(os.environ["TEMP"]),
+ os.path.basename(inp_path),
+ )
+ os.system(
+ 'ffmpeg -i "%s" -vn -acodec pcm_s16le -ac 2 -ar 44100 "%s" -y'
+ % (inp_path, tmp_path)
+ )  # quote the paths so file names with spaces survive the shell
+ inp_path = tmp_path
+ try:
+ if done == 0:
+ pre_fun._path_audio_(
+ inp_path, save_root_ins, save_root_vocal, format0
+ )
+ infos.append("%s->Success" % (os.path.basename(inp_path)))
+ yield "\n".join(infos)
+ except:
+ infos.append(
+ "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
+ )
+ yield "\n".join(infos)
+ except:
+ infos.append(traceback.format_exc())
+ yield "\n".join(infos)
+ finally:
+ try:
+ if model_name == "onnx_dereverb_By_FoxJoy":
+ del pre_fun.pred.model
+ del pre_fun.pred.model_
+ else:
+ del pre_fun.model
+ del pre_fun
+ except:
+ traceback.print_exc()
+ print("clean_empty_cache")
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ yield "\n".join(infos)
+
+with gr.Blocks(title="UVR5 WebUI") as app:
+ gr.Markdown(
+ value=
+ i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
+ )
+ with gr.Tabs():
+ with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")):
+ with gr.Group():
+ gr.Markdown(
+ value=i18n(
+ "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。"
+ )
+ )
+ with gr.Row():
+ with gr.Column():
+ dir_wav_input = gr.Textbox(
+ label=i18n("输入待处理音频文件夹路径"),
+ placeholder="C:\\Users\\Desktop\\todo-songs",
+ )
+ wav_inputs = gr.File(
+ file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
+ )
+ with gr.Column():
+ model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names)
+ agg = gr.Slider(
+ minimum=0,
+ maximum=20,
+ step=1,
+ label=i18n("人声提取激进程度"),
+ value=10,
+ interactive=True,
+ visible=False, # not yet exposed for adjustment
+ )
+ opt_vocal_root = gr.Textbox(
+ label=i18n("指定输出主人声文件夹"), value="output/uvr5_opt"
+ )
+ opt_ins_root = gr.Textbox(
+ label=i18n("指定输出非主人声文件夹"), value="output/uvr5_opt"
+ )
+ format0 = gr.Radio(
+ label=i18n("导出文件格式"),
+ choices=["wav", "flac", "mp3", "m4a"],
+ value="flac",
+ interactive=True,
+ )
+ but2 = gr.Button(i18n("转换"), variant="primary")
+ vc_output4 = gr.Textbox(label=i18n("输出信息"))
+ but2.click(
+ uvr,
+ [
+ model_choose,
+ dir_wav_input,
+ opt_vocal_root,
+ wav_inputs,
+ opt_ins_root,
+ agg,
+ format0,
+ ],
+ [vc_output4],
+ api_name="uvr_convert",
+ )
+app.queue(concurrency_count=511, max_size=1022).launch(
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=webui_port_uvr5,
+ quiet=True,
+)
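[Editor's note] The new uvr routine only feeds a file straight to the separator when ffprobe reports exactly 2 channels at 44100 Hz; anything else, or unreadable metadata, is re-encoded with ffmpeg first. A standalone sketch of that gate using the same ffmpeg-python call the patch uses; the input file name is illustrative:

    import ffmpeg  # the ffmpeg-python package, as imported by tools/webui.py

    def needs_reformat(inp_path):
        # UVR5 models expect stereo 44.1 kHz input; everything else gets re-encoded.
        try:
            info = ffmpeg.probe(inp_path, cmd="ffprobe")
            stream = info["streams"][0]
            return not (stream["channels"] == 2 and stream["sample_rate"] == "44100")
        except Exception:
            return True  # unreadable metadata: let ffmpeg normalize the file

    print(needs_reformat("todo-songs/example.mp3"))  # hypothetical input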
From 9286a27ad3608cf81ef122c3b06a681765e7490e Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sat, 3 Feb 2024 00:47:57 +0800
Subject: [PATCH 6/8] Add files via upload
---
GPT_SoVITS/text/cleaner.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/GPT_SoVITS/text/cleaner.py b/GPT_SoVITS/text/cleaner.py
index 8142f47d..92a18ebd 100644
--- a/GPT_SoVITS/text/cleaner.py
+++ b/GPT_SoVITS/text/cleaner.py
@@ -10,6 +10,9 @@ special = [
def clean_text(text, language):
+ if(language not in language_module_map):
+ language="en"
+ text=" "
for special_s, special_l, target_symbol in special:
if special_s in text and language == special_l:
return clean_special(text, language, special_s, target_symbol)
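[Editor's note] The guard makes clean_text total over arbitrary language tags: an unsupported language now degrades to cleaning a single space as English instead of raising a KeyError downstream. A minimal reproduction of the fallback behavior, with stand-ins for the real zh/en/ja module table:

    # Sketch only: stand-ins for the language modules registered in cleaner.py.
    language_module_map = {"zh": None, "en": None, "ja": None}

    def clean_text(text, language):
        if language not in language_module_map:
            language = "en"
            text = " "  # degrade to near-empty English input rather than crash
        return text, language

    print(clean_text("안녕하세요", "ko"))  # (' ', 'en') -- unknown tag falls back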
From 3ebff70b71580ee1f97b3238c9442cbc5aef47c7 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sat, 3 Feb 2024 00:48:00 +0800
Subject: [PATCH 7/8] Add files via upload
---
GPT_SoVITS/inference_webui.py | 96 +++++++++++++++++++++++------------
1 file changed, 64 insertions(+), 32 deletions(-)
diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py
index f733fd0f..1868a122 100644
--- a/GPT_SoVITS/inference_webui.py
+++ b/GPT_SoVITS/inference_webui.py
@@ -1,11 +1,18 @@
+'''
+Mixed Chinese/English recognition
+Mixed Japanese/English recognition
+Multilingual: split the text and detect the language of each segment
+Treat everything as Chinese
+Treat everything as English
+Treat everything as Japanese
+'''
import os, re, logging
-
+import LangSegment
logging.getLogger("markdown_it").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpcore").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("asyncio").setLevel(logging.ERROR)
-
logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
import pdb
@@ -193,9 +200,12 @@ def get_spepc(hps, filename):
dict_language = {
- i18n("中文"): "zh",
- i18n("英文"): "en",
- i18n("日文"): "ja"
+ i18n("中文"): "all_zh",#全部按中文识别
+ i18n("英文"): "en",#全部按英文识别#######不变
+ i18n("日文"): "all_ja",#全部按日文识别
+ i18n("中英混合"): "zh",#按中英混合识别####不变
+ i18n("日英混合"): "ja",#按日英混合识别####不变
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
}
@@ -235,15 +245,15 @@ def splite_en_inf(sentence, language):
def clean_text_inf(text, language):
- phones, word2ph, norm_text = clean_text(text, language)
+ phones, word2ph, norm_text = clean_text(text, language.replace("all_",""))
phones = cleaned_text_to_sequence(phones)
-
return phones, word2ph, norm_text
-
+dtype=torch.float16 if is_half == True else torch.float32
def get_bert_inf(phones, word2ph, norm_text, language):
+ language=language.replace("all_","")
if language == "zh":
- bert = get_bert_feature(norm_text, word2ph).to(device)
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
else:
bert = torch.zeros(
(1024, len(phones)),
@@ -254,7 +264,16 @@ def get_bert_inf(phones, word2ph, norm_text, language):
def nonen_clean_text_inf(text, language):
- textlist, langlist = splite_en_inf(text, language)
+ if(language!="auto"):
+ textlist, langlist = splite_en_inf(text, language)
+ else:
+ textlist=[]
+ langlist=[]
+ for tmp in LangSegment.getTexts(text):
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
+ print(textlist)
+ print(langlist)
phones_list = []
word2ph_list = []
norm_text_list = []
@@ -262,9 +281,7 @@ def nonen_clean_text_inf(text, language):
lang = langlist[i]
phones, word2ph, norm_text = clean_text_inf(textlist[i], lang)
phones_list.append(phones)
- if lang == "en" or "ja":
- pass
- else:
+ if lang == "zh":
word2ph_list.append(word2ph)
norm_text_list.append(norm_text)
print(word2ph_list)
@@ -276,7 +293,14 @@ def nonen_clean_text_inf(text, language):
def nonen_get_bert_inf(text, language):
- textlist, langlist = splite_en_inf(text, language)
+ if(language!="auto"):
+ textlist, langlist = splite_en_inf(text, language)
+ else:
+ textlist=[]
+ langlist=[]
+ for tmp in LangSegment.getTexts(text):
+ langlist.append(tmp["lang"])
+ textlist.append(tmp["text"])
print(textlist)
print(langlist)
bert_list = []
@@ -300,6 +324,24 @@ def get_first(text):
return text
+def get_cleaned_text_final(text,language):
+ if language in {"en","all_zh","all_ja"}:
+ phones, word2ph, norm_text = clean_text_inf(text, language)
+ elif language in {"zh", "ja","auto"}:
+ phones, word2ph, norm_text = nonen_clean_text_inf(text, language)
+ return phones, word2ph, norm_text
+
+def get_bert_final(phones, word2ph, norm_text,language,device):
+ if language == "en":
+ bert = get_bert_inf(phones, word2ph, norm_text, language)
+ elif language in {"zh", "ja","auto"}:
+ bert = nonen_get_bert_inf(norm_text, language)
+ elif language == "all_zh":
+ bert = get_bert_feature(norm_text, word2ph).to(device)
+ else:
+ bert = torch.zeros((1024, len(phones))).to(device)
+ return bert
+
def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切")):
t0 = ttime()
prompt_text = prompt_text.strip("\n")
@@ -335,10 +377,9 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
t1 = ttime()
prompt_language = dict_language[prompt_language]
text_language = dict_language[text_language]
- if prompt_language == "en":
- phones1, word2ph1, norm_text1 = clean_text_inf(prompt_text, prompt_language)
- else:
- phones1, word2ph1, norm_text1 = nonen_clean_text_inf(prompt_text, prompt_language)
+
+ phones1, word2ph1, norm_text1=get_cleaned_text_final(prompt_text, prompt_language)
+
if (how_to_cut == i18n("凑四句一切")):
text = cut1(text)
elif (how_to_cut == i18n("凑50字一切")):
@@ -353,25 +394,16 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
print(i18n("实际输入的目标文本(切句后):"), text)
texts = text.split("\n")
audio_opt = []
- if prompt_language == "en":
- bert1 = get_bert_inf(phones1, word2ph1, norm_text1, prompt_language)
- else:
- bert1 = nonen_get_bert_inf(prompt_text, prompt_language)
+ bert1=get_bert_final(phones1, word2ph1, norm_text1,prompt_language,device).to(dtype)
+
for text in texts:
# skip blank lines in the target text; they used to raise an error
if (len(text.strip()) == 0):
continue
if (text[-1] not in splits): text += "。" if text_language != "en" else "."
print(i18n("实际输入的目标文本(每句):"), text)
- if text_language == "en":
- phones2, word2ph2, norm_text2 = clean_text_inf(text, text_language)
- else:
- phones2, word2ph2, norm_text2 = nonen_clean_text_inf(text, text_language)
-
- if text_language == "en":
- bert2 = get_bert_inf(phones2, word2ph2, norm_text2, text_language)
- else:
- bert2 = nonen_get_bert_inf(text, text_language)
+ phones2, word2ph2, norm_text2 = get_cleaned_text_final(text, text_language)
+ bert2 = get_bert_final(phones2, word2ph2, norm_text2, text_language, device).to(dtype)
bert = torch.cat([bert1, bert2], 1)
@@ -557,7 +589,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
with gr.Row():
text = gr.Textbox(label=i18n("需要合成的文本"), value="")
text_language = gr.Dropdown(
- label=i18n("需要合成的语种"), choices=[i18n("中文"), i18n("英文"), i18n("日文")], value=i18n("中文")
+ label=i18n("需要合成的语种"), choices=[i18n("中文"), i18n("英文"), i18n("日文"), i18n("中英混合"), i18n("日英混合"), i18n("多语种混合")], value=i18n("中文")
)
how_to_cut = gr.Radio(
label=i18n("怎么切"),
From 8d91183c4caeebdc99360e42309df068af4e75e5 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sat, 3 Feb 2024 00:53:44 +0800
Subject: [PATCH 8/8] Update Changelog_CN.md
---
docs/cn/Changelog_CN.md | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/docs/cn/Changelog_CN.md b/docs/cn/Changelog_CN.md
index 8125ba61..8d56db0b 100644
--- a/docs/cn/Changelog_CN.md
+++ b/docs/cn/Changelog_CN.md
@@ -75,6 +75,12 @@
3-Added splitting by punctuation marks
+### 20240201 Update
+
+1-Fixed uvr5 separation failures caused by errors reading the audio format
+
+2-Support mixed Chinese/Japanese/English text with automatic segmentation and language detection
+
todolist:
@@ -84,4 +90,3 @@ todolist:
3-A % percent sign in the text raises an error and blocks inference; likewise 元/吨 is read as "yuan ton" instead of "yuan per ton". Which library handles the text-to-phoneme parsing, and how should problems like these be fixed?
-4-Five target-language modes: Chinese+Japanese+English, Chinese+English, Japanese+English