Remove warnings and add some warnings (#1408)

Author: XXXXRT666
Date: 2024-08-06 18:37:55 +08:00 (committed by GitHub)
Commit: 805dbd7ff2
Parent: 2196b2bfc6
5 changed files with 61 additions and 10 deletions

GPT_SoVITS/feature_extractor/cnhubert.py

@@ -4,6 +4,7 @@ import librosa
 import torch
 import torch.nn.functional as F
 import soundfile as sf
+import os
 from transformers import logging as tf_logging
 tf_logging.set_verbosity_error()
@@ -24,9 +25,11 @@ cnhubert_base_path = None
 class CNHubert(nn.Module):
     def __init__(self):
         super().__init__()
-        self.model = HubertModel.from_pretrained(cnhubert_base_path)
+        if os.path.exists(cnhubert_base_path):...
+        else:raise FileNotFoundError(cnhubert_base_path)
+        self.model = HubertModel.from_pretrained(cnhubert_base_path, local_files_only=True)
         self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
-            cnhubert_base_path
+            cnhubert_base_path, local_files_only=True
         )
     def forward(self, x):

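The hunks above make CNHubert loading offline and fail-fast. A self-contained sketch of the same pattern (the `load_cnhubert` helper and its return shape are illustrative, not part of the repo):

```python
import os
from transformers import HubertModel, Wav2Vec2FeatureExtractor

def load_cnhubert(base_path):
    # Fail fast with a clear error instead of letting transformers fall
    # through to a Hugging Face Hub lookup for a path that does not exist.
    if not os.path.exists(base_path):
        raise FileNotFoundError(base_path)
    # local_files_only=True skips the Hub entirely: no network round trip
    # and no connection warnings when running offline.
    model = HubertModel.from_pretrained(base_path, local_files_only=True)
    extractor = Wav2Vec2FeatureExtractor.from_pretrained(base_path, local_files_only=True)
    return model, extractor
```
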
GPT_SoVITS/inference_webui.py

@@ -389,6 +389,10 @@ def merge_short_text_in_array(texts, threshold):
 cache= {}
 def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False):
     global cache
+    if ref_wav_path:pass
+    else:gr.Warning(i18n('请上传参考音频'))
+    if text:pass
+    else:gr.Warning(i18n('请填入推理文本'))
     t = []
     if prompt_text is None or len(prompt_text) == 0:
         ref_free = True
@@ -413,6 +417,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
     with torch.no_grad():
         wav16k, sr = librosa.load(ref_wav_path, sr=16000)
         if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
+            gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
             raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
         wav16k = torch.from_numpy(wav16k)
         zero_wav_torch = torch.from_numpy(zero_wav)

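Both hunks follow the commit's theme: missing inputs now produce non-fatal `gr.Warning` toasts ("please upload reference audio", "please fill in the inference text"), and the out-of-range reference audio still raises, but only after a toast explains why. A sketch of the two variants; the helper names are illustrative, and `gr.Warning` only surfaces as a toast when called from inside a Gradio event handler:

```python
import gradio as gr

def warn_if_missing(ref_wav_path, text):
    # Non-fatal: show a toast but keep going, as get_tts_wav now does.
    if not ref_wav_path:
        gr.Warning("Please upload reference audio")     # i18n('请上传参考音频')
    if not text:
        gr.Warning("Please fill in the inference text") # i18n('请填入推理文本')

def check_ref_duration(wav16k):
    # Fatal: toast first, then raise, so web users see the reason instead of
    # only a server-side traceback. 3-10 s at 16 kHz sampling is
    # 48,000-160,000 samples, hence the bounds in the hunk above.
    if wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000:
        msg = "Reference audio is outside the 3-10 s range, please replace it"
        gr.Warning(msg)
        raise OSError(msg)
```
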
GPT_SoVITS/prepare_datasets/1-get-text.py

@@ -54,6 +54,8 @@ if os.path.exists(txt_path) == False:
     # device = "mps"
     else:
         device = "cpu"
+    if os.path.exists(bert_pretrained_dir):...
+    else:raise FileNotFoundError(bert_pretrained_dir)
     tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir)
     bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir)
     if is_half == True:

GPT_SoVITS/prepare_datasets/3-get-semantic.py

@@ -34,6 +34,8 @@ logging.getLogger("numba").setLevel(logging.WARNING)
 # os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[5]
 # opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name
+if os.path.exists(pretrained_s2G):...
+else:raise FileNotFoundError(pretrained_s2G)
 hubert_dir = "%s/4-cnhubert" % (opt_dir)
 semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)

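Both dataset-prep hunks use the same `if os.path.exists(p):...` / `else:raise FileNotFoundError(p)` guard. The `...` is Python's Ellipsis, a no-op expression statement, so the pair is simply a fail-fast existence check. A more conventional spelling as a reusable helper (`require_path` is illustrative, not in the repo):

```python
import os

def require_path(path):
    """Raise FileNotFoundError up front instead of failing deep inside
    from_pretrained() or checkpoint loading."""
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    return path

# e.g. tokenizer = AutoTokenizer.from_pretrained(require_path(bert_pretrained_dir))
```
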
webui.py

@@ -48,7 +48,6 @@ from tools import my_utils
 import traceback
 import shutil
 import pdb
-import gradio as gr
 from subprocess import Popen
 import signal
 from config import python_exec,infer_device,is_half,exp_root,webui_port_main,webui_port_infer_tts,webui_port_uvr5,webui_port_subfix,is_share
@@ -63,7 +62,9 @@ from scipy.io import wavfile
 from tools.my_utils import load_audio
 from multiprocessing import cpu_count
-# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 当遇到mps不支持的步骤时使用cpu
+import gradio.analytics as analytics
+analytics.version_check = lambda:None
+import gradio as gr
 n_cpu=cpu_count()
 ngpu = torch.cuda.device_count()
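
The import reordering here matters: `gradio.analytics` is patched before anything can call it, so the version check that pings PyPI (and prints an upgrade notice at launch) becomes a no-op. A sketch of the same trick with a belt-and-braces environment variable; that Gradio honors `GRADIO_ANALYTICS_ENABLED` is an assumption about the installed version:

```python
import os
# Assumption: this env var disables Gradio telemetry when set before import.
os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "False")

import gradio.analytics as analytics
analytics.version_check = lambda: None  # silence the PyPI update check
import gradio as gr  # a later gr.Blocks().launch() will skip the check
```
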
@@ -248,6 +249,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang, asr_
     if(p_asr==None):
         asr_inp_dir=my_utils.clean_path(asr_inp_dir)
         asr_opt_dir=my_utils.clean_path(asr_opt_dir)
+        check_for_exists([asr_inp_dir])
         cmd = f'"{python_exec}" tools/asr/{asr_dict[asr_model]["path"]}'
         cmd += f' -i "{asr_inp_dir}"'
         cmd += f' -o "{asr_opt_dir}"'
@@ -278,6 +280,7 @@ def open_denoise(denoise_inp_dir, denoise_opt_dir):
     if(p_denoise==None):
         denoise_inp_dir=my_utils.clean_path(denoise_inp_dir)
         denoise_opt_dir=my_utils.clean_path(denoise_opt_dir)
+        check_for_exists([denoise_inp_dir])
         cmd = '"%s" tools/cmd-denoise.py -i "%s" -o "%s" -p %s'%(python_exec,denoise_inp_dir,denoise_opt_dir,"float16"if is_half==True else "float32")
         yield "语音降噪任务开启:%s"%cmd, {"__type__":"update","visible":False}, {"__type__":"update","visible":True}, {"__type__":"update"}, {"__type__":"update"}
@@ -306,6 +309,7 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s
             data=json.loads(data)
         s2_dir="%s/%s"%(exp_root,exp_name)
         os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True)
+        check_for_exists([s2_dir],is_train=True)
         if(is_half==False):
             data["train"]["fp16_run"]=False
             batch_size=max(1,batch_size//2)
@@ -322,6 +326,7 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s
         data["data"]["exp_dir"]=data["s2_ckpt_dir"]=s2_dir
         data["save_weight_dir"]=SoVITS_weight_root[-int(version[-1])+2]
         data["name"]=exp_name
+        data["version"]=version
         tmp_config_path="%s/tmp_s2.json"%tmp
         with open(tmp_config_path,"w")as f:f.write(json.dumps(data))
@@ -351,6 +356,7 @@ def open1Bb(batch_size,total_epoch,exp_name,if_dpo,if_save_latest,if_save_every_
             data=yaml.load(data, Loader=yaml.FullLoader)
         s1_dir="%s/%s"%(exp_root,exp_name)
         os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True)
+        check_for_exists([s1_dir],is_train=True)
         if(is_half==False):
             data["train"]["precision"]="32"
             batch_size = max(1, batch_size // 2)
@@ -395,13 +401,14 @@ def open_slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_k
     global ps_slice
     inp = my_utils.clean_path(inp)
     opt_root = my_utils.clean_path(opt_root)
+    check_for_exists([inp])
     if(os.path.exists(inp)==False):
-        yield "输入路径不存在", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}
+        yield "输入路径不存在", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
         return
     if os.path.isfile(inp):n_parts=1
     elif os.path.isdir(inp):pass
     else:
-        yield "输入路径存在但既不是文件也不是文件夹", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}
+        yield "输入路径存在但既不是文件也不是文件夹", {"__type__":"update","visible":True}, {"__type__":"update","visible":False}, {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
         return
     if (ps_slice == []):
         for i_part in range(n_parts):
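
The two widened `yield` tuples track a change in the slicing event's output list: a Gradio generator handler must yield exactly one value per registered output component, and `{"__type__": "update"}` is the dict form of `gr.update()`, meaning "leave this component unchanged". A minimal sketch; the component count is illustrative:

```python
import gradio as gr

def slice_handler(inp):
    # The event is registered with six outputs, so every yield must supply
    # six values, including early exits; a mismatch raises a Gradio error.
    noop = [gr.update()] * 5  # same effect as {"__type__": "update"} each
    if not inp:
        yield ("input path does not exist", *noop)
        return
    yield ("done", *noop)
```
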
@@ -433,6 +440,7 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
     global ps1a
     inp_text = my_utils.clean_path(inp_text)
     inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+    check_for_exists([inp_text,inp_wav_dir])
     if (ps1a == []):
         opt_dir="%s/%s"%(exp_root,exp_name)
         config={
@@ -494,6 +502,7 @@ def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir):
     global ps1b
     inp_text = my_utils.clean_path(inp_text)
     inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+    check_for_exists([inp_text,inp_wav_dir])
     if (ps1b == []):
         config={
             "inp_text":inp_text,
@@ -541,6 +550,7 @@ ps1c=[]
 def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path):
     global ps1c
     inp_text = my_utils.clean_path(inp_text)
+    check_for_exists([inp_text])
     if (ps1c == []):
         opt_dir="%s/%s"%(exp_root,exp_name)
         config={
@@ -599,6 +609,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb
     global ps1abc
     inp_text = my_utils.clean_path(inp_text)
     inp_wav_dir = my_utils.clean_path(inp_wav_dir)
+    check_for_exists([inp_text,inp_wav_dir])
     if (ps1abc == []):
         opt_dir="%s/%s"%(exp_root,exp_name)
         try:
@@ -730,11 +741,39 @@ def switch_version(version_):
     os.environ['version']=version_
     global version
     version = version_
-    if len(pretrained_gpt_name) > 1 and len(pretrained_sovits_name) > 1:
-        return {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D")}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}
+    if pretrained_sovits_name[-int(version[-1])+2] !='' and pretrained_gpt_name[-int(version[-1])+2] !='':...
     else:
-        raise gr.Error(i18n(f'未下载{version.upper()}模型'))
+        gr.Warning(i18n(f'未下载{version.upper()}模型'))
+    return {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2].replace("s2G","s2D")}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_gpt_name[-int(version[-1])+2]}, {'__type__':'update', 'value':pretrained_sovits_name[-int(version[-1])+2]}
+def check_for_exists(file_list=[],is_train=False):
+    _=[]
+    if is_train == True and file_list:
+        file_list.append(os.path.join(file_list[0],'2-name2text.txt'))
+        file_list.append(os.path.join(file_list[0],'3-bert'))
+        file_list.append(os.path.join(file_list[0],'4-cnhubert'))
+        file_list.append(os.path.join(file_list[0],'5-wav32k'))
+        file_list.append(os.path.join(file_list[0],'6-name2semantic.tsv'))
+    for file in file_list:
+        if os.path.exists(file):pass
+        else:_.append(file)
+    if _:
+        if is_train:
+            for i in _:
+                if i != '':
+                    gr.Warning(i)
+            gr.Warning(i18n('以下文件或文件夹不存在:'))
+        else:
+            if len(_) == 1:
+                if _[0]:
+                    gr.Warning(_[0])
+                gr.Warning(i18n('文件或文件夹不存在:'))
+            else:
+                for i in _:
+                    if i != '':
+                        gr.Warning(i)
+                gr.Warning(i18n('以下文件或文件夹不存在:'))
 from text.g2pw import G2PWPinyin
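
`switch_version` now warns instead of raising `gr.Error`, so selecting an un-downloaded model version no longer aborts the event, and the new `check_for_exists` centralizes the path checks added throughout this file. A behaviorally equivalent sketch of the helper that collapses the singular and plural branches into one path and avoids the mutable-default pitfall (names below are illustrative):

```python
import os
import gradio as gr

TRAIN_ARTIFACTS = ("2-name2text.txt", "3-bert", "4-cnhubert",
                   "5-wav32k", "6-name2semantic.tsv")

def check_for_exists(file_list=None, is_train=False):
    files = list(file_list or [])  # copy: don't append into the caller's list
    if is_train and files:
        # Training additionally requires the per-experiment dataset artifacts.
        files += [os.path.join(files[0], name) for name in TRAIN_ARTIFACTS]
    missing = [f for f in files if f and not os.path.exists(f)]
    for f in missing:
        gr.Warning(f)
    if missing:
        # i18n('以下文件或文件夹不存在:')
        gr.Warning("The following files or folders do not exist:")
```

The original's `file_list=[]` default and in-place `append` calls mutate whatever list the caller passed in, so a caller that ever reuses a list would re-check stale artifact paths; copying first sidesteps that while keeping the warn-per-missing-path behavior.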