diff --git a/GPT_SoVITS/module/models.py b/GPT_SoVITS/module/models.py
index 6bfee085..968c4cbf 100644
--- a/GPT_SoVITS/module/models.py
+++ b/GPT_SoVITS/module/models.py
@@ -1,3 +1,5 @@
+import warnings
+warnings.filterwarnings("ignore")
 import copy
 import math
 import os
diff --git a/tools/my_utils.py b/tools/my_utils.py
index 56db27cf..93924f10 100644
--- a/tools/my_utils.py
+++ b/tools/my_utils.py
@@ -46,25 +46,30 @@ def check_for_existance(file_list:list=None,is_train=False,is_dataset_processing
     for file in file_list:
         if os.path.exists(file):files_status.append(True)
         else:files_status.append(False)
-
-    if sum(files_status)!=0:
+    if sum(files_status)!=len(files_status):
         if is_train:
             for file,status in zip(file_list,files_status):
                 if status:pass
                 else:gr.Warning(file)
             gr.Warning(i18n('以下文件或文件夹不存在:'))
+            return False
         elif is_dataset_processing:
-            if not files_status[0]:
+            if files_status[0]:
+                return True
+            elif not files_status[0]:
                 gr.Warning(file_list[0])
-            if not files_status[1] and file_list[1]:
+            elif not files_status[1] and file_list[1]:
                 gr.Warning(file_list[1])
             gr.Warning(i18n('以下文件或文件夹不存在:'))
+            return False
         else:
             if file_list[0]:
                 gr.Warning(file_list[0])
                 gr.Warning(i18n('以下文件或文件夹不存在:'))
             else:
                 gr.Warning(i18n('路径不能为空'))
+            return False
+    return True
 
 def check_details(path_list=None,is_train=False,is_dataset_processing=False):
     if is_dataset_processing:
@@ -78,7 +83,7 @@ def check_details(path_list=None,is_train=False,is_dataset_processing=False):
                 return
         with open(list_path,"r",encoding="utf8")as f:
             line=f.readline().strip("\n").split("\n")
-        wav_name, spk_name, language, text = line.split("|")
+        wav_name, _, __, ___ = line[0].split("|")
         wav_name=clean_path(wav_name)
         if (audio_path != "" and audio_path != None):
             wav_name = os.path.basename(wav_name)
@@ -89,6 +94,7 @@ def check_details(path_list=None,is_train=False,is_dataset_processing=False):
             ...
         else:
             gr.Warning(i18n('路径错误'))
+            return
     if is_train:
         path_list.append(os.path.join(path_list[0],'2-name2text.txt'))
         path_list.append(os.path.join(path_list[0],'4-cnhubert'))
@@ -103,5 +109,5 @@ def check_details(path_list=None,is_train=False,is_dataset_processing=False):
         if os.listdir(wav_path):...
         else:gr.Warning(i18n('缺少音频数据集'))
         df = pd.read_csv(semantic_path)
-        if len(pd) > 1:...
+        if len(df) > 1:...
         else:gr.Warning(i18n('缺少语义数据集'))
diff --git a/webui.py b/webui.py
index a2be41bd..430dc315 100644
--- a/webui.py
+++ b/webui.py
@@ -200,6 +200,7 @@ def kill_process(pid):
 def change_label(path_list):
     global p_label
     if(p_label==None):
+        check_for_existance([path_list])
         path_list=my_utils.clean_path(path_list)
         cmd = '"%s" tools/subfix_webui.py --load_list "%s" --webui_port %s --is_share %s'%(python_exec,path_list,webui_port_subfix,is_share)
         yield i18n("打标工具WebUI已开启"), {'__type__':'update','visible':False}, {'__type__':'update','visible':True}
@@ -308,8 +309,8 @@ def open1Ba(batch_size,total_epoch,exp_name,text_low_lr_rate,if_save_latest,if_s
             data=json.loads(data)
         s2_dir="%s/%s"%(exp_root,exp_name)
         os.makedirs("%s/logs_s2"%(s2_dir),exist_ok=True)
-        check_for_existance([s2_dir],is_train=True)
-        check_details([s2_dir],is_train=True)
+        if check_for_existance([s2_dir],is_train=True):
+            check_details([s2_dir],is_train=True)
         if(is_half==False):
             data["train"]["fp16_run"]=False
             batch_size=max(1,batch_size//2)
@@ -356,8 +357,8 @@ def open1Bb(batch_size,total_epoch,exp_name,if_dpo,if_save_latest,if_save_every_
             data=yaml.load(data, Loader=yaml.FullLoader)
         s1_dir="%s/%s"%(exp_root,exp_name)
         os.makedirs("%s/logs_s1"%(s1_dir),exist_ok=True)
-        check_for_existance([s1_dir],is_train=True)
-        check_details([s1_dir],is_train=True)
+        if check_for_existance([s1_dir],is_train=True):
+            check_details([s1_dir],is_train=True)
         if(is_half==False):
             data["train"]["precision"]="32"
             batch_size = max(1, batch_size // 2)
@@ -441,8 +442,8 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
     global ps1a
     inp_text = my_utils.clean_path(inp_text)
     inp_wav_dir = my_utils.clean_path(inp_wav_dir)
-    check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True)
-    check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+    if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+        check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
     if (ps1a == []):
         opt_dir="%s/%s"%(exp_root,exp_name)
         config={
@@ -504,8 +505,8 @@ def open1b(inp_text,inp_wav_dir,exp_name,gpu_numbers,ssl_pretrained_dir):
     global ps1b
     inp_text = my_utils.clean_path(inp_text)
     inp_wav_dir = my_utils.clean_path(inp_wav_dir)
-    check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True)
-    check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+    if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+        check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
     if (ps1b == []):
         config={
             "inp_text":inp_text,
@@ -553,8 +554,8 @@ ps1c=[]
 def open1c(inp_text,exp_name,gpu_numbers,pretrained_s2G_path):
     global ps1c
     inp_text = my_utils.clean_path(inp_text)
-    check_for_existance([inp_text,''], is_dataset_processing=True)
-    check_details([inp_text,''], is_dataset_processing=True)
+    if check_for_existance([inp_text,''], is_dataset_processing=True):
+        check_details([inp_text,''], is_dataset_processing=True)
     if (ps1c == []):
         opt_dir="%s/%s"%(exp_root,exp_name)
         config={
@@ -613,8 +614,8 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb
     global ps1abc
     inp_text = my_utils.clean_path(inp_text)
     inp_wav_dir = my_utils.clean_path(inp_wav_dir)
-    check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True)
-    check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
+    if check_for_existance([inp_text,inp_wav_dir], is_dataset_processing=True):
+        check_details([inp_text,inp_wav_dir], is_dataset_processing=True)
     if (ps1abc == []):
         opt_dir="%s/%s"%(exp_root,exp_name)
         try: