diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py
index 574b9d8..1ac11c3 100644
--- a/GPT_SoVITS/inference_webui.py
+++ b/GPT_SoVITS/inference_webui.py
@@ -67,6 +67,23 @@ elif torch.backends.mps.is_available():
 else:
     device = "cpu"
 
+# Reference-audio memory: restore the last-used reference audio, its
+# transcript, and its language from audio_log.txt (written by get_tts_wav).
+file_path = './audio_log.txt'
+
+upload_audio_path = None
+upload_audio_text = ""
+upload_audio_language = "中文"
+
+if os.path.exists(file_path):
+    with open(file_path, 'r', encoding="utf-8") as file:
+        text_data = file.read()
+        text_data = text_data.split("|")
+        # Guard against a truncated/malformed log file: only restore when
+        # all three "path|text|language" fields are present.
+        if len(text_data) >= 3:
+            upload_audio_path, upload_audio_text = text_data[0], text_data[1]
+            upload_audio_language = text_data[2]
+
 tokenizer = AutoTokenizer.from_pretrained(bert_path)
 bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
 if is_half == True:
@@ -263,6 +280,7 @@ def nonen_get_bert_inf(text, language):
 
 
 def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language):
+    # Persist the current reference settings so the next launch can restore them.
+    with open("./audio_log.txt", "w", encoding="utf-8") as f:
+        f.write(f"{ref_wav_path}|{prompt_text}|{prompt_language}")
     t0 = ttime()
     prompt_text = prompt_text.strip("\n")
     prompt_language, text = prompt_language, text.strip("\n")
@@ -482,10 +500,10 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
     GPT_dropdown.change(change_gpt_weights,[GPT_dropdown],[])
     gr.Markdown(value=i18n("*请上传并填写参考信息"))
     with gr.Row():
-        inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath")
-        prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="")
+        inp_ref = gr.Audio(label=i18n("请上传参考音频"), type="filepath", value=upload_audio_path)
+        prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value=upload_audio_text)
         prompt_language = gr.Dropdown(
-            label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n("中文")
+            label=i18n("参考音频的语种"),choices=[i18n("中文"),i18n("英文"),i18n("日文")],value=i18n(upload_audio_language)
         )
     gr.Markdown(value=i18n("*请填写需要合成的目标文本"))
     with gr.Row():