Update gui.py&webui.py (#1216)

1. Fix the issue where the GUI reloads the GPT and SoVITS models on every inference; weights are now reloaded only when the selected checkpoint path changes (see the sketch after the commit metadata below).
2. Fix the issue where the WebUI would launch whenever the module was imported; the Gradio launch call is now guarded by an if __name__ == '__main__' check (see the sketch after the webui.py diff).
3. Add the missing package import.
4. Simplify the GUI code and address various inefficiencies: allow typing the reference text and target text directly (as in the WebUI), add a file picker for uploading the reference audio, add CH-EN / JA-EN / multilingual language options (with multilingual as the default), and standardize variable names for readability.
Spr_Aachen 2024-06-21 18:05:16 +08:00 committed by GitHub
parent db50670598
commit d8bcc732d7
2 changed files with 53 additions and 81 deletions
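
For fix 1, the key change is to remember which checkpoints are currently loaded and to call change_gpt_weights / change_sovits_weights only when the selected path actually changes; the GUI seeds the cached paths from the defaults exported by inference_webui. Below is a minimal standalone sketch of that caching check, using the same imports as the diff; the synthesize wrapper and its keyword arguments are illustrative, not part of the commit.

from inference_webui import (change_gpt_weights, change_sovits_weights,
                             get_tts_wav, gpt_path, sovits_path)

# Paths of the weights that are currently loaded, seeded from the
# defaults exported by inference_webui.
loaded_gpt_path = gpt_path
loaded_sovits_path = sovits_path

def synthesize(gpt_model_path, sovits_model_path, **tts_kwargs):
    """Reload a model only when the selected checkpoint has changed."""
    global loaded_gpt_path, loaded_sovits_path

    if gpt_model_path != loaded_gpt_path:
        change_gpt_weights(gpt_path=gpt_model_path)
        loaded_gpt_path = gpt_model_path

    if sovits_model_path != loaded_sovits_path:
        change_sovits_weights(sovits_path=sovits_model_path)
        loaded_sovits_path = sovits_model_path

    return get_tts_wav(**tts_kwargs)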

gui.py

@@ -1,3 +1,4 @@
import os
import sys
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit
@@ -7,16 +8,16 @@ import soundfile as sf
from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav
from inference_webui import gpt_path, sovits_path, change_gpt_weights, change_sovits_weights, get_tts_wav
class GPTSoVITSGUI(QMainWindow):
GPT_Path = gpt_path
SoVITS_Path = sovits_path
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.setWindowTitle('GPT-SoVITS GUI')
self.setGeometry(800, 450, 950, 850)
@@ -71,6 +72,7 @@ class GPTSoVITSGUI(QMainWindow):
self.GPT_model_label = QLabel("选择GPT模型:")
self.GPT_model_input = QLineEdit()
self.GPT_model_input.setPlaceholderText("拖拽或选择文件")
self.GPT_model_input.setText(self.GPT_Path)
self.GPT_model_input.setReadOnly(True)
self.GPT_model_button = QPushButton("选择GPT模型文件")
self.GPT_model_button.clicked.connect(self.select_GPT_model)
@@ -78,6 +80,7 @@ class GPTSoVITSGUI(QMainWindow):
self.SoVITS_model_label = QLabel("选择SoVITS模型:")
self.SoVITS_model_input = QLineEdit()
self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件")
self.SoVITS_model_input.setText(self.SoVITS_Path)
self.SoVITS_model_input.setReadOnly(True)
self.SoVITS_model_button = QPushButton("选择SoVITS模型文件")
self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model)
@@ -91,25 +94,25 @@ class GPTSoVITSGUI(QMainWindow):
self.ref_text_label = QLabel("参考音频文本:")
self.ref_text_input = QLineEdit()
self.ref_text_input.setPlaceholderText("拖拽或选择文件")
self.ref_text_input.setReadOnly(True)
self.ref_text_input.setPlaceholderText("直接输入文字或上传文本")
self.ref_text_button = QPushButton("上传文本")
self.ref_text_button.clicked.connect(self.upload_ref_text)
self.language_label = QLabel("参考音频语言:")
self.language_combobox = QComboBox()
self.language_combobox.addItems(["中文", "英文", "日文"])
self.ref_language_label = QLabel("参考音频语言:")
self.ref_language_combobox = QComboBox()
self.ref_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
self.ref_language_combobox.setCurrentText("多语种混合")
self.target_text_label = QLabel("合成目标文本:")
self.target_text_input = QLineEdit()
self.target_text_input.setPlaceholderText("拖拽或选择文件")
self.target_text_input.setReadOnly(True)
self.target_text_input.setPlaceholderText("直接输入文字或上传文本")
self.target_text_button = QPushButton("上传文本")
self.target_text_button.clicked.connect(self.upload_target_text)
self.language_label_02 = QLabel("合成音频语言:")
self.language_combobox_02 = QComboBox()
self.language_combobox_02.addItems(["中文", "英文", "日文"])
self.target_language_label = QLabel("合成音频语言:")
self.target_language_combobox = QComboBox()
self.target_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
self.target_language_combobox.setCurrentText("多语种混合")
self.output_label = QLabel("输出音频路径:")
self.output_input = QLineEdit()
@@ -140,10 +143,8 @@ class GPTSoVITSGUI(QMainWindow):
main_layout = QVBoxLayout()
input_layout = QGridLayout()
input_layout.setSpacing(10)
self.setLayout(input_layout)
input_layout = QGridLayout(self)
input_layout.setSpacing(10)
input_layout.addWidget(license_label, 0, 0, 1, 3)
@@ -159,22 +160,22 @@ class GPTSoVITSGUI(QMainWindow):
input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2)
input_layout.addWidget(self.ref_audio_button, 6, 2)
input_layout.addWidget(self.language_label, 7, 0)
input_layout.addWidget(self.language_combobox, 8, 0, 1, 1)
input_layout.addWidget(self.ref_language_label, 7, 0)
input_layout.addWidget(self.ref_language_combobox, 8, 0, 1, 1)
input_layout.addWidget(self.ref_text_label, 9, 0)
input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2)
input_layout.addWidget(self.ref_text_button, 10, 2)
input_layout.addWidget(self.language_label_02, 11, 0)
input_layout.addWidget(self.language_combobox_02, 12, 0, 1, 1)
input_layout.addWidget(self.target_language_label, 11, 0)
input_layout.addWidget(self.target_language_combobox, 12, 0, 1, 1)
input_layout.addWidget(self.target_text_label, 13, 0)
input_layout.addWidget(self.target_text_input, 14, 0, 1, 2)
input_layout.addWidget(self.target_text_button, 14, 2)
input_layout.addWidget(self.output_label, 15, 0)
input_layout.addWidget(self.output_input, 16, 0, 1, 2)
input_layout.addWidget(self.output_button, 16, 2)
main_layout.addLayout(input_layout)
output_layout = QVBoxLayout()
@@ -198,10 +199,8 @@ class GPTSoVITSGUI(QMainWindow):
def dropEvent(self, event):
if event.mimeData().hasUrls():
file_paths = [url.toLocalFile() for url in event.mimeData().urls()]
if len(file_paths) == 1:
self.update_ref_audio(file_paths[0])
self.update_input_paths(self.ref_audio_input, file_paths[0])
else:
self.update_ref_audio(", ".join(file_paths))
@@ -211,23 +210,13 @@ class GPTSoVITSGUI(QMainWindow):
widget.installEventFilter(self)
def eventFilter(self, obj, event):
if event.type() == QEvent.DragEnter:
if event.type() in (QEvent.DragEnter, QEvent.Drop):
mime_data = event.mimeData()
if mime_data.hasUrls():
event.acceptProposedAction()
elif event.type() == QEvent.Drop:
mime_data = event.mimeData()
if mime_data.hasUrls():
file_paths = [url.toLocalFile() for url in mime_data.urls()]
if len(file_paths) == 1:
self.update_input_paths(obj, file_paths[0])
else:
self.update_input_paths(obj, ", ".join(file_paths))
event.acceptProposedAction()
return super().eventFilter(obj, event)
def select_GPT_model(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择GPT模型文件", "", "GPT Files (*.ckpt)")
if file_path:
@@ -239,24 +228,9 @@ class GPTSoVITSGUI(QMainWindow):
self.SoVITS_model_input.setText(file_path)
def select_ref_audio(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
options |= QFileDialog.ShowDirsOnly
file_dialog = QFileDialog()
file_dialog.setOptions(options)
file_dialog.setFileMode(QFileDialog.AnyFile)
file_dialog.setNameFilter("Audio Files (*.wav *.mp3)")
if file_dialog.exec_():
file_paths = file_dialog.selectedFiles()
if len(file_paths) == 1:
self.update_ref_audio(file_paths[0])
self.update_input_paths(self.ref_audio_input, file_paths[0])
else:
self.update_ref_audio(", ".join(file_paths))
file_path, _ = QFileDialog.getOpenFileName(self, "选择参考音频文件", "", "Audio Files (*.wav *.mp3)")
if file_path:
self.update_ref_audio(file_path)
def upload_ref_text(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
@@ -264,7 +238,6 @@ class GPTSoVITSGUI(QMainWindow):
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
self.ref_text_input.setText(content)
self.update_input_paths(self.ref_text_input, file_path)
def upload_target_text(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
@@ -272,7 +245,6 @@ class GPTSoVITSGUI(QMainWindow):
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
self.target_text_input.setText(content)
self.update_input_paths(self.target_text_input, file_path)
def select_output_path(self):
options = QFileDialog.Options()
@@ -290,9 +262,6 @@ class GPTSoVITSGUI(QMainWindow):
def update_ref_audio(self, file_path):
self.ref_audio_input.setText(file_path)
def update_input_paths(self, input_box, file_path):
input_box.setText(file_path)
def clear_output(self):
self.output_text.clear()
@@ -300,23 +269,27 @@ class GPTSoVITSGUI(QMainWindow):
GPT_model_path = self.GPT_model_input.text()
SoVITS_model_path = self.SoVITS_model_input.text()
ref_audio_path = self.ref_audio_input.text()
language_combobox = self.language_combobox.currentText()
language_combobox = self.ref_language_combobox.currentText()
language_combobox = i18n(language_combobox)
ref_text = self.ref_text_input.text()
language_combobox_02 = self.language_combobox_02.currentText()
language_combobox_02 = i18n(language_combobox_02)
target_language_combobox = self.target_language_combobox.currentText()
target_language_combobox = i18n(target_language_combobox)
target_text = self.target_text_input.text()
output_path = self.output_input.text()
change_gpt_weights(gpt_path=GPT_model_path)
change_sovits_weights(sovits_path=SoVITS_model_path)
if GPT_model_path != self.GPT_Path:
change_gpt_weights(gpt_path=GPT_model_path)
self.GPT_Path = GPT_model_path
if SoVITS_model_path != self.SoVITS_Path:
change_sovits_weights(sovits_path=SoVITS_model_path)
self.SoVITS_Path = SoVITS_model_path
synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
prompt_text=ref_text,
prompt_language=language_combobox,
text=target_text,
text_language=language_combobox_02)
text_language=target_language_combobox)
result_list = list(synthesis_result)
if result_list:
@@ -329,12 +302,9 @@ class GPTSoVITSGUI(QMainWindow):
self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000)
self.output_text.append("处理结果:\n" + result)
def main():
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = GPTSoVITSGUI()
mainWin.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
sys.exit(app.exec_())
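
For context on the synthesize hunk above, here is a minimal sketch of how the GUI consumes the result of get_tts_wav, assuming the function yields (sampling_rate, audio) tuples as in inference_webui; the save_last_result helper and the output_dir argument are illustrative names, not part of the commit.

import os
import soundfile as sf

def save_last_result(synthesis_result, output_dir):
    # get_tts_wav is a generator; keep only the final (sampling_rate, audio)
    # tuple it yields and write that chunk to disk.
    result_list = list(synthesis_result)
    if not result_list:
        return None
    sampling_rate, audio_data = result_list[-1]
    output_wav_path = os.path.join(output_dir, "output.wav")
    sf.write(output_wav_path, audio_data, sampling_rate)
    return output_wav_path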

webui.py

@@ -639,10 +639,12 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
button5.click(cut5, [text_inp], [text_opt])
gr.Markdown(value=i18n("后续将支持转音素、手工修改音素、语音合成分步执行。"))
app.queue(concurrency_count=511, max_size=1022).launch(
server_name="0.0.0.0",
inbrowser=True,
share=is_share,
server_port=infer_ttswebui,
quiet=True,
)
if __name__ == '__main__':
app.queue(concurrency_count=511, max_size=1022).launch(
server_name="0.0.0.0",
inbrowser=True,
share=is_share,
server_port=infer_ttswebui,
quiet=True,
)
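
Fix 2 corresponds to the hunk above: the Gradio launch call is now guarded so that importing the module (as gui.py does for get_tts_wav and the weight-loading helpers) no longer starts a web server. A minimal self-contained sketch of the pattern follows; the dummy UI and the placeholder values for is_share and infer_ttswebui are illustrative, and the queue/launch arguments mirror the diff, assuming the older Gradio API used by the repo at the time (concurrency_count was removed in later Gradio releases).

import gradio as gr

# Placeholders for the config values read in the real webui.py.
is_share = False
infer_ttswebui = 9872

def get_tts_wav(text):
    # Stands in for the real synthesis function that gui.py imports.
    return None

with gr.Blocks(title="GPT-SoVITS WebUI") as app:
    text_inp = gr.Textbox(label="text")
    audio_out = gr.Audio(label="output")
    gr.Button("synthesize").click(get_tts_wav, [text_inp], [audio_out])

if __name__ == '__main__':
    # Reached only when the file is run directly; importing the module
    # (e.g. "from inference_webui import get_tts_wav") no longer
    # launches the server.
    app.queue(concurrency_count=511, max_size=1022).launch(
        server_name="0.0.0.0",
        inbrowser=True,
        share=is_share,
        server_port=infer_ttswebui,
        quiet=True,
    )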