Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git (synced 2025-04-05 19:41:56 +08:00)

Commit 703234edde (parent 165882d64f): delete the training part and the UI part.
@@ -1,55 +0,0 @@
import argparse
import os
import soundfile as sf

from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav

i18n = I18nAuto()


def synthesize(GPT_model_path, SoVITS_model_path, ref_audio_path, ref_text_path, ref_language, target_text_path, target_language, output_path):
    # Read reference text
    with open(ref_text_path, 'r', encoding='utf-8') as file:
        ref_text = file.read()

    # Read target text
    with open(target_text_path, 'r', encoding='utf-8') as file:
        target_text = file.read()

    # Change model weights
    change_gpt_weights(gpt_path=GPT_model_path)
    change_sovits_weights(sovits_path=SoVITS_model_path)

    # Synthesize audio
    synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
                                   prompt_text=ref_text,
                                   prompt_language=i18n(ref_language),
                                   text=target_text,
                                   text_language=i18n(target_language), top_p=1, temperature=1)

    result_list = list(synthesis_result)

    if result_list:
        last_sampling_rate, last_audio_data = result_list[-1]
        output_wav_path = os.path.join(output_path, "output.wav")
        sf.write(output_wav_path, last_audio_data, last_sampling_rate)
        print(f"Audio saved to {output_wav_path}")


def main():
    parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool")
    parser.add_argument('--gpt_model', required=True, help="Path to the GPT model file")
    parser.add_argument('--sovits_model', required=True, help="Path to the SoVITS model file")
    parser.add_argument('--ref_audio', required=True, help="Path to the reference audio file")
    parser.add_argument('--ref_text', required=True, help="Path to the reference text file")
    parser.add_argument('--ref_language', required=True, choices=["中文", "英文", "日文"], help="Language of the reference audio")
    parser.add_argument('--target_text', required=True, help="Path to the target text file")
    parser.add_argument('--target_language', required=True, choices=["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"], help="Language of the target text")
    parser.add_argument('--output_path', required=True, help="Path to the output directory")

    args = parser.parse_args()

    synthesize(args.gpt_model, args.sovits_model, args.ref_audio, args.ref_text, args.ref_language, args.target_text, args.target_language, args.output_path)


if __name__ == '__main__':
    main()
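For reference, a minimal programmatic call of the synthesize() helper above might look like the sketch below; the checkpoint and file paths are hypothetical placeholders, not files shipped with the repository.

    # Hypothetical paths -- substitute your own checkpoints and inputs.
    synthesize(
        GPT_model_path="GPT_weights/my_speaker-e15.ckpt",
        SoVITS_model_path="SoVITS_weights/my_speaker_e8.pth",
        ref_audio_path="ref/sample.wav",        # 3-10 s reference clip
        ref_text_path="ref/sample.txt",         # transcript of the reference clip
        ref_language="中文",
        target_text_path="target.txt",
        target_language="中英混合",
        output_path="output",                   # directory; output.wav is written inside
    )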
@@ -1,310 +0,0 @@
import os
import sys
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit
from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox
import soundfile as sf

from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()

from inference_webui import gpt_path, sovits_path, change_gpt_weights, change_sovits_weights, get_tts_wav


class GPTSoVITSGUI(QMainWindow):
    GPT_Path = gpt_path
    SoVITS_Path = sovits_path

    def __init__(self):
        super().__init__()

        self.setWindowTitle('GPT-SoVITS GUI')
        self.setGeometry(800, 450, 950, 850)

        self.setStyleSheet("""
            QWidget {
                background-color: #a3d3b1;
            }

            QTabWidget::pane {
                background-color: #a3d3b1;
            }

            QTabWidget::tab-bar {
                alignment: left;
            }

            QTabBar::tab {
                background: #8da4bf;
                color: #ffffff;
                padding: 8px;
            }

            QTabBar::tab:selected {
                background: #2a3f54;
            }

            QLabel {
                color: #000000;
            }

            QPushButton {
                background-color: #4CAF50;
                color: white;
                padding: 8px;
                border: 1px solid #4CAF50;
                border-radius: 4px;
            }

            QPushButton:hover {
                background-color: #45a049;
                border: 1px solid #45a049;
                box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.1);
            }
        """)

        license_text = (
            "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. "
            "如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
        license_label = QLabel(license_text)
        license_label.setWordWrap(True)

        self.GPT_model_label = QLabel("选择GPT模型:")
        self.GPT_model_input = QLineEdit()
        self.GPT_model_input.setPlaceholderText("拖拽或选择文件")
        self.GPT_model_input.setText(self.GPT_Path)
        self.GPT_model_input.setReadOnly(True)
        self.GPT_model_button = QPushButton("选择GPT模型文件")
        self.GPT_model_button.clicked.connect(self.select_GPT_model)

        self.SoVITS_model_label = QLabel("选择SoVITS模型:")
        self.SoVITS_model_input = QLineEdit()
        self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件")
        self.SoVITS_model_input.setText(self.SoVITS_Path)
        self.SoVITS_model_input.setReadOnly(True)
        self.SoVITS_model_button = QPushButton("选择SoVITS模型文件")
        self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model)

        self.ref_audio_label = QLabel("上传参考音频:")
        self.ref_audio_input = QLineEdit()
        self.ref_audio_input.setPlaceholderText("拖拽或选择文件")
        self.ref_audio_input.setReadOnly(True)
        self.ref_audio_button = QPushButton("选择音频文件")
        self.ref_audio_button.clicked.connect(self.select_ref_audio)

        self.ref_text_label = QLabel("参考音频文本:")
        self.ref_text_input = QLineEdit()
        self.ref_text_input.setPlaceholderText("直接输入文字或上传文本")
        self.ref_text_button = QPushButton("上传文本")
        self.ref_text_button.clicked.connect(self.upload_ref_text)

        self.ref_language_label = QLabel("参考音频语言:")
        self.ref_language_combobox = QComboBox()
        self.ref_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
        self.ref_language_combobox.setCurrentText("多语种混合")

        self.target_text_label = QLabel("合成目标文本:")
        self.target_text_input = QLineEdit()
        self.target_text_input.setPlaceholderText("直接输入文字或上传文本")
        self.target_text_button = QPushButton("上传文本")
        self.target_text_button.clicked.connect(self.upload_target_text)

        self.target_language_label = QLabel("合成音频语言:")
        self.target_language_combobox = QComboBox()
        self.target_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
        self.target_language_combobox.setCurrentText("多语种混合")

        self.output_label = QLabel("输出音频路径:")
        self.output_input = QLineEdit()
        self.output_input.setPlaceholderText("拖拽或选择文件")
        self.output_input.setReadOnly(True)
        self.output_button = QPushButton("选择文件夹")
        self.output_button.clicked.connect(self.select_output_path)

        self.output_text = QTextEdit()
        self.output_text.setReadOnly(True)

        self.add_drag_drop_events([
            self.GPT_model_input,
            self.SoVITS_model_input,
            self.ref_audio_input,
            self.ref_text_input,
            self.target_text_input,
            self.output_input,
        ])

        self.synthesize_button = QPushButton("合成")
        self.synthesize_button.clicked.connect(self.synthesize)

        self.clear_output_button = QPushButton("清空输出")
        self.clear_output_button.clicked.connect(self.clear_output)

        self.status_bar = QStatusBar()

        main_layout = QVBoxLayout()

        input_layout = QGridLayout(self)
        input_layout.setSpacing(10)

        input_layout.addWidget(license_label, 0, 0, 1, 3)

        input_layout.addWidget(self.GPT_model_label, 1, 0)
        input_layout.addWidget(self.GPT_model_input, 2, 0, 1, 2)
        input_layout.addWidget(self.GPT_model_button, 2, 2)

        input_layout.addWidget(self.SoVITS_model_label, 3, 0)
        input_layout.addWidget(self.SoVITS_model_input, 4, 0, 1, 2)
        input_layout.addWidget(self.SoVITS_model_button, 4, 2)

        input_layout.addWidget(self.ref_audio_label, 5, 0)
        input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2)
        input_layout.addWidget(self.ref_audio_button, 6, 2)

        input_layout.addWidget(self.ref_language_label, 7, 0)
        input_layout.addWidget(self.ref_language_combobox, 8, 0, 1, 1)
        input_layout.addWidget(self.ref_text_label, 9, 0)
        input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2)
        input_layout.addWidget(self.ref_text_button, 10, 2)

        input_layout.addWidget(self.target_language_label, 11, 0)
        input_layout.addWidget(self.target_language_combobox, 12, 0, 1, 1)
        input_layout.addWidget(self.target_text_label, 13, 0)
        input_layout.addWidget(self.target_text_input, 14, 0, 1, 2)
        input_layout.addWidget(self.target_text_button, 14, 2)

        input_layout.addWidget(self.output_label, 15, 0)
        input_layout.addWidget(self.output_input, 16, 0, 1, 2)
        input_layout.addWidget(self.output_button, 16, 2)

        main_layout.addLayout(input_layout)

        output_layout = QVBoxLayout()
        output_layout.addWidget(self.output_text)
        main_layout.addLayout(output_layout)

        main_layout.addWidget(self.synthesize_button)

        main_layout.addWidget(self.clear_output_button)

        main_layout.addWidget(self.status_bar)

        self.central_widget = QWidget()
        self.central_widget.setLayout(main_layout)
        self.setCentralWidget(self.central_widget)

    def dragEnterEvent(self, event):
        if event.mimeData().hasUrls():
            event.acceptProposedAction()

    def dropEvent(self, event):
        if event.mimeData().hasUrls():
            file_paths = [url.toLocalFile() for url in event.mimeData().urls()]
            if len(file_paths) == 1:
                self.update_ref_audio(file_paths[0])
            else:
                self.update_ref_audio(", ".join(file_paths))

    def add_drag_drop_events(self, widgets):
        for widget in widgets:
            widget.setAcceptDrops(True)
            widget.installEventFilter(self)

    def eventFilter(self, obj, event):
        if event.type() in (QEvent.DragEnter, QEvent.Drop):
            mime_data = event.mimeData()
            if mime_data.hasUrls():
                event.acceptProposedAction()

        return super().eventFilter(obj, event)
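    # Note on the pattern above: setAcceptDrops(True) plus installEventFilter(self)
    # lets one parent object vet drag/drop events for many child widgets from a
    # single place instead of subclassing each QLineEdit. A minimal sketch:
    #
    #     edit = QLineEdit()
    #     edit.setAcceptDrops(True)
    #     edit.installEventFilter(window)  # window.eventFilter() then sees the events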

    def select_GPT_model(self):
        file_path, _ = QFileDialog.getOpenFileName(self, "选择GPT模型文件", "", "GPT Files (*.ckpt)")
        if file_path:
            self.GPT_model_input.setText(file_path)

    def select_SoVITS_model(self):
        file_path, _ = QFileDialog.getOpenFileName(self, "选择SoVITS模型文件", "", "SoVITS Files (*.pth)")
        if file_path:
            self.SoVITS_model_input.setText(file_path)

    def select_ref_audio(self):
        file_path, _ = QFileDialog.getOpenFileName(self, "选择参考音频文件", "", "Audio Files (*.wav *.mp3)")
        if file_path:
            self.update_ref_audio(file_path)

    def upload_ref_text(self):
        file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
        if file_path:
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()
            self.ref_text_input.setText(content)

    def upload_target_text(self):
        file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
        if file_path:
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()
            self.target_text_input.setText(content)

    def select_output_path(self):
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        options |= QFileDialog.ShowDirsOnly

        folder_dialog = QFileDialog()
        folder_dialog.setOptions(options)
        folder_dialog.setFileMode(QFileDialog.Directory)

        if folder_dialog.exec_():
            folder_path = folder_dialog.selectedFiles()[0]
            self.output_input.setText(folder_path)

    def update_ref_audio(self, file_path):
        self.ref_audio_input.setText(file_path)

    def clear_output(self):
        self.output_text.clear()

    def synthesize(self):
        GPT_model_path = self.GPT_model_input.text()
        SoVITS_model_path = self.SoVITS_model_input.text()
        ref_audio_path = self.ref_audio_input.text()
        language_combobox = self.ref_language_combobox.currentText()
        language_combobox = i18n(language_combobox)
        ref_text = self.ref_text_input.text()
        target_language_combobox = self.target_language_combobox.currentText()
        target_language_combobox = i18n(target_language_combobox)
        target_text = self.target_text_input.text()
        output_path = self.output_input.text()

        if GPT_model_path != self.GPT_Path:
            change_gpt_weights(gpt_path=GPT_model_path)
            self.GPT_Path = GPT_model_path
        if SoVITS_model_path != self.SoVITS_Path:
            change_sovits_weights(sovits_path=SoVITS_model_path)
            self.SoVITS_Path = SoVITS_model_path

        synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
                                       prompt_text=ref_text,
                                       prompt_language=language_combobox,
                                       text=target_text,
                                       text_language=target_language_combobox)

        result_list = list(synthesis_result)

        if result_list:
            last_sampling_rate, last_audio_data = result_list[-1]
            output_wav_path = os.path.join(output_path, "output.wav")
            sf.write(output_wav_path, last_audio_data, last_sampling_rate)

            result = "Audio saved to " + output_wav_path

            self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000)
            self.output_text.append("处理结果:\n" + result)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    mainWin = GPTSoVITSGUI()
    mainWin.show()
    sys.exit(app.exec_())
@@ -1,952 +0,0 @@
'''
Recognize as mixed Chinese-English
Recognize as mixed Japanese-English
Multilingual: split the text and detect the language of each segment
Treat all text as Chinese
Treat all text as English
Treat all text as Japanese
'''
import logging
import traceback,torchaudio,warnings
logging.getLogger("markdown_it").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpcore").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("asyncio").setLevel(logging.ERROR)
logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
logging.getLogger("multipart.multipart").setLevel(logging.ERROR)
warnings.simplefilter(action='ignore', category=FutureWarning)

import os, re, sys, json
import pdb
import torch
from text.LangSegmenter import LangSegmenter

try:
    import gradio.analytics as analytics
    analytics.version_check = lambda:None
except:...
version=model_version=os.environ.get("version","v2")
path_sovits_v3="GPT_SoVITS/pretrained_models/s2Gv3.pth"
is_exist_s2gv3=os.path.exists(path_sovits_v3)
pretrained_sovits_name=["GPT_SoVITS/pretrained_models/s2G488k.pth", "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",path_sovits_v3]
pretrained_gpt_name=["GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt","GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1v3.ckpt"]


# keep only the pretrained checkpoints that actually exist on disk
_ =[[],[]]
for i in range(3):
    if os.path.exists(pretrained_gpt_name[i]):_[0].append(pretrained_gpt_name[i])
    if os.path.exists(pretrained_sovits_name[i]):_[-1].append(pretrained_sovits_name[i])
pretrained_gpt_name,pretrained_sovits_name = _

if os.path.exists("./weight.json"):
    pass
else:
    with open("./weight.json", 'w', encoding="utf-8") as file:json.dump({'GPT':{},'SoVITS':{}},file)

with open("./weight.json", 'r', encoding="utf-8") as file:
    weight_data = file.read()
    weight_data=json.loads(weight_data)
    gpt_path = os.environ.get(
        "gpt_path", weight_data.get('GPT',{}).get(version,pretrained_gpt_name))
    sovits_path = os.environ.get(
        "sovits_path", weight_data.get('SoVITS',{}).get(version,pretrained_sovits_name))
    if isinstance(gpt_path,list):
        gpt_path = gpt_path[0]
    if isinstance(sovits_path,list):
        sovits_path = sovits_path[0]
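# For illustration (hypothetical contents): after a few model switches,
# ./weight.json might look like
#   {"GPT": {"v2": "GPT_weights_v2/xxx-e15.ckpt"},
#    "SoVITS": {"v2": "SoVITS_weights_v2/xxx_e8.pth"}}
# i.e. the last-used checkpoint per model family, keyed by model version.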

# gpt_path = os.environ.get(
#     "gpt_path", pretrained_gpt_name
# )
# sovits_path = os.environ.get("sovits_path", pretrained_sovits_name)
cnhubert_base_path = os.environ.get(
    "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
)
bert_path = os.environ.get(
    "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
)
infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
infer_ttswebui = int(infer_ttswebui)
is_share = os.environ.get("is_share", "False")
is_share = eval(is_share)
if "_CUDA_VISIBLE_DEVICES" in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
# is_half=False
punctuation = set(['!', '?', '…', ',', '.', '-'," "])
import gradio as gr
from transformers import AutoModelForMaskedLM, AutoTokenizer
import numpy as np
import librosa
from feature_extractor import cnhubert

cnhubert.cnhubert_base_path = cnhubert_base_path

from GPT_SoVITS.module.models import SynthesizerTrn,SynthesizerTrnV3
import random
def set_seed(seed):
    if seed == -1:
        seed = random.randint(0, 1000000)
    seed = int(seed)
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
# set_seed(42)
from AR.models.t2s_lightning_module import Text2SemanticLightningModule
from text import cleaned_text_to_sequence
from text.cleaner import clean_text
from time import time as ttime
from tools.my_utils import load_audio
from tools.i18n.i18n import I18nAuto, scan_language_list
from peft import LoraConfig, PeftModel, get_peft_model

language=os.environ.get("language","Auto")
language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
i18n = I18nAuto(language=language)

# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # ensure this can also be set when launching the inference UI directly

if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
dict_language_v1 = {
    i18n("中文"): "all_zh",  # treat all text as Chinese
    i18n("英文"): "en",  # treat all text as English (unchanged)
    i18n("日文"): "all_ja",  # treat all text as Japanese
    i18n("中英混合"): "zh",  # mixed Chinese-English (unchanged)
    i18n("日英混合"): "ja",  # mixed Japanese-English (unchanged)
    i18n("多语种混合"): "auto",  # multilingual: split and detect language per segment
}
dict_language_v2 = {
    i18n("中文"): "all_zh",  # treat all text as Chinese
    i18n("英文"): "en",  # treat all text as English (unchanged)
    i18n("日文"): "all_ja",  # treat all text as Japanese
    i18n("粤语"): "all_yue",  # treat all text as Cantonese
    i18n("韩文"): "all_ko",  # treat all text as Korean
    i18n("中英混合"): "zh",  # mixed Chinese-English (unchanged)
    i18n("日英混合"): "ja",  # mixed Japanese-English (unchanged)
    i18n("粤英混合"): "yue",  # mixed Cantonese-English (unchanged)
    i18n("韩英混合"): "ko",  # mixed Korean-English (unchanged)
    i18n("多语种混合"): "auto",  # multilingual: split and detect language per segment
    i18n("多语种混合(粤语)"): "auto_yue",  # multilingual with Cantonese
}
dict_language = dict_language_v1 if version =='v1' else dict_language_v2
tokenizer = AutoTokenizer.from_pretrained(bert_path)
bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
if is_half == True:
    bert_model = bert_model.half().to(device)
else:
    bert_model = bert_model.to(device)


def get_bert_feature(text, word2ph):
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt")
        for i in inputs:
            inputs[i] = inputs[i].to(device)
        res = bert_model(**inputs, output_hidden_states=True)
        res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
    assert len(word2ph) == len(text)
    phone_level_feature = []
    for i in range(len(word2ph)):
        repeat_feature = res[i].repeat(word2ph[i], 1)
        phone_level_feature.append(repeat_feature)
    phone_level_feature = torch.cat(phone_level_feature, dim=0)
    return phone_level_feature.T
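# Worked example (illustrative): for a 2-character text with word2ph = [2, 3],
# res holds one 1024-dim row per character; row 0 is repeated twice and row 1
# three times, so phone_level_feature.T is a (1024, 5) tensor -- one column per
# phoneme.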
class DictToAttrRecursive(dict):
    def __init__(self, input_dict):
        super().__init__(input_dict)
        for key, value in input_dict.items():
            if isinstance(value, dict):
                value = DictToAttrRecursive(value)
            self[key] = value
            setattr(self, key, value)

    def __getattr__(self, item):
        try:
            return self[item]
        except KeyError:
            raise AttributeError(f"Attribute {item} not found")

    def __setattr__(self, key, value):
        if isinstance(value, dict):
            value = DictToAttrRecursive(value)
        super(DictToAttrRecursive, self).__setitem__(key, value)
        super().__setattr__(key, value)

    def __delattr__(self, item):
        try:
            del self[item]
        except KeyError:
            raise AttributeError(f"Attribute {item} not found")
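# Illustrative behavior:
#   cfg = DictToAttrRecursive({"data": {"sampling_rate": 32000}})
#   cfg.data.sampling_rate        # -> 32000; nested dicts become attribute access
#   cfg["data"]["sampling_rate"]  # plain dict indexing still works too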
ssl_model = cnhubert.get_model()
if is_half == True:
    ssl_model = ssl_model.half().to(device)
else:
    ssl_model = ssl_model.to(device)

resample_transform_dict={}
def resample(audio_tensor, sr0):
    global resample_transform_dict
    if sr0 not in resample_transform_dict:
        resample_transform_dict[sr0] = torchaudio.transforms.Resample(
            sr0, 24000
        ).to(device)
    return resample_transform_dict[sr0](audio_tensor)
###todo:put them to process_ckpt and modify my_save func (save sovits weights), gpt save weights use my_save in process_ckpt
#symbol_version-model_version-if_lora_v3
from process_ckpt import get_sovits_version_from_path_fast,load_sovits_new
def change_sovits_weights(sovits_path,prompt_language=None,text_language=None):
    global vq_model, hps, version, model_version, dict_language,if_lora_v3
    version, model_version, if_lora_v3=get_sovits_version_from_path_fast(sovits_path)
    # print(sovits_path,version, model_version, if_lora_v3)
    if if_lora_v3==True and is_exist_s2gv3==False:
        info= "GPT_SoVITS/pretrained_models/s2Gv3.pth" + i18n("SoVITS V3 底模缺失,无法加载相应 LoRA 权重")
        gr.Warning(info)
        raise FileExistsError(info)
    dict_language = dict_language_v1 if version =='v1' else dict_language_v2
    if prompt_language is not None and text_language is not None:
        if prompt_language in list(dict_language.keys()):
            prompt_text_update, prompt_language_update = {'__type__':'update'}, {'__type__':'update', 'value':prompt_language}
        else:
            prompt_text_update = {'__type__':'update', 'value':''}
            prompt_language_update = {'__type__':'update', 'value':i18n("中文")}
        if text_language in list(dict_language.keys()):
            text_update, text_language_update = {'__type__':'update'}, {'__type__':'update', 'value':text_language}
        else:
            text_update = {'__type__':'update', 'value':''}
            text_language_update = {'__type__':'update', 'value':i18n("中文")}
        if model_version=="v3":
            visible_sample_steps=True
            visible_inp_refs=False
        else:
            visible_sample_steps=False
            visible_inp_refs=True
        yield {'__type__':'update', 'choices':list(dict_language.keys())}, {'__type__':'update', 'choices':list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update,{"__type__": "update", "visible": visible_sample_steps},{"__type__": "update", "visible": visible_inp_refs},{"__type__": "update", "value": False,"interactive":True if model_version!="v3"else False},{"__type__": "update", "visible":True if model_version=="v3"else False}

    dict_s2 = load_sovits_new(sovits_path)
    hps = dict_s2["config"]
    hps = DictToAttrRecursive(hps)
    hps.model.semantic_frame_rate = "25hz"
    if 'enc_p.text_embedding.weight'not in dict_s2['weight']:
        hps.model.version = "v2"  # v3 model with v2 symbols
    elif dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
        hps.model.version = "v1"
    else:
        hps.model.version = "v2"
    version=hps.model.version
    # print("SoVITS version:", hps.model.version)
    if model_version!="v3":
        vq_model = SynthesizerTrn(
            hps.data.filter_length // 2 + 1,
            hps.train.segment_size // hps.data.hop_length,
            n_speakers=hps.data.n_speakers,
            **hps.model
        )
        model_version=version
    else:
        vq_model = SynthesizerTrnV3(
            hps.data.filter_length // 2 + 1,
            hps.train.segment_size // hps.data.hop_length,
            n_speakers=hps.data.n_speakers,
            **hps.model
        )
    if ("pretrained" not in sovits_path):
        try:
            del vq_model.enc_q
        except:pass
    if is_half == True:
        vq_model = vq_model.half().to(device)
    else:
        vq_model = vq_model.to(device)
    vq_model.eval()
    if if_lora_v3==False:
        print("loading sovits_%s"%model_version,vq_model.load_state_dict(dict_s2["weight"], strict=False))
    else:
        print("loading sovits_v3pretrained_G", vq_model.load_state_dict(load_sovits_new(path_sovits_v3)["weight"], strict=False))
        lora_rank=dict_s2["lora_rank"]
        lora_config = LoraConfig(
            target_modules=["to_k", "to_q", "to_v", "to_out.0"],
            r=lora_rank,
            lora_alpha=lora_rank,
            init_lora_weights=True,
        )
        vq_model.cfm = get_peft_model(vq_model.cfm, lora_config)
        print("loading sovits_v3_lora%s"%(lora_rank))
        vq_model.load_state_dict(dict_s2["weight"], strict=False)
        vq_model.cfm = vq_model.cfm.merge_and_unload()
        # torch.save(vq_model.state_dict(),"merge_win.pth")
        vq_model.eval()

    with open("./weight.json")as f:
        data=f.read()
        data=json.loads(data)
        data["SoVITS"][version]=sovits_path
    with open("./weight.json","w")as f:f.write(json.dumps(data))


# change_sovits_weights is a generator; called with no language arguments it
# skips the yield and simply loads the initial weights at import time.
try:next(change_sovits_weights(sovits_path))
except:pass
def change_gpt_weights(gpt_path):
    global hz, max_sec, t2s_model, config
    hz = 50
    dict_s1 = torch.load(gpt_path, map_location="cpu")
    config = dict_s1["config"]
    max_sec = config["data"]["max_sec"]
    t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
    t2s_model.load_state_dict(dict_s1["weight"])
    if is_half == True:
        t2s_model = t2s_model.half()
    t2s_model = t2s_model.to(device)
    t2s_model.eval()
    # total = sum([param.nelement() for param in t2s_model.parameters()])
    # print("Number of parameter: %.2fM" % (total / 1e6))
    with open("./weight.json")as f:
        data=f.read()
        data=json.loads(data)
        data["GPT"][version]=gpt_path
    with open("./weight.json","w")as f:f.write(json.dumps(data))


change_gpt_weights(gpt_path)
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
import soundfile
now_dir = os.getcwd()
def init_bigvgan():
    global bigvgan_model
    from BigVGAN import bigvgan
    bigvgan_model = bigvgan.BigVGAN.from_pretrained("%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,), use_cuda_kernel=False)  # if True, RuntimeError: Ninja is required to load C++ extensions
    # remove weight norm in the model and set to eval mode
    bigvgan_model.remove_weight_norm()
    bigvgan_model = bigvgan_model.eval()
    if is_half == True:
        bigvgan_model = bigvgan_model.half().to(device)
    else:
        bigvgan_model = bigvgan_model.to(device)

if model_version!="v3":bigvgan_model=None
else:init_bigvgan()
def get_spepc(hps, filename):
    # audio = load_audio(filename, int(hps.data.sampling_rate))
    audio, sampling_rate = librosa.load(filename, sr=int(hps.data.sampling_rate))
    audio = torch.FloatTensor(audio)
    maxx=audio.abs().max()
    if(maxx>1):audio/=min(2,maxx)
    audio_norm = audio
    audio_norm = audio_norm.unsqueeze(0)
    spec = spectrogram_torch(
        audio_norm,
        hps.data.filter_length,
        hps.data.sampling_rate,
        hps.data.hop_length,
        hps.data.win_length,
        center=False,
    )
    return spec

def clean_text_inf(text, language, version):
    language = language.replace("all_","")
    phones, word2ph, norm_text = clean_text(text, language, version)
    phones = cleaned_text_to_sequence(phones, version)
    return phones, word2ph, norm_text

dtype=torch.float16 if is_half == True else torch.float32
def get_bert_inf(phones, word2ph, norm_text, language):
    language=language.replace("all_","")
    if language == "zh":
        bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
    else:
        bert = torch.zeros(
            (1024, len(phones)),
            dtype=torch.float16 if is_half == True else torch.float32,
        ).to(device)

    return bert
splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }


def get_first(text):
    pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
    text = re.split(pattern, text)[0].strip()
    return text
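# Example: get_first("你好。世界!") returns "你好" -- everything before the
# first sentence-ending punctuation mark listed in `splits`.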
from text import chinese
def get_phones_and_bert(text,language,version,final=False):
    if language in {"en", "all_zh", "all_ja", "all_ko", "all_yue"}:
        formattext = text
        while "  " in formattext:
            formattext = formattext.replace("  ", " ")
        if language == "all_zh":
            if re.search(r'[A-Za-z]', formattext):
                formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
                formattext = chinese.mix_text_normalize(formattext)
                return get_phones_and_bert(formattext,"zh",version)
            else:
                phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
                bert = get_bert_feature(norm_text, word2ph).to(device)
        elif language == "all_yue" and re.search(r'[A-Za-z]', formattext):
            formattext = re.sub(r'[a-z]', lambda x: x.group(0).upper(), formattext)
            formattext = chinese.mix_text_normalize(formattext)
            return get_phones_and_bert(formattext,"yue",version)
        else:
            phones, word2ph, norm_text = clean_text_inf(formattext, language, version)
            bert = torch.zeros(
                (1024, len(phones)),
                dtype=torch.float16 if is_half == True else torch.float32,
            ).to(device)
    elif language in {"zh", "ja", "ko", "yue", "auto", "auto_yue"}:
        textlist=[]
        langlist=[]
        if language == "auto":
            for tmp in LangSegmenter.getTexts(text):
                langlist.append(tmp["lang"])
                textlist.append(tmp["text"])
        elif language == "auto_yue":
            for tmp in LangSegmenter.getTexts(text):
                if tmp["lang"] == "zh":
                    tmp["lang"] = "yue"
                langlist.append(tmp["lang"])
                textlist.append(tmp["text"])
        else:
            for tmp in LangSegmenter.getTexts(text):
                if tmp["lang"] == "en":
                    langlist.append(tmp["lang"])
                else:
                    # CJK characters cannot be told apart reliably, so trust the user-selected language
                    langlist.append(language)
                textlist.append(tmp["text"])
        print(textlist)
        print(langlist)
        phones_list = []
        bert_list = []
        norm_text_list = []
        for i in range(len(textlist)):
            lang = langlist[i]
            phones, word2ph, norm_text = clean_text_inf(textlist[i], lang, version)
            bert = get_bert_inf(phones, word2ph, norm_text, lang)
            phones_list.append(phones)
            norm_text_list.append(norm_text)
            bert_list.append(bert)
        bert = torch.cat(bert_list, dim=1)
        phones = sum(phones_list, [])
        norm_text = ''.join(norm_text_list)

    if not final and len(phones) < 6:
        return get_phones_and_bert("." + text,language,version,final=True)

    return phones,bert.to(dtype),norm_text
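# Illustrative flow for mixed input (hypothetical segmentation): with
# language="zh" and text="我用Python写代码", LangSegmenter might yield
# [{"lang": "zh", "text": "我用"}, {"lang": "en", "text": "Python"},
#  {"lang": "zh", "text": "写代码"}]; each segment is cleaned to phonemes
# separately, per-segment BERT features are concatenated along the time axis,
# and non-Chinese segments receive zero BERT features.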
from module.mel_processing import spectrogram_torch,mel_spectrogram_torch
spec_min = -12
spec_max = 2
def norm_spec(x):
    return (x - spec_min) / (spec_max - spec_min) * 2 - 1
def denorm_spec(x):
    return (x + 1) / 2 * (spec_max - spec_min) + spec_min
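# norm_spec maps mel values from [spec_min, spec_max] = [-12, 2] into [-1, 1];
# denorm_spec is its exact inverse, so denorm_spec(norm_spec(x)) == x.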
mel_fn=lambda x: mel_spectrogram_torch(x, **{
    "n_fft": 1024,
    "win_size": 1024,
    "hop_size": 256,
    "num_mels": 100,
    "sampling_rate": 24000,
    "fmin": 0,
    "fmax": None,
    "center": False
})
def merge_short_text_in_array(texts, threshold):
    if (len(texts)) < 2:
        return texts
    result = []
    text = ""
    for ele in texts:
        text += ele
        if len(text) >= threshold:
            result.append(text)
            text = ""
    if (len(text) > 0):
        if len(result) == 0:
            result.append(text)
        else:
            result[len(result) - 1] += text
    return result
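# Example: merge_short_text_in_array(["你好,", "我是", "测试文本。"], 5)
# returns ["你好,我是", "测试文本。"] -- pieces are joined until each chunk
# reaches at least 5 characters, and a short leftover is appended to the last chunk.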
sr_model=None
def audio_sr(audio,sr):
    global sr_model
    if sr_model==None:
        from tools.audio_sr import AP_BWE
        try:
            sr_model=AP_BWE(device,DictToAttrRecursive)
        except FileNotFoundError:
            gr.Warning(i18n("你没有下载超分模型的参数,因此不进行超分。如想超分请先参照教程把文件下载好"))
            return audio.cpu().detach().numpy(),sr
    return sr_model(audio,sr)
# cache key: ref_wav_path + prompt_text + prompt_language + text (single) + text_language + top_k + top_p + temperature
# cache_tokens = {}  # no cache-eviction mechanism implemented yet
cache= {}
def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=None,sample_steps=8,if_sr=False,pause_second=0.3):
    global cache
    if ref_wav_path:pass
    else:gr.Warning(i18n('请上传参考音频'))
    if text:pass
    else:gr.Warning(i18n('请填入推理文本'))
    t = []
    if prompt_text is None or len(prompt_text) == 0:
        ref_free = True
    if model_version=="v3":
        ref_free=False  # s2v3 does not support ref_free yet
    else:
        if_sr=False
    t0 = ttime()
    prompt_language = dict_language[prompt_language]
    text_language = dict_language[text_language]

    if not ref_free:
        prompt_text = prompt_text.strip("\n")
        if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
        print(i18n("实际输入的参考文本:"), prompt_text)
    text = text.strip("\n")
    # if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text

    print(i18n("实际输入的目标文本:"), text)
    zero_wav = np.zeros(
        int(hps.data.sampling_rate * pause_second),
        dtype=np.float16 if is_half == True else np.float32,
    )
    zero_wav_torch = torch.from_numpy(zero_wav)
    if is_half == True:
        zero_wav_torch = zero_wav_torch.half().to(device)
    else:
        zero_wav_torch = zero_wav_torch.to(device)
    if not ref_free:
        with torch.no_grad():
            wav16k, sr = librosa.load(ref_wav_path, sr=16000)
            if (wav16k.shape[0] > 160000 or wav16k.shape[0] < 48000):
                gr.Warning(i18n("参考音频在3~10秒范围外,请更换!"))
                raise OSError(i18n("参考音频在3~10秒范围外,请更换!"))
            wav16k = torch.from_numpy(wav16k)
            if is_half == True:
                wav16k = wav16k.half().to(device)
            else:
                wav16k = wav16k.to(device)
            wav16k = torch.cat([wav16k, zero_wav_torch])
            ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
                "last_hidden_state"
            ].transpose(
                1, 2
            )  # .float()
            codes = vq_model.extract_latent(ssl_content)
            prompt_semantic = codes[0, 0]
            prompt = prompt_semantic.unsqueeze(0).to(device)

    t1 = ttime()
    t.append(t1-t0)

    if (how_to_cut == i18n("凑四句一切")):
        text = cut1(text)
    elif (how_to_cut == i18n("凑50字一切")):
        text = cut2(text)
    elif (how_to_cut == i18n("按中文句号。切")):
        text = cut3(text)
    elif (how_to_cut == i18n("按英文句号.切")):
        text = cut4(text)
    elif (how_to_cut == i18n("按标点符号切")):
        text = cut5(text)
    while "\n\n" in text:
        text = text.replace("\n\n", "\n")
    print(i18n("实际输入的目标文本(切句后):"), text)
    texts = text.split("\n")
    texts = process_text(texts)
    texts = merge_short_text_in_array(texts, 5)
    audio_opt = []
    # s2v3 does not support ref_free yet
    if not ref_free:
        phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language, version)

    for i_text,text in enumerate(texts):
        # skip blank lines in the target text, which would otherwise raise errors
        if (len(text.strip()) == 0):
            continue
        if (text[-1] not in splits): text += "。" if text_language != "en" else "."
        print(i18n("实际输入的目标文本(每句):"), text)
        phones2,bert2,norm_text2=get_phones_and_bert(text, text_language, version)
        print(i18n("前端处理后的文本(每句):"), norm_text2)
        if not ref_free:
            bert = torch.cat([bert1, bert2], 1)
            all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
        else:
            bert = bert2
            all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)

        bert = bert.to(device).unsqueeze(0)
        all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)

        t2 = ttime()
        # cache_key="%s-%s-%s-%s-%s-%s-%s-%s"%(ref_wav_path,prompt_text,prompt_language,text,text_language,top_k,top_p,temperature)
        # print(cache.keys(),if_freeze)
        if(i_text in cache and if_freeze==True):pred_semantic=cache[i_text]
        else:
            with torch.no_grad():
                pred_semantic, idx = t2s_model.model.infer_panel(
                    all_phoneme_ids,
                    all_phoneme_len,
                    None if ref_free else prompt,
                    bert,
                    # prompt_phone_len=ph_offset,
                    top_k=top_k,
                    top_p=top_p,
                    temperature=temperature,
                    early_stop_num=hz * max_sec,
                )
                pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)
                cache[i_text]=pred_semantic
        t3 = ttime()
        # v3 has no inp_refs and does not use the branch below
        if model_version!="v3":
            refers=[]
            if(inp_refs):
                for path in inp_refs:
                    try:
                        refer = get_spepc(hps, path.name).to(dtype).to(device)
                        refers.append(refer)
                    except:
                        traceback.print_exc()
            if(len(refers)==0):refers = [get_spepc(hps, ref_wav_path).to(dtype).to(device)]
            audio = vq_model.decode(pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refers,speed=speed)[0][0]#.cpu().detach().numpy()
        else:
            refer = get_spepc(hps, ref_wav_path).to(device).to(dtype)
            phoneme_ids0=torch.LongTensor(phones1).to(device).unsqueeze(0)
            phoneme_ids1=torch.LongTensor(phones2).to(device).unsqueeze(0)
            # print(11111111, phoneme_ids0, phoneme_ids1)
            fea_ref,ge = vq_model.decode_encp(prompt.unsqueeze(0), phoneme_ids0, refer)
            ref_audio, sr = torchaudio.load(ref_wav_path)
            ref_audio=ref_audio.to(device).float()
            if (ref_audio.shape[0] == 2):
                ref_audio = ref_audio.mean(0).unsqueeze(0)
            if sr!=24000:
                ref_audio=resample(ref_audio,sr)
            # print("ref_audio",ref_audio.abs().mean())
            mel2 = mel_fn(ref_audio)
            mel2 = norm_spec(mel2)
            T_min = min(mel2.shape[2], fea_ref.shape[2])
            mel2 = mel2[:, :, :T_min]
            fea_ref = fea_ref[:, :, :T_min]
            if (T_min > 468):
                mel2 = mel2[:, :, -468:]
                fea_ref = fea_ref[:, :, -468:]
                T_min = 468
            chunk_len = 934 - T_min
            # print("fea_ref",fea_ref,fea_ref.shape)
            # print("mel2",mel2)
            mel2=mel2.to(dtype)
            fea_todo, ge = vq_model.decode_encp(pred_semantic, phoneme_ids1, refer, ge,speed)
            # print("fea_todo",fea_todo)
            # print("ge",ge.abs().mean())
            cfm_resss = []
            idx = 0
            while (1):
                fea_todo_chunk = fea_todo[:, :, idx:idx + chunk_len]
                if (fea_todo_chunk.shape[-1] == 0): break
                idx += chunk_len
                fea = torch.cat([fea_ref, fea_todo_chunk], 2).transpose(2, 1)
                # set_seed(123)
                cfm_res = vq_model.cfm.inference(fea, torch.LongTensor([fea.size(1)]).to(fea.device), mel2, sample_steps, inference_cfg_rate=0)
                cfm_res = cfm_res[:, :, mel2.shape[2]:]
                mel2 = cfm_res[:, :, -T_min:]
                # print("fea", fea)
                # print("mel2in", mel2)
                fea_ref = fea_todo_chunk[:, :, -T_min:]
                cfm_resss.append(cfm_res)
            cmf_res = torch.cat(cfm_resss, 2)
            cmf_res = denorm_spec(cmf_res)
            if bigvgan_model==None:init_bigvgan()
            with torch.inference_mode():
                wav_gen = bigvgan_model(cmf_res)
                audio=wav_gen[0][0]#.cpu().detach().numpy()
        max_audio=torch.abs(audio).max()  # simple guard against 16-bit clipping
        if max_audio>1:audio=audio/max_audio
        audio_opt.append(audio)
        audio_opt.append(zero_wav_torch)  # zero_wav
        t4 = ttime()
        t.extend([t2 - t1,t3 - t2, t4 - t3])
        t1 = ttime()
    print("%.3f\t%.3f\t%.3f\t%.3f" % (t[0], sum(t[1::3]), sum(t[2::3]), sum(t[3::3])))
    audio_opt=torch.cat(audio_opt, 0)  # np.concatenate
    sr=hps.data.sampling_rate if model_version!="v3"else 24000
    if if_sr==True and sr==24000:
        print(i18n("音频超分中"))
        audio_opt,sr=audio_sr(audio_opt.unsqueeze(0),sr)
        max_audio=np.abs(audio_opt).max()
        if max_audio > 1: audio_opt /= max_audio
    else:
        audio_opt=audio_opt.cpu().detach().numpy()
    yield sr, (audio_opt * 32767).astype(np.int16)
def split(todo_text):
    todo_text = todo_text.replace("……", "。").replace("——", ",")
    if todo_text[-1] not in splits:
        todo_text += "。"
    i_split_head = i_split_tail = 0
    len_text = len(todo_text)
    todo_texts = []
    while 1:
        if i_split_head >= len_text:
            break  # the text is guaranteed to end with punctuation, so the last segment was already appended
        if todo_text[i_split_head] in splits:
            i_split_head += 1
            todo_texts.append(todo_text[i_split_tail:i_split_head])
            i_split_tail = i_split_head
        else:
            i_split_head += 1
    return todo_texts
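# Example: split("一句。两句!三句?") returns ["一句。", "两句!", "三句?"];
# a missing trailing punctuation mark is patched with "。" first.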
def cut1(inp):
    inp = inp.strip("\n")
    inps = split(inp)
    split_idx = list(range(0, len(inps), 4))
    split_idx[-1] = None
    if len(split_idx) > 1:
        opts = []
        for idx in range(len(split_idx) - 1):
            opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
    else:
        opts = [inp]
    opts = [item for item in opts if not set(item).issubset(punctuation)]
    return "\n".join(opts)
def cut2(inp):
    inp = inp.strip("\n")
    inps = split(inp)
    if len(inps) < 2:
        return inp
    opts = []
    summ = 0
    tmp_str = ""
    for i in range(len(inps)):
        summ += len(inps[i])
        tmp_str += inps[i]
        if summ > 50:
            summ = 0
            opts.append(tmp_str)
            tmp_str = ""
    if tmp_str != "":
        opts.append(tmp_str)
    # print(opts)
    if len(opts) > 1 and len(opts[-1]) < 50:  # if the last chunk is too short, merge it with the previous one
        opts[-2] = opts[-2] + opts[-1]
        opts = opts[:-1]
    opts = [item for item in opts if not set(item).issubset(punctuation)]
    return "\n".join(opts)
def cut3(inp):
    inp = inp.strip("\n")
    opts = ["%s" % item for item in inp.strip("。").split("。")]
    opts = [item for item in opts if not set(item).issubset(punctuation)]
    return "\n".join(opts)

def cut4(inp):
    inp = inp.strip("\n")
    opts = re.split(r'(?<!\d)\.(?!\d)', inp.strip("."))
    opts = [item for item in opts if not set(item).issubset(punctuation)]
    return "\n".join(opts)


# contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
def cut5(inp):
    inp = inp.strip("\n")
    punds = {',', '.', ';', '?', '!', '、', ',', '。', '?', '!', ';', ':', '…'}
    mergeitems = []
    items = []

    for i, char in enumerate(inp):
        if char in punds:
            if char == '.' and i > 0 and i < len(inp) - 1 and inp[i - 1].isdigit() and inp[i + 1].isdigit():
                items.append(char)
            else:
                items.append(char)
                mergeitems.append("".join(items))
                items = []
        else:
            items.append(char)

    if items:
        mergeitems.append("".join(items))

    opt = [item for item in mergeitems if not set(item).issubset(punds)]
    return "\n".join(opt)
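# Example: cut5("今天下雨,记得带伞。气温3.5度。") returns
# "今天下雨,\n记得带伞。\n气温3.5度。" -- the decimal point in "3.5" is kept
# inline because it sits between two digits.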
def custom_sort_key(s):
    # use a regex to split the string into digit and non-digit parts
    parts = re.split(r'(\d+)', s)
    # convert the digit parts to integers; leave the other parts unchanged
    parts = [int(part) if part.isdigit() else part for part in parts]
    return parts
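# Example: sorted(["e9.ckpt", "e10.ckpt"], key=custom_sort_key) gives
# ["e9.ckpt", "e10.ckpt"] (numeric order), whereas plain string sorting would
# put "e10.ckpt" first.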
def process_text(texts):
    _text=[]
    if all(text in [None, " ", "\n",""] for text in texts):
        raise ValueError(i18n("请输入有效文本"))
    for text in texts:
        if text in [None, " ", ""]:
            pass
        else:
            _text.append(text)
    return _text


def change_choices():
    SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
    return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}


SoVITS_weight_root=["SoVITS_weights","SoVITS_weights_v2","SoVITS_weights_v3"]
GPT_weight_root=["GPT_weights","GPT_weights_v2","GPT_weights_v3"]
for path in SoVITS_weight_root+GPT_weight_root:
    os.makedirs(path,exist_ok=True)


def get_weights_names(GPT_weight_root, SoVITS_weight_root):
    SoVITS_names = [i for i in pretrained_sovits_name]
    for path in SoVITS_weight_root:
        for name in os.listdir(path):
            if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
    GPT_names = [i for i in pretrained_gpt_name]
    for path in GPT_weight_root:
        for name in os.listdir(path):
            if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
    return SoVITS_names, GPT_names


SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
def html_center(text, label='p'):
    return f"""<div style="text-align: center; margin: 100; padding: 50;">
                <{label} style="margin: 0; padding: 0;">{text}</{label}>
                </div>"""

def html_left(text, label='p'):
    return f"""<div style="text-align: left; margin: 0; padding: 0;">
                <{label} style="margin: 0; padding: 0;">{text}</{label}>
                </div>"""
with gr.Blocks(title="GPT-SoVITS WebUI") as app:
|
|
||||||
gr.Markdown(
|
|
||||||
value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.") + "<br>" + i18n("如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
|
|
||||||
)
|
|
||||||
with gr.Group():
|
|
||||||
gr.Markdown(html_center(i18n("模型切换"),'h3'))
|
|
||||||
with gr.Row():
|
|
||||||
GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True, scale=14)
|
|
||||||
SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True, scale=14)
|
|
||||||
refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary", scale=14)
|
|
            refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
        gr.Markdown(html_center(i18n("*请上传并填写参考信息"), 'h3'))
        with gr.Row():
            inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath", scale=13)
            with gr.Column(scale=13):
                ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。") + i18n("v3暂不支持该模式,使用了会报错。"), value=False, interactive=True, show_label=True, scale=1)
                gr.Markdown(html_left(i18n("使用无参考文本模式时建议使用微调的GPT") + "<br>" + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。")))
                prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5, scale=1)
            with gr.Column(scale=14):
                prompt_language = gr.Dropdown(
                    label=i18n("参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文"),
                )
                inp_refs = gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"), file_count="multiple") if model_version != "v3" else gr.File(label=i18n("可选项:通过拖拽多个文件上传多个参考音频(建议同性),平均融合他们的音色。如不填写此项,音色由左侧单个参考音频控制。如是微调模型,建议参考音频全部在微调训练集音色内,底模不用管。"), file_count="multiple", visible=False)
                sample_steps = gr.Radio(label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"), value=32, choices=[4, 8, 16, 32], visible=True) if model_version == "v3" else gr.Radio(label=i18n("采样步数,如果觉得电,提高试试,如果觉得慢,降低试试"), choices=[4, 8, 16, 32], visible=False, value=32)
                if_sr_Checkbox = gr.Checkbox(label=i18n("v3输出如果觉得闷可以试试开超分"), value=False, interactive=True, show_label=True, visible=False if model_version != "v3" else True)
        gr.Markdown(html_center(i18n("*请填写需要合成的目标文本和语种模式"), 'h3'))
        with gr.Row():
            with gr.Column(scale=13):
                text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=26, max_lines=26)
            with gr.Column(scale=7):
                text_language = gr.Dropdown(
                    label=i18n("需要合成的语种") + i18n(".限制范围越小判别效果越好。"), choices=list(dict_language.keys()), value=i18n("中文"), scale=1
                )
                how_to_cut = gr.Dropdown(
                    label=i18n("怎么切"),
                    choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
                    value=i18n("凑四句一切"),
                    interactive=True, scale=1
                )
                gr.Markdown(value=html_center(i18n("语速调整,高为更快")))
                if_freeze = gr.Checkbox(label=i18n("是否直接对上次合成结果调整语速和音色。防止随机性。"), value=False, interactive=True, show_label=True, scale=1)
                with gr.Row():
                    speed = gr.Slider(minimum=0.6, maximum=1.65, step=0.05, label=i18n("语速"), value=1, interactive=True, scale=1)
                    pause_second_slider = gr.Slider(minimum=0.1, maximum=0.5, step=0.01, label=i18n("句间停顿秒数"), value=0.3, interactive=True, scale=1)
                gr.Markdown(html_center(i18n("GPT采样参数(无参考文本时不要太低。不懂就用默认):")))
                top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=15, interactive=True, scale=1)
                top_p = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True, scale=1)
                temperature = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True, scale=1)
            # with gr.Column():
            #     gr.Markdown(value=i18n("手工调整音素。当音素框不为空时使用手工音素输入推理,无视目标文本框。"))
            #     phoneme=gr.Textbox(label=i18n("音素框"), value="")
            #     get_phoneme_button = gr.Button(i18n("目标文本转音素"), variant="primary")
        with gr.Row():
            inference_button = gr.Button(i18n("合成语音"), variant="primary", size='lg', scale=25)
            output = gr.Audio(label=i18n("输出的语音"), scale=14)

        inference_button.click(
            get_tts_wav,
            [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free, speed, if_freeze, inp_refs, sample_steps, if_sr_Checkbox, pause_second_slider],
            [output],
        )
        SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown, prompt_language, text_language], [prompt_language, text_language, prompt_text, prompt_language, text, text_language, sample_steps, inp_refs, ref_text_free, if_sr_Checkbox])
        GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])

    # gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
    # with gr.Row():
    #     text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
    #     button1 = gr.Button(i18n("凑四句一切"), variant="primary")
    #     button2 = gr.Button(i18n("凑50字一切"), variant="primary")
    #     button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
    #     button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
    #     button5 = gr.Button(i18n("按标点符号切"), variant="primary")
    #     text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
    #     button1.click(cut1, [text_inp], [text_opt])
    #     button2.click(cut2, [text_inp], [text_opt])
    #     button3.click(cut3, [text_inp], [text_opt])
    #     button4.click(cut4, [text_inp], [text_opt])
    #     button5.click(cut5, [text_inp], [text_opt])
    # gr.Markdown(html_center(i18n("后续将支持转音素、手工修改音素、语音合成分步执行。")))

if __name__ == '__main__':
    app.queue().launch(  # concurrency_count=511, max_size=1022
        server_name="0.0.0.0",
        inbrowser=True,
        share=is_share,
        server_port=infer_ttswebui,
        quiet=True,
    )
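Both of the deleted WebUIs are configured through environment variables rather than CLI flags. A minimal launch sketch (the checkpoint paths are hypothetical; the variable names come from the modules' own os.environ.get calls further below):

import os
import subprocess
import sys

os.environ.update({
    "gpt_path": "GPT_weights_v2/example-e15.ckpt",           # hypothetical GPT checkpoint
    "sovits_path": "SoVITS_weights_v2/example_e8_s800.pth",  # hypothetical SoVITS checkpoint
    "infer_ttswebui": "9872",  # HTTP port of the Gradio app
    "is_share": "False",       # "True" requests a public Gradio share link
})
subprocess.run([sys.executable, "GPT_SoVITS/inference_webui.py"], check=True)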
@ -1,336 +0,0 @@
'''
Recognize as mixed Chinese and English
Recognize as mixed Japanese and English
Multilingual: split the text and detect each segment's language
Recognize everything as Chinese
Recognize everything as English
Recognize everything as Japanese
'''
import random
import os, re, logging
import sys
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))

logging.getLogger("markdown_it").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpcore").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("asyncio").setLevel(logging.ERROR)
logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
import pdb
import torch

try:
    import gradio.analytics as analytics
    analytics.version_check = lambda: None  # disable gradio's version phone-home
except Exception:
    pass


infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
infer_ttswebui = int(infer_ttswebui)
is_share = os.environ.get("is_share", "False")
is_share = eval(is_share)
if "_CUDA_VISIBLE_DEVICES" in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]

is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
gpt_path = os.environ.get("gpt_path", None)
sovits_path = os.environ.get("sovits_path", None)
cnhubert_base_path = os.environ.get("cnhubert_base_path", None)
bert_path = os.environ.get("bert_path", None)
version = os.environ.get("version", "v2")

import gradio as gr
from TTS_infer_pack.TTS import TTS, TTS_Config
from TTS_infer_pack.text_segmentation_method import get_method
from tools.i18n.i18n import I18nAuto, scan_language_list

language = os.environ.get("language", "Auto")
language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
i18n = I18nAuto(language=language)


# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # make sure this is also set when the inference UI is launched directly.

if torch.cuda.is_available():
    device = "cuda"
# elif torch.backends.mps.is_available():
#     device = "mps"
else:
    device = "cpu"

dict_language_v1 = {
    i18n("中文"): "all_zh",      # recognize everything as Chinese
    i18n("英文"): "en",          # recognize everything as English (unchanged)
    i18n("日文"): "all_ja",      # recognize everything as Japanese
    i18n("中英混合"): "zh",      # recognize as mixed Chinese and English (unchanged)
    i18n("日英混合"): "ja",      # recognize as mixed Japanese and English (unchanged)
    i18n("多语种混合"): "auto",  # multilingual: split the text and detect each segment's language
}
dict_language_v2 = {
    i18n("中文"): "all_zh",      # recognize everything as Chinese
    i18n("英文"): "en",          # recognize everything as English (unchanged)
    i18n("日文"): "all_ja",      # recognize everything as Japanese
    i18n("粤语"): "all_yue",     # recognize everything as Cantonese
    i18n("韩文"): "all_ko",      # recognize everything as Korean
    i18n("中英混合"): "zh",      # recognize as mixed Chinese and English (unchanged)
    i18n("日英混合"): "ja",      # recognize as mixed Japanese and English (unchanged)
    i18n("粤英混合"): "yue",     # recognize as mixed Cantonese and English (unchanged)
    i18n("韩英混合"): "ko",      # recognize as mixed Korean and English (unchanged)
    i18n("多语种混合"): "auto",  # multilingual: split the text and detect each segment's language
    i18n("多语种混合(粤语)"): "auto_yue",  # multilingual (Cantonese): split and detect the language
}
dict_language = dict_language_v1 if version == 'v1' else dict_language_v2

cut_method = {
    i18n("不切"): "cut0",
    i18n("凑四句一切"): "cut1",
    i18n("凑50字一切"): "cut2",
    i18n("按中文句号。切"): "cut3",
    i18n("按英文句号.切"): "cut4",
    i18n("按标点符号切"): "cut5",
}
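The labels above are resolved at call time via get_method (imported from TTS_infer_pack.text_segmentation_method), which maps a cut-method id ("cut0".."cut5") to a segmentation callable. A small usage sketch — the output format is assumed to be newline-joined segments, since the splitter tool further below feeds the result straight into a Textbox:

seg = get_method(cut_method[i18n("凑四句一切")])  # "cut1": regroup every four sentences
print(seg("一句。两句。三句。四句。五句。六句。"))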

tts_config = TTS_Config("GPT_SoVITS/configs/tts_infer.yaml")
tts_config.device = device
tts_config.is_half = is_half
tts_config.version = version
if gpt_path is not None:
    tts_config.t2s_weights_path = gpt_path
if sovits_path is not None:
    tts_config.vits_weights_path = sovits_path
if cnhubert_base_path is not None:
    tts_config.cnhuhbert_base_path = cnhubert_base_path
if bert_path is not None:
    tts_config.bert_base_path = bert_path

print(tts_config)
tts_pipeline = TTS(tts_config)
gpt_path = tts_config.t2s_weights_path
sovits_path = tts_config.vits_weights_path
version = tts_config.version


def inference(text, text_lang,
              ref_audio_path,
              aux_ref_audio_paths,
              prompt_text,
              prompt_lang, top_k,
              top_p, temperature,
              text_split_method, batch_size,
              speed_factor, ref_text_free,
              split_bucket, fragment_interval,
              seed, keep_random, parallel_infer,
              repetition_penalty
              ):
    seed = -1 if keep_random else seed
    actual_seed = seed if seed not in [-1, "", None] else random.randrange(1 << 32)
    inputs = {
        "text": text,
        "text_lang": dict_language[text_lang],
        "ref_audio_path": ref_audio_path,
        "aux_ref_audio_paths": [item.name for item in aux_ref_audio_paths] if aux_ref_audio_paths is not None else [],
        "prompt_text": prompt_text if not ref_text_free else "",
        "prompt_lang": dict_language[prompt_lang],
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "text_split_method": cut_method[text_split_method],
        "batch_size": int(batch_size),
        "speed_factor": float(speed_factor),
        "split_bucket": split_bucket,
        "return_fragment": False,
        "fragment_interval": fragment_interval,
        "seed": actual_seed,
        "parallel_infer": parallel_infer,
        "repetition_penalty": repetition_penalty,
    }
    for item in tts_pipeline.run(inputs):
        yield item, actual_seed
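inference is a generator: each item produced by tts_pipeline.run is forwarded together with the resolved seed, which lets Gradio stream partial results and echo the seed back into the UI. Consumed directly it would look like this (a sketch; the pipeline is assumed to yield (sampling_rate, waveform) tuples, matching the gr.Audio output it is wired to below, and ref.wav is a hypothetical 3-10 s reference clip):

for (sr, wav), used_seed in inference(
    text="明月几时有,把酒问青天。", text_lang=i18n("中文"),
    ref_audio_path="ref.wav", aux_ref_audio_paths=None,
    prompt_text="参考音频的文本", prompt_lang=i18n("中文"),
    top_k=5, top_p=1, temperature=1,
    text_split_method=i18n("凑四句一切"), batch_size=20,
    speed_factor=1.0, ref_text_free=False,
    split_bucket=True, fragment_interval=0.3,
    seed=-1, keep_random=True, parallel_infer=True,
    repetition_penalty=1.35,
):
    print(used_seed, sr, wav.shape)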

def custom_sort_key(s):
    # use a regex to split the string into digit and non-digit parts
    parts = re.split(r'(\d+)', s)
    # convert the digit parts to integers, leave the rest as strings
    parts = [int(part) if part.isdigit() else part for part in parts]
    return parts
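custom_sort_key implements a natural sort: digit runs compare as integers, so checkpoint names order the way a human expects:

names = ["model_e10.pth", "model_e2.pth", "model_e1.pth"]
print(sorted(names, key=custom_sort_key))
# -> ['model_e1.pth', 'model_e2.pth', 'model_e10.pth']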

def change_choices():
    SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
    return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}


pretrained_sovits_name = ["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
pretrained_gpt_name = ["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
_ = [[], []]
for i in range(2):
    if os.path.exists(pretrained_gpt_name[i]):
        _[0].append(pretrained_gpt_name[i])
    if os.path.exists(pretrained_sovits_name[i]):
        _[-1].append(pretrained_sovits_name[i])
pretrained_gpt_name, pretrained_sovits_name = _

SoVITS_weight_root = ["SoVITS_weights_v2", "SoVITS_weights"]
GPT_weight_root = ["GPT_weights_v2", "GPT_weights"]
for path in SoVITS_weight_root + GPT_weight_root:
    os.makedirs(path, exist_ok=True)


def get_weights_names(GPT_weight_root, SoVITS_weight_root):
    SoVITS_names = [i for i in pretrained_sovits_name]
    for path in SoVITS_weight_root:
        for name in os.listdir(path):
            if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (path, name))
    GPT_names = [i for i in pretrained_gpt_name]
    for path in GPT_weight_root:
        for name in os.listdir(path):
            if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (path, name))
    return SoVITS_names, GPT_names


SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)


def change_sovits_weights(sovits_path, prompt_language=None, text_language=None):
    tts_pipeline.init_vits_weights(sovits_path)
    global version, dict_language
    dict_language = dict_language_v1 if tts_pipeline.configs.version == 'v1' else dict_language_v2
    if prompt_language is not None and text_language is not None:
        if prompt_language in list(dict_language.keys()):
            prompt_text_update, prompt_language_update = {'__type__': 'update'}, {'__type__': 'update', 'value': prompt_language}
        else:
            prompt_text_update = {'__type__': 'update', 'value': ''}
            prompt_language_update = {'__type__': 'update', 'value': i18n("中文")}
        if text_language in list(dict_language.keys()):
            text_update, text_language_update = {'__type__': 'update'}, {'__type__': 'update', 'value': text_language}
        else:
            text_update = {'__type__': 'update', 'value': ''}
            text_language_update = {'__type__': 'update', 'value': i18n("中文")}
        return {'__type__': 'update', 'choices': list(dict_language.keys())}, {'__type__': 'update', 'choices': list(dict_language.keys())}, prompt_text_update, prompt_language_update, text_update, text_language_update


with gr.Blocks(title="GPT-SoVITS WebUI") as app:
    gr.Markdown(
        value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.") + "<br>" + i18n("如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
    )

    with gr.Column():
        # with gr.Group():
        gr.Markdown(value=i18n("模型切换"))
        with gr.Row():
            GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True)
            SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True)
            refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
            refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])

    with gr.Row():
        with gr.Column():
            gr.Markdown(value=i18n("*请上传并填写参考信息"))
            with gr.Row():
                inp_ref = gr.Audio(label=i18n("主参考音频(请上传3~10秒内参考音频,超过会报错!)"), type="filepath")
                inp_refs = gr.File(label=i18n("辅参考音频(可选多个,或不选)"), file_count="multiple")
            prompt_text = gr.Textbox(label=i18n("主参考音频的文本"), value="", lines=2)
            with gr.Row():
                prompt_language = gr.Dropdown(
                    label=i18n("主参考音频的语种"), choices=list(dict_language.keys()), value=i18n("中文")
                )
                with gr.Column():
                    ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=True, show_label=True)
                    gr.Markdown(i18n("使用无参考文本模式时建议使用微调的GPT") + "<br>" + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。"))

        with gr.Column():
            gr.Markdown(value=i18n("*请填写需要合成的目标文本和语种模式"))
            text = gr.Textbox(label=i18n("需要合成的文本"), value="", lines=20, max_lines=20)
            text_language = gr.Dropdown(
                label=i18n("需要合成的文本的语种"), choices=list(dict_language.keys()), value=i18n("中文")
            )

    with gr.Group():
        gr.Markdown(value=i18n("推理设置"))
        with gr.Row():

            with gr.Column():
                batch_size = gr.Slider(minimum=1, maximum=200, step=1, label=i18n("batch_size"), value=20, interactive=True)
                fragment_interval = gr.Slider(minimum=0.01, maximum=1, step=0.01, label=i18n("分段间隔(秒)"), value=0.3, interactive=True)
                speed_factor = gr.Slider(minimum=0.6, maximum=1.65, step=0.05, label="speed_factor", value=1.0, interactive=True)
                top_k = gr.Slider(minimum=1, maximum=100, step=1, label=i18n("top_k"), value=5, interactive=True)
                top_p = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("top_p"), value=1, interactive=True)
                temperature = gr.Slider(minimum=0, maximum=1, step=0.05, label=i18n("temperature"), value=1, interactive=True)
                repetition_penalty = gr.Slider(minimum=0, maximum=2, step=0.05, label=i18n("重复惩罚"), value=1.35, interactive=True)
            with gr.Column():
                with gr.Row():
                    how_to_cut = gr.Dropdown(
                        label=i18n("怎么切"),
                        choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
                        value=i18n("凑四句一切"),
                        interactive=True, scale=1
                    )
                    parallel_infer = gr.Checkbox(label=i18n("并行推理"), value=True, interactive=True, show_label=True)
                    split_bucket = gr.Checkbox(label=i18n("数据分桶(并行推理时会降低一点计算量)"), value=True, interactive=True, show_label=True)

                with gr.Row():
                    seed = gr.Number(label=i18n("随机种子"), value=-1)
                    keep_random = gr.Checkbox(label=i18n("保持随机"), value=True, interactive=True, show_label=True)

                output = gr.Audio(label=i18n("输出的语音"))
                with gr.Row():
                    inference_button = gr.Button(i18n("合成语音"), variant="primary")
                    stop_infer = gr.Button(i18n("终止合成"), variant="primary")

        inference_button.click(
            inference,
            [
                text, text_language, inp_ref, inp_refs,
                prompt_text, prompt_language,
                top_k, top_p, temperature,
                how_to_cut, batch_size,
                speed_factor, ref_text_free,
                split_bucket, fragment_interval,
                seed, keep_random, parallel_infer,
                repetition_penalty
            ],
            [output, seed],
        )
        stop_infer.click(tts_pipeline.stop, [], [])
        SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown, prompt_language, text_language], [prompt_language, text_language, prompt_text, prompt_language, text, text_language])
        GPT_dropdown.change(tts_pipeline.init_t2s_weights, [GPT_dropdown], [])

    with gr.Group():
        gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
        with gr.Row():
            text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="", lines=4)
            with gr.Column():
                _how_to_cut = gr.Radio(
                    label=i18n("怎么切"),
                    choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
                    value=i18n("凑四句一切"),
                    interactive=True,
                )
                cut_text = gr.Button(i18n("切分"), variant="primary")

            def to_cut(text_inp, how_to_cut):
                if len(text_inp.strip()) == 0 or text_inp == []:
                    return ""
                method = get_method(cut_method[how_to_cut])
                return method(text_inp)

            text_opt = gr.Textbox(label=i18n("切分后文本"), value="", lines=4)
            cut_text.click(to_cut, [text_inp, _how_to_cut], [text_opt])
    gr.Markdown(value=i18n("后续将支持转音素、手工修改音素、语音合成分步执行。"))

if __name__ == '__main__':
    app.queue().launch(  # concurrency_count=511, max_size=1022
        server_name="0.0.0.0",
        inbrowser=True,
        share=is_share,
        server_port=infer_ttswebui,
        quiet=True,
    )
@ -1,179 +0,0 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/train_t2s.py
import os
import pdb

if "_CUDA_VISIBLE_DEVICES" in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
import argparse
import logging
from pathlib import Path

import torch, platform
from pytorch_lightning import seed_everything
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger  # WandbLogger
from pytorch_lightning.strategies import DDPStrategy
from AR.data.data_module import Text2SemanticDataModule
from AR.models.t2s_lightning_module import Text2SemanticLightningModule
from AR.utils.io import load_yaml_config

logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
torch.set_float32_matmul_precision("high")
from AR.utils import get_newest_ckpt

from collections import OrderedDict
from time import time as ttime
import shutil
from process_ckpt import my_save


class my_model_ckpt(ModelCheckpoint):
    def __init__(
        self,
        config,
        if_save_latest,
        if_save_every_weights,
        half_weights_save_dir,
        exp_name,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.if_save_latest = if_save_latest
        self.if_save_every_weights = if_save_every_weights
        self.half_weights_save_dir = half_weights_save_dir
        self.exp_name = exp_name
        self.config = config

    def on_train_epoch_end(self, trainer, pl_module):
        # if not self._should_skip_saving_checkpoint(trainer) and self._should_save_on_train_epoch_end(trainer):
        if self._should_save_on_train_epoch_end(trainer):
            monitor_candidates = self._monitor_candidates(trainer)
            if (
                self._every_n_epochs >= 1
                and (trainer.current_epoch + 1) % self._every_n_epochs == 0
            ):
                if (
                    self.if_save_latest == True
                ):  # if only the latest ckpt is kept, clear out all older ckpts after the next one is saved
                    to_clean = list(os.listdir(self.dirpath))
                self._save_topk_checkpoint(trainer, monitor_candidates)
                if self.if_save_latest == True:
                    for name in to_clean:
                        try:
                            os.remove("%s/%s" % (self.dirpath, name))
                        except:
                            pass
                if self.if_save_every_weights == True:
                    to_save_od = OrderedDict()
                    to_save_od["weight"] = OrderedDict()
                    dictt = trainer.strategy._lightning_module.state_dict()
                    for key in dictt:
                        to_save_od["weight"][key] = dictt[key].half()
                    to_save_od["config"] = self.config
                    to_save_od["info"] = "GPT-e%s" % (trainer.current_epoch + 1)
                    # torch.save(
                    # print(os.environ)
                    if (os.environ.get("LOCAL_RANK", "0") == "0"):
                        my_save(
                            to_save_od,
                            "%s/%s-e%s.ckpt"
                            % (
                                self.half_weights_save_dir,
                                self.exp_name,
                                trainer.current_epoch + 1,
                            ),
                        )
            self._save_last_checkpoint(trainer, monitor_candidates)


def main(args):
    config = load_yaml_config(args.config_file)

    output_dir = Path(config["output_dir"])
    output_dir.mkdir(parents=True, exist_ok=True)

    ckpt_dir = output_dir / "ckpt"
    ckpt_dir.mkdir(parents=True, exist_ok=True)

    seed_everything(config["train"]["seed"], workers=True)
    ckpt_callback: ModelCheckpoint = my_model_ckpt(
        config=config,
        if_save_latest=config["train"]["if_save_latest"],
        if_save_every_weights=config["train"]["if_save_every_weights"],
        half_weights_save_dir=config["train"]["half_weights_save_dir"],
        exp_name=config["train"]["exp_name"],
        save_top_k=-1,
        monitor="top_3_acc",
        mode="max",
        save_on_train_epoch_end=True,
        every_n_epochs=config["train"]["save_every_n_epoch"],
        dirpath=ckpt_dir,
    )
    logger = TensorBoardLogger(name=output_dir.stem, save_dir=output_dir)
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["USE_LIBUV"] = "0"
    trainer: Trainer = Trainer(
        max_epochs=config["train"]["epochs"],
        accelerator="gpu" if torch.cuda.is_available() else "cpu",
        # val_check_interval=9999999999999999999999,  # do not validate
        # check_val_every_n_epoch=None,
        limit_val_batches=0,
        devices=-1 if torch.cuda.is_available() else 1,
        benchmark=False,
        fast_dev_run=False,
        strategy=DDPStrategy(
            process_group_backend="nccl" if platform.system() != "Windows" else "gloo"
        ) if torch.cuda.is_available() else "auto",
        precision=config["train"]["precision"],
        logger=logger,
        num_sanity_val_steps=0,
        callbacks=[ckpt_callback],
        use_distributed_sampler=False,  # a very simple change, but it fixes the mismatch in training step counts when using the custom bucket_sampler!
    )

    model: Text2SemanticLightningModule = Text2SemanticLightningModule(
        config, output_dir
    )

    data_module: Text2SemanticDataModule = Text2SemanticDataModule(
        config,
        train_semantic_path=config["train_semantic_path"],
        train_phoneme_path=config["train_phoneme_path"],
        # dev_semantic_path=args.dev_semantic_path,
        # dev_phoneme_path=args.dev_phoneme_path
    )

    try:
        # match the numeric part of the checkpoint file names with a regex and sort numerically
        newest_ckpt_name = get_newest_ckpt(os.listdir(ckpt_dir))
        ckpt_path = ckpt_dir / newest_ckpt_name
    except Exception:
        ckpt_path = None
    print("ckpt_path:", ckpt_path)
    trainer.fit(model, data_module, ckpt_path=ckpt_path)


# srun --gpus-per-node=1 --ntasks-per-node=1 python train.py --path-to-configuration configurations/default.yaml
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config_file",
        type=str,
        default="configs/s1longer.yaml",
        help="path of config file",
    )
    # args for dataset
    # parser.add_argument('--train_semantic_path',type=str,default='/data/docker/liujing04/gpt-vits/fine_tune_dataset/xuangou/6-name2semantic.tsv')
    # parser.add_argument('--train_phoneme_path', type=str, default='/data/docker/liujing04/gpt-vits/fine_tune_dataset/xuangou/2-name2text.txt')

    # parser.add_argument('--dev_semantic_path', type=str, default='dump_mix/semantic_dev.tsv')
    # parser.add_argument('--dev_phoneme_path', type=str, default='dump_mix/phoneme_dev.npy')
    # parser.add_argument('--output_dir',type=str,default='/data/docker/liujing04/gpt-vits/fine_tune_dataset/xuangou/logs_s1',help='directory to save the results')
    # parser.add_argument('--output_dir',type=str,default='/liujing04/gpt_logs/s1/xuangou_ft',help='directory to save the results')

    args = parser.parse_args()
    logging.info(str(args))
    main(args)
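The deleted trainer takes a single -c/--config_file argument (defined in the __main__ block above); everything else — dataset paths, output directory, schedule — lives in the YAML config. A launch sketch, assuming the default config path exists:

import subprocess
import sys

subprocess.run(
    [sys.executable, "GPT_SoVITS/s1_train.py", "-c", "configs/s1longer.yaml"],
    check=True,
)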
@ -1,604 +0,0 @@
import warnings
warnings.filterwarnings("ignore")
import utils, os
hps = utils.get_hparams(stage=2)
os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",")
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist, traceback
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
import logging, traceback

logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("h5py").setLevel(logging.INFO)
logging.getLogger("numba").setLevel(logging.INFO)
from random import randint
from module import commons

from module.data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from module.models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
)
from module.losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from module.mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from process_ckpt import savee

torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = False
### fp32 is faster on A100 anyway, so let's try tf32
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.set_float32_matmul_precision("medium")  # lowest precision but fastest (only marginally so); has no effect on the results
# from config import pretrained_s2G,pretrained_s2D
global_step = 0

device = "cpu"  # for devices other than cuda; mps will be added once it is optimized


def main():
    if torch.cuda.is_available():
        n_gpus = torch.cuda.device_count()
    else:
        n_gpus = 1
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = str(randint(20000, 55555))

    mp.spawn(
        run,
        nprocs=n_gpus,
        args=(
            n_gpus,
            hps,
        ),
    )
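main follows the standard one-process-per-GPU pattern: mp.spawn forks n_gpus workers and passes each its rank as the implicit first argument of run. A self-contained illustration of that contract:

import torch.multiprocessing as mp


def worker(rank, world_size):
    # each spawned process receives its own rank in [0, world_size)
    print(f"worker {rank} of {world_size}")


if __name__ == "__main__":
    world_size = 2
    mp.spawn(worker, nprocs=world_size, args=(world_size,))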

def run(rank, n_gpus, hps):
    global global_step
    if rank == 0:
        logger = utils.get_logger(hps.data.exp_dir)
        logger.info(hps)
        # utils.check_git_hash(hps.s2_ckpt_dir)
        writer = SummaryWriter(log_dir=hps.s2_ckpt_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval"))

    dist.init_process_group(
        backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl",
        init_method="env://?use_libuv=False",
        world_size=n_gpus,
        rank=rank,
    )
    torch.manual_seed(hps.train.seed)
    if torch.cuda.is_available():
        torch.cuda.set_device(rank)

    train_dataset = TextAudioSpeakerLoader(hps.data)
    train_sampler = DistributedBucketSampler(
        train_dataset,
        hps.train.batch_size,
        [
            32,
            300,
            400,
            500,
            600,
            700,
            800,
            900,
            1000,
            1100,
            1200,
            1300,
            1400,
            1500,
            1600,
            1700,
            1800,
            1900,
        ],
        num_replicas=n_gpus,
        rank=rank,
        shuffle=True,
    )
    collate_fn = TextAudioSpeakerCollate()
    train_loader = DataLoader(
        train_dataset,
        num_workers=6,
        shuffle=False,
        pin_memory=True,
        collate_fn=collate_fn,
        batch_sampler=train_sampler,
        persistent_workers=True,
        prefetch_factor=4,
    )
    # if rank == 0:
    #     eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True)
    #     eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
    #                              batch_size=1, pin_memory=True,
    #                              drop_last=False, collate_fn=collate_fn)

    net_g = SynthesizerTrn(
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).cuda(rank) if torch.cuda.is_available() else SynthesizerTrn(
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    ).to(device)

    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to(device)
    for name, param in net_g.named_parameters():
        if not param.requires_grad:
            print(name, "not requires_grad")

    te_p = list(map(id, net_g.enc_p.text_embedding.parameters()))
    et_p = list(map(id, net_g.enc_p.encoder_text.parameters()))
    mrte_p = list(map(id, net_g.enc_p.mrte.parameters()))
    base_params = filter(
        lambda p: id(p) not in te_p + et_p + mrte_p and p.requires_grad,
        net_g.parameters(),
    )

    # te_p=net_g.enc_p.text_embedding.parameters()
    # et_p=net_g.enc_p.encoder_text.parameters()
    # mrte_p=net_g.enc_p.mrte.parameters()

    optim_g = torch.optim.AdamW(
        # filter(lambda p: p.requires_grad, net_g.parameters()),  # by default every layer shares the same lr
        [
            {"params": base_params, "lr": hps.train.learning_rate},
            {
                "params": net_g.enc_p.text_embedding.parameters(),
                "lr": hps.train.learning_rate * hps.train.text_low_lr_rate,
            },
            {
                "params": net_g.enc_p.encoder_text.parameters(),
                "lr": hps.train.learning_rate * hps.train.text_low_lr_rate,
            },
            {
                "params": net_g.enc_p.mrte.parameters(),
                "lr": hps.train.learning_rate * hps.train.text_low_lr_rate,
            },
        ],
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    optim_d = torch.optim.AdamW(
        net_d.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    if torch.cuda.is_available():
        net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
        net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
    else:
        net_g = net_g.to(device)
        net_d = net_d.to(device)

    try:  # resume automatically if a checkpoint can be loaded
        _, _, _, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_*.pth"),
            net_d,
            optim_d,
        )  # loading D rarely causes problems
        if rank == 0:
            logger.info("loaded D")
        # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0)
        _, _, _, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_*.pth"),
            net_g,
            optim_g,
        )
        epoch_str += 1
        global_step = (epoch_str - 1) * len(train_loader)
        # epoch_str = 1
        # global_step = 0
    except:  # if nothing can be loaded on the first run, load the pretrained weights
        # traceback.print_exc()
        epoch_str = 1
        global_step = 0
        if hps.train.pretrained_s2G != "" and hps.train.pretrained_s2G != None and os.path.exists(hps.train.pretrained_s2G):
            if rank == 0:
                logger.info("loaded pretrained %s" % hps.train.pretrained_s2G)
            print("loaded pretrained %s" % hps.train.pretrained_s2G,
                net_g.module.load_state_dict(
                    torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"],
                    strict=False,
                ) if torch.cuda.is_available() else net_g.load_state_dict(
                    torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"],
                    strict=False,
                )
            )  ## for testing: do not load the optimizer
        if hps.train.pretrained_s2D != "" and hps.train.pretrained_s2D != None and os.path.exists(hps.train.pretrained_s2D):
            if rank == 0:
                logger.info("loaded pretrained %s" % hps.train.pretrained_s2D)
            print("loaded pretrained %s" % hps.train.pretrained_s2D,
                net_d.module.load_state_dict(
                    torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"]
                ) if torch.cuda.is_available() else net_d.load_state_dict(
                    torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"]
                )
            )

    # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
    # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)

    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
        optim_g, gamma=hps.train.lr_decay, last_epoch=-1
    )
    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
        optim_d, gamma=hps.train.lr_decay, last_epoch=-1
    )
    for _ in range(epoch_str):
        scheduler_g.step()
        scheduler_d.step()

    scaler = GradScaler(enabled=hps.train.fp16_run)

    print("start training from epoch %s" % epoch_str)
    for epoch in range(epoch_str, hps.train.epochs + 1):
        if rank == 0:
            train_and_evaluate(
                rank,
                epoch,
                hps,
                [net_g, net_d],
                [optim_g, optim_d],
                [scheduler_g, scheduler_d],
                scaler,
                # [train_loader, eval_loader], logger, [writer, writer_eval])
                [train_loader, None],
                logger,
                [writer, writer_eval],
            )
        else:
            train_and_evaluate(
                rank,
                epoch,
                hps,
                [net_g, net_d],
                [optim_g, optim_d],
                [scheduler_g, scheduler_d],
                scaler,
                [train_loader, None],
                None,
                None,
            )
        scheduler_g.step()
        scheduler_d.step()
    print("training done")


def train_and_evaluate(
    rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers
):
    net_g, net_d = nets
    optim_g, optim_d = optims
    # scheduler_g, scheduler_d = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    net_d.train()
    for batch_idx, (
        ssl,
        ssl_lengths,
        spec,
        spec_lengths,
        y,
        y_lengths,
        text,
        text_lengths,
    ) in enumerate(tqdm(train_loader)):
        if torch.cuda.is_available():
            spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(
                rank, non_blocking=True
            )
            y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(
                rank, non_blocking=True
            )
            ssl = ssl.cuda(rank, non_blocking=True)
            ssl.requires_grad = False
            # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
            text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda(
                rank, non_blocking=True
            )
        else:
            spec, spec_lengths = spec.to(device), spec_lengths.to(device)
            y, y_lengths = y.to(device), y_lengths.to(device)
            ssl = ssl.to(device)
            ssl.requires_grad = False
            # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
            text, text_lengths = text.to(device), text_lengths.to(device)

        with autocast(enabled=hps.train.fp16_run):
            (
                y_hat,
                kl_ssl,
                ids_slice,
                x_mask,
                z_mask,
                (z, z_p, m_p, logs_p, m_q, logs_q),
                stats_ssl,
            ) = net_g(ssl, spec, spec_lengths, text, text_lengths)

            mel = spec_to_mel_torch(
                spec,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )
            y_mel = commons.slice_segments(
                mel, ids_slice, hps.train.segment_size // hps.data.hop_length
            )
            y_hat_mel = mel_spectrogram_torch(
                y_hat.squeeze(1),
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )

            y = commons.slice_segments(
                y, ids_slice * hps.data.hop_length, hps.train.segment_size
            )  # slice

            # Discriminator
            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
            with autocast(enabled=False):
                loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(
                    y_d_hat_r, y_d_hat_g
                )
                loss_disc_all = loss_disc
        optim_d.zero_grad()
        scaler.scale(loss_disc_all).backward()
        scaler.unscale_(optim_d)
        grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
        scaler.step(optim_d)

        with autocast(enabled=hps.train.fp16_run):
            # Generator
            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
            with autocast(enabled=False):
                loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
                loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl

                loss_fm = feature_loss(fmap_r, fmap_g)
                loss_gen, losses_gen = generator_loss(y_d_hat_g)
                loss_gen_all = loss_gen + loss_fm + loss_mel + kl_ssl * 1 + loss_kl

        optim_g.zero_grad()
        scaler.scale(loss_gen_all).backward()
        scaler.unscale_(optim_g)
        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
        scaler.step(optim_g)
        scaler.update()
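The two optimizer steps above follow the canonical torch.cuda.amp recipe: backward runs on the scaled loss, gradients are unscaled before clipping so clip_grad_value_ sees their true magnitudes, and scaler.update() runs once per iteration after the final step. A self-contained skeleton of the same pattern (CUDA assumed, as in this trainer):

import torch
from torch.cuda.amp import autocast, GradScaler

model = torch.nn.Linear(8, 8).cuda()
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = GradScaler(enabled=True)

x = torch.randn(4, 8, device="cuda")
with autocast(enabled=True):
    loss = model(x).pow(2).mean()
opt.zero_grad()
scaler.scale(loss).backward()    # backward on the scaled loss
scaler.unscale_(opt)             # restore true gradient magnitudes before clipping
torch.nn.utils.clip_grad_value_(model.parameters(), 1.0)
scaler.step(opt)                 # skipped automatically if inf/nan gradients appear
scaler.update()                  # adapt the scale factor for the next iteration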

        if rank == 0:
            if global_step % hps.train.log_interval == 0:
                lr = optim_g.param_groups[0]["lr"]
                losses = [loss_disc, loss_gen, loss_fm, loss_mel, kl_ssl, loss_kl]
                logger.info(
                    "Train Epoch: {} [{:.0f}%]".format(
                        epoch, 100.0 * batch_idx / len(train_loader)
                    )
                )
                logger.info([x.item() for x in losses] + [global_step, lr])

                scalar_dict = {
                    "loss/g/total": loss_gen_all,
                    "loss/d/total": loss_disc_all,
                    "learning_rate": lr,
                    "grad_norm_d": grad_norm_d,
                    "grad_norm_g": grad_norm_g,
                }
                scalar_dict.update(
                    {
                        "loss/g/fm": loss_fm,
                        "loss/g/mel": loss_mel,
                        "loss/g/kl_ssl": kl_ssl,
                        "loss/g/kl": loss_kl,
                    }
                )

                # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
                # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
                # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
                image_dict = {
                    "slice/mel_org": utils.plot_spectrogram_to_numpy(
                        y_mel[0].data.cpu().numpy()
                    ),
                    "slice/mel_gen": utils.plot_spectrogram_to_numpy(
                        y_hat_mel[0].data.cpu().numpy()
                    ),
                    "all/mel": utils.plot_spectrogram_to_numpy(
                        mel[0].data.cpu().numpy()
                    ),
                    "all/stats_ssl": utils.plot_spectrogram_to_numpy(
                        stats_ssl[0].data.cpu().numpy()
                    ),
                }
                utils.summarize(
                    writer=writer,
                    global_step=global_step,
                    images=image_dict,
                    scalars=scalar_dict,
                )
        global_step += 1
    if epoch % hps.train.save_every_epoch == 0 and rank == 0:
        if hps.train.if_save_latest == 0:
            utils.save_checkpoint(
                net_g,
                optim_g,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(global_step)
                ),
            )
            utils.save_checkpoint(
                net_d,
                optim_d,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_{}.pth".format(global_step)
                ),
            )
        else:
            utils.save_checkpoint(
                net_g,
                optim_g,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(233333333333)
                ),
            )
            utils.save_checkpoint(
                net_d,
                optim_d,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_{}.pth".format(233333333333)
                ),
            )
        if rank == 0 and hps.train.if_save_every_weights == True:
            if hasattr(net_g, "module"):
                ckpt = net_g.module.state_dict()
            else:
                ckpt = net_g.state_dict()
            logger.info(
                "saving ckpt %s_e%s:%s"
                % (
                    hps.name,
                    epoch,
                    savee(
                        ckpt,
                        hps.name + "_e%s_s%s" % (epoch, global_step),
                        epoch,
                        global_step,
                        hps,
                    ),
                )
            )

    if rank == 0:
        logger.info("====> Epoch: {}".format(epoch))


def evaluate(hps, generator, eval_loader, writer_eval):
    generator.eval()
    image_dict = {}
    audio_dict = {}
    print("Evaluating ...")
    with torch.no_grad():
        for batch_idx, (
            ssl,
            ssl_lengths,
            spec,
            spec_lengths,
            y,
            y_lengths,
            text,
            text_lengths,
        ) in enumerate(eval_loader):
            print(111)
            if torch.cuda.is_available():
                spec, spec_lengths = spec.cuda(), spec_lengths.cuda()
                y, y_lengths = y.cuda(), y_lengths.cuda()
                ssl = ssl.cuda()
                text, text_lengths = text.cuda(), text_lengths.cuda()
            else:
                spec, spec_lengths = spec.to(device), spec_lengths.to(device)
                y, y_lengths = y.to(device), y_lengths.to(device)
                ssl = ssl.to(device)
                text, text_lengths = text.to(device), text_lengths.to(device)
            for test in [0, 1]:
                y_hat, mask, *_ = generator.module.infer(
                    ssl, spec, spec_lengths, text, text_lengths, test=test
                ) if torch.cuda.is_available() else generator.infer(
                    ssl, spec, spec_lengths, text, text_lengths, test=test
                )
                y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length

                mel = spec_to_mel_torch(
                    spec,
                    hps.data.filter_length,
                    hps.data.n_mel_channels,
                    hps.data.sampling_rate,
                    hps.data.mel_fmin,
                    hps.data.mel_fmax,
                )
                y_hat_mel = mel_spectrogram_torch(
                    y_hat.squeeze(1).float(),
                    hps.data.filter_length,
                    hps.data.n_mel_channels,
                    hps.data.sampling_rate,
                    hps.data.hop_length,
                    hps.data.win_length,
                    hps.data.mel_fmin,
                    hps.data.mel_fmax,
                )
                image_dict.update(
                    {
                        f"gen/mel_{batch_idx}_{test}": utils.plot_spectrogram_to_numpy(
                            y_hat_mel[0].cpu().numpy()
                        )
                    }
                )
                audio_dict.update(
                    {f"gen/audio_{batch_idx}_{test}": y_hat[0, :, : y_hat_lengths[0]]}
                )
                image_dict.update(
                    {
                        f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(
                            mel[0].cpu().numpy()
                        )
                    }
                )
                audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, : y_lengths[0]]})

            # y_hat, mask, *_ = generator.module.infer(ssl, spec_lengths, speakers, y=None)
            # audio_dict.update({
            #     f"gen/audio_{batch_idx}_style_pred": y_hat[0, :, :]
            # })

    utils.summarize(
        writer=writer_eval,
        global_step=global_step,
        images=image_dict,
        audios=audio_dict,
        audio_sampling_rate=hps.data.sampling_rate,
    )
    generator.train()


if __name__ == "__main__":
    main()
@ -1,416 +0,0 @@
import warnings
|
|
||||||
warnings.filterwarnings("ignore")
|
|
||||||
import utils, os
|
|
||||||
hps = utils.get_hparams(stage=2)
|
|
||||||
os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",")
|
|
||||||
import torch
|
|
||||||
from torch.nn import functional as F
|
|
||||||
from torch.utils.data import DataLoader
|
|
||||||
from torch.utils.tensorboard import SummaryWriter
|
|
||||||
import torch.multiprocessing as mp
|
|
||||||
import torch.distributed as dist, traceback
|
|
||||||
from torch.nn.parallel import DistributedDataParallel as DDP
|
|
||||||
from torch.cuda.amp import autocast, GradScaler
|
|
||||||
from tqdm import tqdm
|
|
||||||
import logging, traceback
|
|
||||||
|
|
||||||
logging.getLogger("matplotlib").setLevel(logging.INFO)
|
|
||||||
logging.getLogger("h5py").setLevel(logging.INFO)
|
|
||||||
logging.getLogger("numba").setLevel(logging.INFO)
|
|
||||||
from random import randint
|
|
||||||
from module import commons
|
|
||||||
|
|
||||||
from module.data_utils import (
|
|
||||||
TextAudioSpeakerLoaderV3 as TextAudioSpeakerLoader,
|
|
||||||
TextAudioSpeakerCollateV3 as TextAudioSpeakerCollate,
|
|
||||||
DistributedBucketSampler,
|
|
||||||
)
|
|
||||||
from module.models import (
|
|
||||||
SynthesizerTrnV3 as SynthesizerTrn,
|
|
||||||
MultiPeriodDiscriminator,
|
|
||||||
)
|
|
||||||
from module.losses import generator_loss, discriminator_loss, feature_loss, kl_loss
|
|
||||||
from module.mel_processing import mel_spectrogram_torch, spec_to_mel_torch
|
|
||||||
from process_ckpt import savee
|
|
||||||
|
|
||||||
torch.backends.cudnn.benchmark = False
|
|
||||||
torch.backends.cudnn.deterministic = False
|
|
||||||
###反正A100fp32更快,那试试tf32吧
|
|
||||||
torch.backends.cuda.matmul.allow_tf32 = True
|
|
||||||
torch.backends.cudnn.allow_tf32 = True
|
|
||||||
torch.set_float32_matmul_precision("medium") # 最低精度但最快(也就快一丁点),对于结果造成不了影响
|
|
||||||
# from config import pretrained_s2G,pretrained_s2D
|
|
||||||
global_step = 0
|
|
||||||
|
|
||||||
device = "cpu" # cuda以外的设备,等mps优化后加入
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
n_gpus = torch.cuda.device_count()
|
|
||||||
else:
|
|
||||||
n_gpus = 1
|
|
||||||
os.environ["MASTER_ADDR"] = "localhost"
|
|
||||||
os.environ["MASTER_PORT"] = str(randint(20000, 55555))
|
|
||||||
|
|
||||||
mp.spawn(
|
|
||||||
run,
|
|
||||||
nprocs=n_gpus,
|
|
||||||
args=(
|
|
||||||
n_gpus,
|
|
||||||
hps,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def run(rank, n_gpus, hps):
|
|
||||||
global global_step
|
|
||||||
if rank == 0:
|
|
||||||
logger = utils.get_logger(hps.data.exp_dir)
|
|
||||||
logger.info(hps)
|
|
||||||
# utils.check_git_hash(hps.s2_ckpt_dir)
|
|
||||||
writer = SummaryWriter(log_dir=hps.s2_ckpt_dir)
|
|
||||||
writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval"))
|
|
||||||
|
|
||||||
dist.init_process_group(
|
|
||||||
backend = "gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl",
|
|
||||||
init_method="env://?use_libuv=False",
|
|
||||||
world_size=n_gpus,
|
|
||||||
rank=rank,
|
|
||||||
)
|
|
||||||
torch.manual_seed(hps.train.seed)
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
torch.cuda.set_device(rank)
|
|
||||||
|
|
||||||
train_dataset = TextAudioSpeakerLoader(hps.data) ########
|
|
||||||
train_sampler = DistributedBucketSampler(
|
|
||||||
train_dataset,
|
|
||||||
hps.train.batch_size,
|
|
||||||
[
|
|
||||||
32,
|
|
||||||
300,
|
|
||||||
400,
|
|
||||||
500,
|
|
||||||
600,
|
|
||||||
700,
|
|
||||||
800,
|
|
||||||
900,
|
|
||||||
1000,
|
|
||||||
# 1100,
|
|
||||||
# 1200,
|
|
||||||
# 1300,
|
|
||||||
# 1400,
|
|
||||||
# 1500,
|
|
||||||
# 1600,
|
|
||||||
# 1700,
|
|
||||||
# 1800,
|
|
||||||
# 1900,
|
|
||||||
],
|
|
||||||
num_replicas=n_gpus,
|
|
||||||
rank=rank,
|
|
||||||
shuffle=True,
|
|
||||||
)
|
|
||||||
collate_fn = TextAudioSpeakerCollate()
|
|
||||||
train_loader = DataLoader(
|
|
||||||
train_dataset,
|
|
||||||
num_workers=6,
|
|
||||||
shuffle=False,
|
|
||||||
pin_memory=True,
|
|
||||||
collate_fn=collate_fn,
|
|
||||||
batch_sampler=train_sampler,
|
|
||||||
persistent_workers=True,
|
|
||||||
prefetch_factor=4,
|
|
||||||
)
|
|
||||||
# if rank == 0:
|
|
||||||
# eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data, val=True)
|
|
||||||
# eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
|
|
||||||
# batch_size=1, pin_memory=True,
|
|
||||||
# drop_last=False, collate_fn=collate_fn)
|
|
||||||
|
|
||||||
net_g = SynthesizerTrn(
|
|
||||||
hps.data.filter_length // 2 + 1,
|
|
||||||
hps.train.segment_size // hps.data.hop_length,
|
|
||||||
n_speakers=hps.data.n_speakers,
|
|
||||||
**hps.model,
|
|
||||||
).cuda(rank) if torch.cuda.is_available() else SynthesizerTrn(
|
|
||||||
hps.data.filter_length // 2 + 1,
|
|
||||||
hps.train.segment_size // hps.data.hop_length,
|
|
||||||
n_speakers=hps.data.n_speakers,
|
|
||||||
**hps.model,
|
|
||||||
).to(device)
|
|
||||||
|
|
||||||
# net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to(device)
|
|
||||||
# for name, param in net_g.named_parameters():
|
|
||||||
# if not param.requires_grad:
|
|
||||||
# print(name, "not requires_grad")
|
|
||||||
|
|
||||||
optim_g = torch.optim.AdamW(
|
|
||||||
filter(lambda p: p.requires_grad, net_g.parameters()),###默认所有层lr一致
|
|
||||||
hps.train.learning_rate,
|
|
||||||
betas=hps.train.betas,
|
|
||||||
eps=hps.train.eps,
|
|
||||||
)
|
|
||||||
# optim_d = torch.optim.AdamW(
|
|
||||||
# net_d.parameters(),
|
|
||||||
# hps.train.learning_rate,
|
|
||||||
# betas=hps.train.betas,
|
|
||||||
# eps=hps.train.eps,
|
|
||||||
# )
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
        net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
        # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
    else:
        net_g = net_g.to(device)
        # net_d = net_d.to(device)

    try:  # resume automatically if a checkpoint can be loaded
        # _, _, _, epoch_str = utils.load_checkpoint(
        #     utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_*.pth"),
        #     net_d,
        #     optim_d,
        # )  # loading D is usually fine
        # if rank == 0:
        #     logger.info("loaded D")
        # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, load_opt=0)
        _, _, _, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path("%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_*.pth"),
            net_g,
            optim_g,
        )
        epoch_str += 1
        global_step = (epoch_str - 1) * len(train_loader)
        # epoch_str = 1
        # global_step = 0
    except Exception:  # nothing to resume on a first run, so fall back to the pretrained weights
        # traceback.print_exc()
        epoch_str = 1
        global_step = 0
        if hps.train.pretrained_s2G != "" and hps.train.pretrained_s2G is not None and os.path.exists(hps.train.pretrained_s2G):
            if rank == 0:
                logger.info("loaded pretrained %s" % hps.train.pretrained_s2G)
            print(
                "loaded pretrained %s" % hps.train.pretrained_s2G,
                net_g.module.load_state_dict(
                    torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"],
                    strict=False,
                )
                if torch.cuda.is_available()
                else net_g.load_state_dict(
                    torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"],
                    strict=False,
                ),
            )  ## testing: do not load the optimizer state
        # if hps.train.pretrained_s2D != "" and hps.train.pretrained_s2D is not None and os.path.exists(hps.train.pretrained_s2D):
        #     if rank == 0:
        #         logger.info("loaded pretrained %s" % hps.train.pretrained_s2D)
        #     print(
        #         net_d.module.load_state_dict(
        #             torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"]
        #         ) if torch.cuda.is_available() else net_d.load_state_dict(
        #             torch.load(hps.train.pretrained_s2D, map_location="cpu")["weight"]
        #         )
        #     )

    # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
    # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)

    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
        optim_g, gamma=hps.train.lr_decay, last_epoch=-1
    )
    # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
    #     optim_d, gamma=hps.train.lr_decay, last_epoch=-1
    # )
    for _ in range(epoch_str):
        scheduler_g.step()
        # scheduler_d.step()

    scaler = GradScaler(enabled=hps.train.fp16_run)

    net_d = optim_d = scheduler_d = None
    print("start training from epoch %s" % epoch_str)
    for epoch in range(epoch_str, hps.train.epochs + 1):
        if rank == 0:
            train_and_evaluate(
                rank,
                epoch,
                hps,
                [net_g, net_d],
                [optim_g, optim_d],
                [scheduler_g, scheduler_d],
                scaler,
                # [train_loader, eval_loader], logger, [writer, writer_eval])
                [train_loader, None],
                logger,
                [writer, writer_eval],
            )
        else:
            train_and_evaluate(
                rank,
                epoch,
                hps,
                [net_g, net_d],
                [optim_g, optim_d],
                [scheduler_g, scheduler_d],
                scaler,
                [train_loader, None],
                None,
                None,
            )
        scheduler_g.step()
        # scheduler_d.step()
    print("training done")


def train_and_evaluate(
    rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers
):
    net_g, net_d = nets
    optim_g, optim_d = optims
    # scheduler_g, scheduler_d = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    # net_d.train()
    # for batch_idx, (
    #     ssl,
    #     ssl_lengths,
    #     spec,
    #     spec_lengths,
    #     y,
    #     y_lengths,
    #     text,
    #     text_lengths,
    # ) in enumerate(tqdm(train_loader)):
    for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate(tqdm(train_loader)):
        if torch.cuda.is_available():
            spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(
                rank, non_blocking=True
            )
            mel, mel_lengths = mel.cuda(rank, non_blocking=True), mel_lengths.cuda(
                rank, non_blocking=True
            )
            ssl = ssl.cuda(rank, non_blocking=True)
            ssl.requires_grad = False
            # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
            text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda(
                rank, non_blocking=True
            )
        else:
            spec, spec_lengths = spec.to(device), spec_lengths.to(device)
            mel, mel_lengths = mel.to(device), mel_lengths.to(device)
            ssl = ssl.to(device)
            ssl.requires_grad = False
            # ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
            text, text_lengths = text.to(device), text_lengths.to(device)

        with autocast(enabled=hps.train.fp16_run):
            cfm_loss = net_g(ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths, use_grad_ckpt=hps.train.grad_ckpt)
            loss_gen_all = cfm_loss
        optim_g.zero_grad()
        scaler.scale(loss_gen_all).backward()
        scaler.unscale_(optim_g)
        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
        scaler.step(optim_g)
        scaler.update()
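        # AMP ordering note: scaler.unscale_() must run before the manual clipping
        # so commons.clip_grad_value_ sees the true (unscaled) gradients, and
        # scaler.step() then skips this optimizer step if any gradient overflowed.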

        if rank == 0:
            if global_step % hps.train.log_interval == 0:
                lr = optim_g.param_groups[0]['lr']
                # losses = [commit_loss, cfm_loss, mel_loss, loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
                losses = [cfm_loss]
                logger.info('Train Epoch: {} [{:.0f}%]'.format(
                    epoch,
                    100. * batch_idx / len(train_loader)))
                logger.info([x.item() for x in losses] + [global_step, lr])

                scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g}
                # image_dict = {
                #     "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
                #     "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
                #     "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
                #     "all/stats_ssl": utils.plot_spectrogram_to_numpy(stats_ssl[0].data.cpu().numpy()),
                # }
                utils.summarize(
                    writer=writer,
                    global_step=global_step,
                    # images=image_dict,
                    scalars=scalar_dict)

            # if global_step % hps.train.eval_interval == 0:
            #     # evaluate(hps, net_g, eval_loader, writer_eval)
            #     utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.s2_ckpt_dir, "G_{}.pth".format(global_step)), scaler)
            #     # utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.s2_ckpt_dir, "D_{}.pth".format(global_step)), scaler)
            #     # keep_ckpts = getattr(hps.train, 'keep_ckpts', 3)
            #     # if keep_ckpts > 0:
            #     #     utils.clean_checkpoints(path_to_models=hps.s2_ckpt_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)

        global_step += 1
    if epoch % hps.train.save_every_epoch == 0 and rank == 0:
        if hps.train.if_save_latest == 0:
            utils.save_checkpoint(
                net_g,
                optim_g,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(global_step)
                ),
            )
            # utils.save_checkpoint(
            #     net_d,
            #     optim_d,
            #     hps.train.learning_rate,
            #     epoch,
            #     os.path.join(
            #         "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_{}.pth".format(global_step)
            #     ),
            # )
        else:
            utils.save_checkpoint(
                net_g,
                optim_g,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "G_{}.pth".format(233333333333)  # fixed name: each save overwrites the same "latest" file
                ),
            )
            # utils.save_checkpoint(
            #     net_d,
            #     optim_d,
            #     hps.train.learning_rate,
            #     epoch,
            #     os.path.join(
            #         "%s/logs_s2_%s" % (hps.data.exp_dir, hps.model.version), "D_{}.pth".format(233333333333)
            #     ),
            # )
    if rank == 0 and hps.train.if_save_every_weights == True:
        if hasattr(net_g, "module"):
            ckpt = net_g.module.state_dict()
        else:
            ckpt = net_g.state_dict()
        logger.info(
            "saving ckpt %s_e%s:%s"
            % (
                hps.name,
                epoch,
                savee(
                    ckpt,
                    hps.name + "_e%s_s%s" % (epoch, global_step),
                    epoch,
                    global_step,
                    hps,
                ),
            )
        )

    if rank == 0:
        logger.info("====> Epoch: {}".format(epoch))


if __name__ == "__main__":
    main()
@ -1,345 +0,0 @@
import warnings
warnings.filterwarnings("ignore")
import utils, os
hps = utils.get_hparams(stage=2)
os.environ["CUDA_VISIBLE_DEVICES"] = hps.train.gpu_numbers.replace("-", ",")
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
import logging, traceback

logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("h5py").setLevel(logging.INFO)
logging.getLogger("numba").setLevel(logging.INFO)
from random import randint
from module import commons
from peft import LoraConfig, PeftModel, get_peft_model
from module.data_utils import (
    TextAudioSpeakerLoaderV3 as TextAudioSpeakerLoader,
    TextAudioSpeakerCollateV3 as TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from module.models import (
    SynthesizerTrnV3 as SynthesizerTrn,
    MultiPeriodDiscriminator,
)
from module.losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from module.mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from process_ckpt import savee
from collections import OrderedDict as od

torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = False
### fp32 is faster on an A100 anyway, so let's try tf32
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.set_float32_matmul_precision("medium")  # lowest precision but fastest (only marginally so); has no effect on the results
# from config import pretrained_s2G, pretrained_s2D
global_step = 0

device = "cpu"  # for devices other than cuda; mps will be added once it is better optimized


def main():
    if torch.cuda.is_available():
        n_gpus = torch.cuda.device_count()
    else:
        n_gpus = 1
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = str(randint(20000, 55555))

    mp.spawn(
        run,
        nprocs=n_gpus,
        args=(
            n_gpus,
            hps,
        ),
    )


def run(rank, n_gpus, hps):
    global global_step, no_grad_names, save_root, lora_rank
    if rank == 0:
        logger = utils.get_logger(hps.data.exp_dir)
        logger.info(hps)
        # utils.check_git_hash(hps.s2_ckpt_dir)
        writer = SummaryWriter(log_dir=hps.s2_ckpt_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval"))

    dist.init_process_group(
        backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl",
        init_method="env://?use_libuv=False",
        world_size=n_gpus,
        rank=rank,
    )
    torch.manual_seed(hps.train.seed)
    if torch.cuda.is_available():
        torch.cuda.set_device(rank)

    train_dataset = TextAudioSpeakerLoader(hps.data)  ########
    train_sampler = DistributedBucketSampler(
        train_dataset,
        hps.train.batch_size,
        [
            32,
            300,
            400,
            500,
            600,
            700,
            800,
            900,
            1000,
            # 1100,
            # 1200,
            # 1300,
            # 1400,
            # 1500,
            # 1600,
            # 1700,
            # 1800,
            # 1900,
        ],
        num_replicas=n_gpus,
        rank=rank,
        shuffle=True,
    )
    collate_fn = TextAudioSpeakerCollate()
    train_loader = DataLoader(
        train_dataset,
        num_workers=6,
        shuffle=False,
        pin_memory=True,
        collate_fn=collate_fn,
        batch_sampler=train_sampler,
        persistent_workers=True,
        prefetch_factor=4,
    )
    save_root = "%s/logs_s2_%s_lora_%s" % (hps.data.exp_dir, hps.model.version, hps.train.lora_rank)
    os.makedirs(save_root, exist_ok=True)
    lora_rank = int(hps.train.lora_rank)
    lora_config = LoraConfig(
        target_modules=["to_k", "to_q", "to_v", "to_out.0"],
        r=lora_rank,
        lora_alpha=lora_rank,
        init_lora_weights=True,
    )
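
    # --- illustrative note -----------------------------------------------------
    # Once net_g.cfm is wrapped with get_peft_model(..., lora_config) below, only
    # the LoRA adapter weights require gradients. Assuming the standard peft API,
    # this can be sanity-checked after wrapping with:
    #
    #     net_g.cfm.print_trainable_parameters()
    #     # prints e.g. "trainable params: ... || all params: ... || trainable%: ..."
    # -----------------------------------------------------------------------------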

    def get_model(hps):
        return SynthesizerTrn(
            hps.data.filter_length // 2 + 1,
            hps.train.segment_size // hps.data.hop_length,
            n_speakers=hps.data.n_speakers,
            **hps.model,
        )

    def get_optim(net_g):
        return torch.optim.AdamW(
            filter(lambda p: p.requires_grad, net_g.parameters()),  ### the same lr for every layer by default
            hps.train.learning_rate,
            betas=hps.train.betas,
            eps=hps.train.eps,
        )

    def model2cuda(net_g, rank):
        if torch.cuda.is_available():
            net_g = DDP(net_g.cuda(rank), device_ids=[rank], find_unused_parameters=True)
        else:
            net_g = net_g.to(device)
        return net_g

    try:  # resume automatically if a checkpoint can be loaded
        net_g = get_model(hps)
        net_g.cfm = get_peft_model(net_g.cfm, lora_config)
        net_g = model2cuda(net_g, rank)
        optim_g = get_optim(net_g)
        # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, load_opt=0)
        _, _, _, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path(save_root, "G_*.pth"),
            net_g,
            optim_g,
        )
        epoch_str += 1
        global_step = (epoch_str - 1) * len(train_loader)
    except Exception:  # nothing to resume on a first run, so fall back to the pretrained weights
        # traceback.print_exc()
        epoch_str = 1
        global_step = 0
        net_g = get_model(hps)
        if hps.train.pretrained_s2G != "" and hps.train.pretrained_s2G is not None and os.path.exists(hps.train.pretrained_s2G):
            if rank == 0:
                logger.info("loaded pretrained %s" % hps.train.pretrained_s2G)
            print(
                "loaded pretrained %s" % hps.train.pretrained_s2G,
                net_g.load_state_dict(
                    torch.load(hps.train.pretrained_s2G, map_location="cpu")["weight"],
                    strict=False,
                ),
            )
        net_g.cfm = get_peft_model(net_g.cfm, lora_config)
        net_g = model2cuda(net_g, rank)
        optim_g = get_optim(net_g)

    no_grad_names = set()
    for name, param in net_g.named_parameters():
        if not param.requires_grad:
            no_grad_names.add(name.replace("module.", ""))
            # print(name, "not requires_grad")
    # print(no_grad_names)
    # os._exit(233333)

    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
        optim_g, gamma=hps.train.lr_decay, last_epoch=-1
    )
    for _ in range(epoch_str):
        scheduler_g.step()

    scaler = GradScaler(enabled=hps.train.fp16_run)

    net_d = optim_d = scheduler_d = None
    print("start training from epoch %s" % epoch_str)
    for epoch in range(epoch_str, hps.train.epochs + 1):
        if rank == 0:
            train_and_evaluate(
                rank,
                epoch,
                hps,
                [net_g, net_d],
                [optim_g, optim_d],
                [scheduler_g, scheduler_d],
                scaler,
                # [train_loader, eval_loader], logger, [writer, writer_eval])
                [train_loader, None],
                logger,
                [writer, writer_eval],
            )
        else:
            train_and_evaluate(
                rank,
                epoch,
                hps,
                [net_g, net_d],
                [optim_g, optim_d],
                [scheduler_g, scheduler_d],
                scaler,
                [train_loader, None],
                None,
                None,
            )
        scheduler_g.step()
    print("training done")


def train_and_evaluate(
    rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers
):
    net_g, net_d = nets
    optim_g, optim_d = optims
    # scheduler_g, scheduler_d = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    for batch_idx, (ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths) in enumerate(tqdm(train_loader)):
        if torch.cuda.is_available():
            spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(
                rank, non_blocking=True
            )
            mel, mel_lengths = mel.cuda(rank, non_blocking=True), mel_lengths.cuda(
                rank, non_blocking=True
            )
            ssl = ssl.cuda(rank, non_blocking=True)
            ssl.requires_grad = False
            text, text_lengths = text.cuda(rank, non_blocking=True), text_lengths.cuda(
                rank, non_blocking=True
            )
        else:
            spec, spec_lengths = spec.to(device), spec_lengths.to(device)
            mel, mel_lengths = mel.to(device), mel_lengths.to(device)
            ssl = ssl.to(device)
            ssl.requires_grad = False
            text, text_lengths = text.to(device), text_lengths.to(device)

        with autocast(enabled=hps.train.fp16_run):
            cfm_loss = net_g(ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, mel_lengths, use_grad_ckpt=hps.train.grad_ckpt)
            loss_gen_all = cfm_loss
        optim_g.zero_grad()
        scaler.scale(loss_gen_all).backward()
        scaler.unscale_(optim_g)
        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
        scaler.step(optim_g)
        scaler.update()

        if rank == 0:
            if global_step % hps.train.log_interval == 0:
                lr = optim_g.param_groups[0]['lr']
                losses = [cfm_loss]
                logger.info('Train Epoch: {} [{:.0f}%]'.format(
                    epoch,
                    100. * batch_idx / len(train_loader)))
                logger.info([x.item() for x in losses] + [global_step, lr])

                scalar_dict = {"loss/g/total": loss_gen_all, "learning_rate": lr, "grad_norm_g": grad_norm_g}
                utils.summarize(
                    writer=writer,
                    global_step=global_step,
                    scalars=scalar_dict)

        global_step += 1
    if epoch % hps.train.save_every_epoch == 0 and rank == 0:
        if hps.train.if_save_latest == 0:
            utils.save_checkpoint(
                net_g,
                optim_g,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    save_root, "G_{}.pth".format(global_step)
                ),
            )
        else:
            utils.save_checkpoint(
                net_g,
                optim_g,
                hps.train.learning_rate,
                epoch,
                os.path.join(
                    save_root, "G_{}.pth".format(233333333333)  # fixed name: each save overwrites the same "latest" file
                ),
            )
    if rank == 0 and hps.train.if_save_every_weights == True:
        if hasattr(net_g, "module"):
            ckpt = net_g.module.state_dict()
        else:
            ckpt = net_g.state_dict()
        sim_ckpt = od()
        for key in ckpt:
            # if "cfm" not in key:
            #     print(key)
            if key not in no_grad_names:
                sim_ckpt[key] = ckpt[key].half().cpu()
        logger.info(
            "saving ckpt %s_e%s:%s"
            % (
                hps.name,
                epoch,
                savee(
                    sim_ckpt,
                    hps.name + "_e%s_s%s_l%s" % (epoch, global_step, lora_rank),
                    epoch,
                    global_step,
                    hps, lora_rank=lora_rank
                ),
            )
        )

    if rank == 0:
        logger.info("====> Epoch: {}".format(epoch))


if __name__ == "__main__":
    main()
@ -1,152 +0,0 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "# Credits for bubarino giving me the huggingface import code (感谢 bubarino 给了我 huggingface 导入代码)"
      ],
      "metadata": {
        "id": "himHYZmra7ix"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "e9b7iFV3dm1f"
      },
      "source": [
        "!git clone https://github.com/RVC-Boss/GPT-SoVITS.git\n",
        "%cd GPT-SoVITS\n",
        "!apt-get update && apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && git lfs install\n",
        "!pip install -r requirements.txt"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Download pretrained models 下载预训练模型\n",
        "!mkdir -p /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
        "!mkdir -p /content/GPT-SoVITS/tools/damo_asr/models\n",
        "!mkdir -p /content/GPT-SoVITS/tools/uvr5\n",
        "%cd /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
        "!git clone https://huggingface.co/lj1995/GPT-SoVITS\n",
        "%cd /content/GPT-SoVITS/tools/damo_asr/models\n",
        "!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n",
        "!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n",
        "!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n",
        "# @title UVR5 pretrains 安装uvr5模型\n",
        "%cd /content/GPT-SoVITS/tools/uvr5\n",
        "!git clone https://huggingface.co/Delik/uvr5_weights\n",
        "!git config core.sparseCheckout true\n",
        "!mv /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/"
      ],
      "metadata": {
        "id": "0NgxXg5sjv7z",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "#@title Create folder models 创建文件夹模型\n",
        "import os\n",
        "base_directory = \"/content/GPT-SoVITS\"\n",
        "folder_names = [\"SoVITS_weights\", \"GPT_weights\"]\n",
        "\n",
        "for folder_name in folder_names:\n",
        "    if os.path.exists(os.path.join(base_directory, folder_name)):\n",
        "        print(f\"The folder '{folder_name}' already exists. (文件夹'{folder_name}'已经存在。)\")\n",
        "    else:\n",
        "        os.makedirs(os.path.join(base_directory, folder_name))\n",
        "        print(f\"The folder '{folder_name}' was created successfully! (文件夹'{folder_name}'已成功创建!)\")\n",
        "\n",
        "print(\"All folders have been created. (所有文件夹均已创建。)\")"
      ],
      "metadata": {
        "cellView": "form",
        "id": "cPDEH-9czOJF"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import requests\n",
        "import zipfile\n",
        "import shutil\n",
        "import os\n",
        "\n",
        "#@title Import model 导入模型 (HuggingFace)\n",
        "hf_link = 'https://huggingface.co/modelloosrvcc/Nagisa_Shingetsu_GPT-SoVITS/resolve/main/Nagisa.zip' #@param {type: \"string\"}\n",
        "\n",
        "output_path = '/content/'\n",
        "\n",
        "response = requests.get(hf_link)\n",
        "with open(output_path + 'file.zip', 'wb') as file:\n",
        "    file.write(response.content)\n",
        "\n",
        "with zipfile.ZipFile(output_path + 'file.zip', 'r') as zip_ref:\n",
        "    zip_ref.extractall(output_path)\n",
        "\n",
        "os.remove(output_path + \"file.zip\")\n",
        "\n",
        "source_directory = output_path\n",
        "SoVITS_destination_directory = '/content/GPT-SoVITS/SoVITS_weights'\n",
        "GPT_destination_directory = '/content/GPT-SoVITS/GPT_weights'\n",
        "\n",
        "for filename in os.listdir(source_directory):\n",
        "    if filename.endswith(\".pth\"):\n",
        "        source_path = os.path.join(source_directory, filename)\n",
        "        destination_path = os.path.join(SoVITS_destination_directory, filename)\n",
        "        shutil.move(source_path, destination_path)\n",
        "\n",
        "for filename in os.listdir(source_directory):\n",
        "    if filename.endswith(\".ckpt\"):\n",
        "        source_path = os.path.join(source_directory, filename)\n",
        "        destination_path = os.path.join(GPT_destination_directory, filename)\n",
        "        shutil.move(source_path, destination_path)\n",
        "\n",
        "print(f'Model downloaded. (模型已下载。)')"
      ],
      "metadata": {
        "cellView": "form",
        "id": "vbZY-LnM0tzq"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# @title launch WebUI 启动WebUI\n",
        "!/usr/local/bin/pip install ipykernel\n",
        "!sed -i '10s/False/True/' /content/GPT-SoVITS/config.py\n",
        "%cd /content/GPT-SoVITS/\n",
        "!/usr/local/bin/python webui.py"
      ],
      "metadata": {
        "id": "4oRGUzkrk8C7",
        "cellView": "form"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}
460  api_v2.py
@ -1,460 +0,0 @@
"""
|
|
||||||
# WebAPI文档
|
|
||||||
|
|
||||||
` python api_v2.py -a 127.0.0.1 -p 9880 -c GPT_SoVITS/configs/tts_infer.yaml `
|
|
||||||
|
|
||||||
## 执行参数:
|
|
||||||
`-a` - `绑定地址, 默认"127.0.0.1"`
|
|
||||||
`-p` - `绑定端口, 默认9880`
|
|
||||||
`-c` - `TTS配置文件路径, 默认"GPT_SoVITS/configs/tts_infer.yaml"`
|
|
||||||
|
|
||||||
## 调用:
|
|
||||||
|
|
||||||
### 推理
|
|
||||||
|
|
||||||
endpoint: `/tts`
|
|
||||||
GET:
|
|
||||||
```
|
|
||||||
http://127.0.0.1:9880/tts?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_lang=zh&ref_audio_path=archive_jingyuan_1.wav&prompt_lang=zh&prompt_text=我是「罗浮」云骑将军景元。不必拘谨,「将军」只是一时的身份,你称呼我景元便可&text_split_method=cut5&batch_size=1&media_type=wav&streaming_mode=true
|
|
||||||
```
|
|
||||||
|
|
||||||
POST:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"text": "", # str.(required) text to be synthesized
|
|
||||||
"text_lang: "", # str.(required) language of the text to be synthesized
|
|
||||||
"ref_audio_path": "", # str.(required) reference audio path
|
|
||||||
"aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
|
|
||||||
"prompt_text": "", # str.(optional) prompt text for the reference audio
|
|
||||||
"prompt_lang": "", # str.(required) language of the prompt text for the reference audio
|
|
||||||
"top_k": 5, # int. top k sampling
|
|
||||||
"top_p": 1, # float. top p sampling
|
|
||||||
"temperature": 1, # float. temperature for sampling
|
|
||||||
"text_split_method": "cut0", # str. text split method, see text_segmentation_method.py for details.
|
|
||||||
"batch_size": 1, # int. batch size for inference
|
|
||||||
"batch_threshold": 0.75, # float. threshold for batch splitting.
|
|
||||||
"split_bucket: True, # bool. whether to split the batch into multiple buckets.
|
|
||||||
"speed_factor":1.0, # float. control the speed of the synthesized audio.
|
|
||||||
"streaming_mode": False, # bool. whether to return a streaming response.
|
|
||||||
"seed": -1, # int. random seed for reproducibility.
|
|
||||||
"parallel_infer": True, # bool. whether to use parallel inference.
|
|
||||||
"repetition_penalty": 1.35 # float. repetition penalty for T2S model.
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
RESP:
|
|
||||||
成功: 直接返回 wav 音频流, http code 200
|
|
||||||
失败: 返回包含错误信息的 json, http code 400
|
|
||||||
|
|
||||||
### 命令控制
|
|
||||||
|
|
||||||
endpoint: `/control`
|
|
||||||
|
|
||||||
command:
|
|
||||||
"restart": 重新运行
|
|
||||||
"exit": 结束运行
|
|
||||||
|
|
||||||
GET:
|
|
||||||
```
|
|
||||||
http://127.0.0.1:9880/control?command=restart
|
|
||||||
```
|
|
||||||
POST:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"command": "restart"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
RESP: 无
|
|
||||||
|
|
||||||
|
|
||||||
### 切换GPT模型
|
|
||||||
|
|
||||||
endpoint: `/set_gpt_weights`
|
|
||||||
|
|
||||||
GET:
|
|
||||||
```
|
|
||||||
http://127.0.0.1:9880/set_gpt_weights?weights_path=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
|
|
||||||
```
|
|
||||||
RESP:
|
|
||||||
成功: 返回"success", http code 200
|
|
||||||
失败: 返回包含错误信息的 json, http code 400
|
|
||||||
|
|
||||||
|
|
||||||
### 切换Sovits模型
|
|
||||||
|
|
||||||
endpoint: `/set_sovits_weights`
|
|
||||||
|
|
||||||
GET:
|
|
||||||
```
|
|
||||||
http://127.0.0.1:9880/set_sovits_weights?weights_path=GPT_SoVITS/pretrained_models/s2G488k.pth
|
|
||||||
```
|
|
||||||
|
|
||||||
RESP:
|
|
||||||
成功: 返回"success", http code 200
|
|
||||||
失败: 返回包含错误信息的 json, http code 400
|
|
||||||
|
|
||||||
"""
import os
import sys
import traceback
from typing import Generator

now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))

import argparse
import subprocess
import wave
import signal
import numpy as np
import soundfile as sf
from fastapi import FastAPI, Request, HTTPException, Response, UploadFile, File
from fastapi.responses import StreamingResponse, JSONResponse
import uvicorn
from io import BytesIO
from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names
from pydantic import BaseModel

# print(sys.path)
i18n = I18nAuto()
cut_method_names = get_cut_method_names()

parser = argparse.ArgumentParser(description="GPT-SoVITS api")
parser.add_argument("-c", "--tts_config", type=str, default="GPT_SoVITS/configs/tts_infer.yaml", help="path to the tts_infer config")
parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1")
parser.add_argument("-p", "--port", type=int, default=9880, help="default: 9880")
args = parser.parse_args()
config_path = args.tts_config
# device = args.device
port = args.port
host = args.bind_addr
argv = sys.argv

if config_path in [None, ""]:
    config_path = "GPT-SoVITS/configs/tts_infer.yaml"

tts_config = TTS_Config(config_path)
print(tts_config)
tts_pipeline = TTS(tts_config)

APP = FastAPI()


class TTS_Request(BaseModel):
    text: str = None
    text_lang: str = None
    ref_audio_path: str = None
    aux_ref_audio_paths: list = None
    prompt_lang: str = None
    prompt_text: str = ""
    top_k: int = 5
    top_p: float = 1
    temperature: float = 1
    text_split_method: str = "cut5"
    batch_size: int = 1
    batch_threshold: float = 0.75
    split_bucket: bool = True
    speed_factor: float = 1.0
    fragment_interval: float = 0.3
    seed: int = -1
    media_type: str = "wav"
    streaming_mode: bool = False
    parallel_infer: bool = True
    repetition_penalty: float = 1.35


### modified from https://github.com/RVC-Boss/GPT-SoVITS/pull/894/files
def pack_ogg(io_buffer: BytesIO, data: np.ndarray, rate: int):
    with sf.SoundFile(io_buffer, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file:
        audio_file.write(data)
    return io_buffer


def pack_raw(io_buffer: BytesIO, data: np.ndarray, rate: int):
    io_buffer.write(data.tobytes())
    return io_buffer


def pack_wav(io_buffer: BytesIO, data: np.ndarray, rate: int):
    io_buffer = BytesIO()
    sf.write(io_buffer, data, rate, format='wav')
    return io_buffer


def pack_aac(io_buffer: BytesIO, data: np.ndarray, rate: int):
    process = subprocess.Popen([
        'ffmpeg',
        '-f', 's16le',     # input: 16-bit signed little-endian PCM
        '-ar', str(rate),  # sample rate
        '-ac', '1',        # mono
        '-i', 'pipe:0',    # read input from the pipe
        '-c:a', 'aac',     # encode audio as AAC
        '-b:a', '192k',    # bitrate
        '-vn',             # no video
        '-f', 'adts',      # output an ADTS AAC stream
        'pipe:1'           # write the output to the pipe
    ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = process.communicate(input=data.tobytes())
    io_buffer.write(out)
    return io_buffer


def pack_audio(io_buffer: BytesIO, data: np.ndarray, rate: int, media_type: str):
    if media_type == "ogg":
        io_buffer = pack_ogg(io_buffer, data, rate)
    elif media_type == "aac":
        io_buffer = pack_aac(io_buffer, data, rate)
    elif media_type == "wav":
        io_buffer = pack_wav(io_buffer, data, rate)
    else:
        io_buffer = pack_raw(io_buffer, data, rate)
    io_buffer.seek(0)
    return io_buffer
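
# --- illustrative usage sketch -------------------------------------------------
# pack_audio dispatches on media_type. For example, packing one second of a
# 440 Hz sine wave as 16-bit PCM into a wav container:
#
#     t = np.linspace(0, 1, 32000, endpoint=False)
#     pcm = (np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)
#     buf = pack_audio(BytesIO(), pcm, 32000, "wav")  # dispatches to pack_wav
#     with open("tone.wav", "wb") as f:
#         f.write(buf.getvalue())
# --------------------------------------------------------------------------------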


# from https://huggingface.co/spaces/coqui/voice-chat-with-mistral/blob/main/app.py
def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=32000):
    # This creates a wave header and then appends the frame input.
    # It should come first in a streaming wav file; later frames should not
    # carry it (otherwise you will hear artifacts at the start of each chunk).
    wav_buf = BytesIO()
    with wave.open(wav_buf, "wb") as vfout:
        vfout.setnchannels(channels)
        vfout.setsampwidth(sample_width)
        vfout.setframerate(sample_rate)
        vfout.writeframes(frame_input)

    wav_buf.seek(0)
    return wav_buf.read()
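
# --- illustrative streaming consumer (assumes a locally running server) --------
# In streaming mode the first chunk the server yields is the header produced
# above and every later chunk is raw PCM, so a client can simply concatenate
# the chunks as they arrive:
#
#     import requests
#
#     params = {
#         "text": "测试文本", "text_lang": "zh",
#         "ref_audio_path": "archive_jingyuan_1.wav",
#         "prompt_text": "我是「罗浮」云骑将军景元。", "prompt_lang": "zh",
#         "media_type": "wav", "streaming_mode": "true",
#     }
#     with requests.get("http://127.0.0.1:9880/tts", params=params, stream=True) as resp:
#         with open("streamed.wav", "wb") as f:
#             for chunk in resp.iter_content(chunk_size=4096):
#                 f.write(chunk)
# --------------------------------------------------------------------------------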


def handle_control(command: str):
    if command == "restart":
        os.execl(sys.executable, sys.executable, *argv)
    elif command == "exit":
        os.kill(os.getpid(), signal.SIGTERM)
        exit(0)


def check_params(req: dict):
    text: str = req.get("text", "")
    text_lang: str = req.get("text_lang", "")
    ref_audio_path: str = req.get("ref_audio_path", "")
    streaming_mode: bool = req.get("streaming_mode", False)
    media_type: str = req.get("media_type", "wav")
    prompt_lang: str = req.get("prompt_lang", "")
    text_split_method: str = req.get("text_split_method", "cut5")

    if ref_audio_path in [None, ""]:
        return JSONResponse(status_code=400, content={"message": "ref_audio_path is required"})
    if text in [None, ""]:
        return JSONResponse(status_code=400, content={"message": "text is required"})
    if text_lang in [None, ""]:
        return JSONResponse(status_code=400, content={"message": "text_lang is required"})
    elif text_lang.lower() not in tts_config.languages:
        return JSONResponse(status_code=400, content={"message": f"text_lang: {text_lang} is not supported in version {tts_config.version}"})
    if prompt_lang in [None, ""]:
        return JSONResponse(status_code=400, content={"message": "prompt_lang is required"})
    elif prompt_lang.lower() not in tts_config.languages:
        return JSONResponse(status_code=400, content={"message": f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}"})
    if media_type not in ["wav", "raw", "ogg", "aac"]:
        return JSONResponse(status_code=400, content={"message": f"media_type: {media_type} is not supported"})
    elif media_type == "ogg" and not streaming_mode:
        return JSONResponse(status_code=400, content={"message": "ogg format is not supported in non-streaming mode"})

    if text_split_method not in cut_method_names:
        return JSONResponse(status_code=400, content={"message": f"text_split_method:{text_split_method} is not supported"})

    return None


async def tts_handle(req: dict):
    """
    Text to speech handler.

    Args:
        req (dict):
            {
                "text": "",                   # str.(required) text to be synthesized
                "text_lang": "",              # str.(required) language of the text to be synthesized
                "ref_audio_path": "",         # str.(required) reference audio path
                "aux_ref_audio_paths": [],    # list.(optional) auxiliary reference audio paths for multi-speaker synthesis
                "prompt_text": "",            # str.(optional) prompt text for the reference audio
                "prompt_lang": "",            # str.(required) language of the prompt text for the reference audio
                "top_k": 5,                   # int. top k sampling
                "top_p": 1,                   # float. top p sampling
                "temperature": 1,             # float. temperature for sampling
                "text_split_method": "cut5",  # str. text split method, see text_segmentation_method.py for details.
                "batch_size": 1,              # int. batch size for inference
                "batch_threshold": 0.75,      # float. threshold for batch splitting.
                "split_bucket": True,         # bool. whether to split the batch into multiple buckets.
                "speed_factor": 1.0,          # float. controls the speed of the synthesized audio.
                "fragment_interval": 0.3,     # float. controls the interval between audio fragments.
                "seed": -1,                   # int. random seed for reproducibility.
                "media_type": "wav",          # str. media type of the output audio, supports "wav", "raw", "ogg", "aac".
                "streaming_mode": False,      # bool. whether to return a streaming response.
                "parallel_infer": True,       # bool.(optional) whether to use parallel inference.
                "repetition_penalty": 1.35    # float.(optional) repetition penalty for the T2S model.
            }
    returns:
        StreamingResponse: audio stream response.
    """

    streaming_mode = req.get("streaming_mode", False)
    return_fragment = req.get("return_fragment", False)
    media_type = req.get("media_type", "wav")

    check_res = check_params(req)
    if check_res is not None:
        return check_res

    if streaming_mode or return_fragment:
        req["return_fragment"] = True

    try:
        tts_generator = tts_pipeline.run(req)

        if streaming_mode:
            def streaming_generator(tts_generator: Generator, media_type: str):
                if media_type == "wav":
                    yield wave_header_chunk()
                    media_type = "raw"
                for sr, chunk in tts_generator:
                    yield pack_audio(BytesIO(), chunk, sr, media_type).getvalue()
            # _media_type = f"audio/{media_type}" if not (streaming_mode and media_type in ["wav", "raw"]) else f"audio/x-{media_type}"
            return StreamingResponse(streaming_generator(tts_generator, media_type), media_type=f"audio/{media_type}")

        else:
            sr, audio_data = next(tts_generator)
            audio_data = pack_audio(BytesIO(), audio_data, sr, media_type).getvalue()
            return Response(audio_data, media_type=f"audio/{media_type}")
    except Exception as e:
        return JSONResponse(status_code=400, content={"message": "tts failed", "Exception": str(e)})


@APP.get("/control")
async def control(command: str = None):
    if command is None:
        return JSONResponse(status_code=400, content={"message": "command is required"})
    handle_control(command)


@APP.get("/tts")
async def tts_get_endpoint(
    text: str = None,
    text_lang: str = None,
    ref_audio_path: str = None,
    aux_ref_audio_paths: list = None,
    prompt_lang: str = None,
    prompt_text: str = "",
    top_k: int = 5,
    top_p: float = 1,
    temperature: float = 1,
    text_split_method: str = "cut0",
    batch_size: int = 1,
    batch_threshold: float = 0.75,
    split_bucket: bool = True,
    speed_factor: float = 1.0,
    fragment_interval: float = 0.3,
    seed: int = -1,
    media_type: str = "wav",
    streaming_mode: bool = False,
    parallel_infer: bool = True,
    repetition_penalty: float = 1.35
):
    req = {
        "text": text,
        "text_lang": text_lang.lower(),
        "ref_audio_path": ref_audio_path,
        "aux_ref_audio_paths": aux_ref_audio_paths,
        "prompt_text": prompt_text,
        "prompt_lang": prompt_lang.lower(),
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "text_split_method": text_split_method,
        "batch_size": int(batch_size),
        "batch_threshold": float(batch_threshold),
        "speed_factor": float(speed_factor),
        "split_bucket": split_bucket,
        "fragment_interval": fragment_interval,
        "seed": seed,
        "media_type": media_type,
        "streaming_mode": streaming_mode,
        "parallel_infer": parallel_infer,
        "repetition_penalty": float(repetition_penalty)
    }
    return await tts_handle(req)


@APP.post("/tts")
async def tts_post_endpoint(request: TTS_Request):
    req = request.dict()
    return await tts_handle(req)


@APP.get("/set_refer_audio")
async def set_refer_audio(refer_audio_path: str = None):
    try:
        tts_pipeline.set_ref_audio(refer_audio_path)
    except Exception as e:
        return JSONResponse(status_code=400, content={"message": "set refer audio failed", "Exception": str(e)})
    return JSONResponse(status_code=200, content={"message": "success"})


# @APP.post("/set_refer_audio")
# async def set_refer_audio_post(audio_file: UploadFile = File(...)):
#     try:
#         # check the content type to make sure an audio file was uploaded
#         if not audio_file.content_type.startswith("audio/"):
#             return JSONResponse(status_code=400, content={"message": "file type is not supported"})
#
#         os.makedirs("uploaded_audio", exist_ok=True)
#         save_path = os.path.join("uploaded_audio", audio_file.filename)
#         # save the audio file to a directory on the server
#         with open(save_path, "wb") as buffer:
#             buffer.write(await audio_file.read())
#
#         tts_pipeline.set_ref_audio(save_path)
#     except Exception as e:
#         return JSONResponse(status_code=400, content={"message": "set refer audio failed", "Exception": str(e)})
#     return JSONResponse(status_code=200, content={"message": "success"})


@APP.get("/set_gpt_weights")
async def set_gpt_weights(weights_path: str = None):
    try:
        if weights_path in ["", None]:
            return JSONResponse(status_code=400, content={"message": "gpt weight path is required"})
        tts_pipeline.init_t2s_weights(weights_path)
    except Exception as e:
        return JSONResponse(status_code=400, content={"message": "change gpt weight failed", "Exception": str(e)})

    return JSONResponse(status_code=200, content={"message": "success"})


@APP.get("/set_sovits_weights")
async def set_sovits_weights(weights_path: str = None):
    try:
        if weights_path in ["", None]:
            return JSONResponse(status_code=400, content={"message": "sovits weight path is required"})
        tts_pipeline.init_vits_weights(weights_path)
    except Exception as e:
        return JSONResponse(status_code=400, content={"message": "change sovits weight failed", "Exception": str(e)})
    return JSONResponse(status_code=200, content={"message": "success"})


if __name__ == "__main__":
    try:
        if host == 'None':  # passing "-a None" on the command line lets the api listen on a dual stack (IPv4 + IPv6)
            host = None
        uvicorn.run(app=APP, host=host, port=port, workers=1)
    except Exception:
        traceback.print_exc()
        os.kill(os.getpid(), signal.SIGTERM)
        exit(0)
@ -1,97 +0,0 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "环境配置 environment"
      ],
      "metadata": {
        "id": "_o6a8GS2lWQM"
      }
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "e9b7iFV3dm1f"
      },
      "source": [
        "!pip install -q condacolab\n",
        "# Setting up condacolab and installing packages\n",
        "import condacolab\n",
        "condacolab.install_from_url(\"https://repo.anaconda.com/miniconda/Miniconda3-py39_23.11.0-2-Linux-x86_64.sh\")\n",
        "%cd -q /content\n",
        "!git clone https://github.com/RVC-Boss/GPT-SoVITS\n",
        "!conda install -y -q -c pytorch -c nvidia cudatoolkit\n",
        "%cd -q /content/GPT-SoVITS\n",
        "!conda install -y -q -c conda-forge gcc gxx ffmpeg cmake -c pytorch -c nvidia\n",
        "!/usr/local/bin/pip install -r requirements.txt"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# @title Download pretrained models 下载预训练模型\n",
        "!mkdir -p /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
        "!mkdir -p /content/GPT-SoVITS/tools/damo_asr/models\n",
        "!mkdir -p /content/GPT-SoVITS/tools/uvr5\n",
        "%cd /content/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
        "!git clone https://huggingface.co/lj1995/GPT-SoVITS\n",
        "%cd /content/GPT-SoVITS/tools/damo_asr/models\n",
        "!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n",
        "!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n",
        "!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n",
        "# @title UVR5 pretrains 安装uvr5模型\n",
        "%cd /content/GPT-SoVITS/tools/uvr5\n",
        "%rm -r uvr5_weights\n",
        "!git clone https://huggingface.co/Delik/uvr5_weights\n",
        "!git config core.sparseCheckout true\n",
        "!mv /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/"
      ],
      "metadata": {
        "id": "0NgxXg5sjv7z"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# @title launch WebUI 启动WebUI\n",
        "!/usr/local/bin/pip install ipykernel\n",
        "!sed -i '10s/False/True/' /content/GPT-SoVITS/config.py\n",
        "%cd /content/GPT-SoVITS/\n",
        "!/usr/local/bin/python webui.py"
      ],
      "metadata": {
        "id": "4oRGUzkrk8C7"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}
@ -1,2 +0,0 @@
runtime\python.exe webui.py zh_CN
pause
@ -1,4 +0,0 @@
$ErrorActionPreference = "SilentlyContinue"
chcp 65001
& "$PSScriptRoot\runtime\python.exe" "$PSScriptRoot\webui.py" zh_CN
pause
265  inference.py  Normal file
@ -0,0 +1,265 @@
import os
import sys
import traceback
from typing import Generator

now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))
import argparse
import subprocess
import wave
import signal
import numpy as np
import soundfile as sf
from io import BytesIO
from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names
from pydantic import BaseModel

i18n = I18nAuto()
cut_method_names = get_cut_method_names()

parser = argparse.ArgumentParser(description="GPT-SoVITS api")
parser.add_argument("-c", "--tts_config", type=str, default="GPT_SoVITS/configs/tts_infer.yaml", help="path to the tts_infer config")
parser.add_argument("-a", "--bind_addr", type=str, default="127.0.0.1", help="default: 127.0.0.1")
parser.add_argument("-p", "--port", type=int, default=9880, help="default: 9880")
args = parser.parse_args()
config_path = args.tts_config
# device = args.device
port = args.port
host = args.bind_addr
argv = sys.argv

if config_path in [None, ""]:
    config_path = "GPT-SoVITS/configs/tts_infer.yaml"

tts_config = TTS_Config(config_path)
print(tts_config)
tts_pipeline = TTS(tts_config)

# speaker configuration
speakers = {
    "firefly": {
        "gpt_model": "/root/autodl-tmp/GPT-SoVITS/models/GPT_models/firefly_312-e15.ckpt",
        "sovits_model": "/root/autodl-tmp/GPT-SoVITS/models/VITS_models/firefly_312_e8_s504.pth",
        "ref_audio": "/root/autodl-tmp/GPT-SoVITS/firefly/chapter3_2_firefly_103.wav",
        "ref_text": "谢谢,如果没有您出手相助,我真的不知道该怎么办",
        "ref_language": "zh",
        "target_language": "zh"
    },
    "keele": {
        "gpt_model": "/root/autodl-tmp/GPT-SoVITS/models/GPT_models/Keele-e15.ckpt",
        "sovits_model": "/root/autodl-tmp/GPT-SoVITS/models/VITS_models/Keele_e8_s656.pth",
        "ref_audio": "/root/autodl-tmp/GPT-SoVITS/keele/vo_dialog_KLLQ003_klee_03.wav",
        "ref_text": "我听说,冒险家协会也有一套冒险的守则,是不是,应该去拜托他们",
        "ref_language": "zh",
        "target_language": "zh"
    },
}
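
# --- illustrative note -----------------------------------------------------------
# The absolute paths above are specific to one machine. Adding a voice is just a
# new entry in this dict; every name and path below is hypothetical:
#
#     speakers["my_voice"] = {
#         "gpt_model": "models/GPT_models/my_voice-e15.ckpt",         # hypothetical path
#         "sovits_model": "models/VITS_models/my_voice_e8_s500.pth",  # hypothetical path
#         "ref_audio": "refs/my_voice_ref.wav",                       # short reference clip
#         "ref_text": "transcript of the reference clip",
#         "ref_language": "zh",
#         "target_language": "zh",
#     }
# -----------------------------------------------------------------------------------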
|
||||||
|
|
||||||
|
# process the output audio type
|
||||||
|
def pack_ogg(io_buffer:BytesIO, data:np.ndarray, rate:int):
|
||||||
|
with sf.SoundFile(io_buffer, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file:
|
||||||
|
audio_file.write(data)
|
||||||
|
return io_buffer
|
||||||
|
|
||||||
|
def pack_raw(io_buffer:BytesIO, data:np.ndarray, rate:int):
|
||||||
|
io_buffer.write(data.tobytes())
|
||||||
|
return io_buffer
|
||||||
|
|
||||||
|
def pack_wav(io_buffer:BytesIO, data:np.ndarray, rate:int):
|
||||||
|
io_buffer = BytesIO()
|
||||||
|
sf.write(io_buffer, data, rate, format='wav')
|
||||||
|
return io_buffer
|
||||||
|
|
||||||
|
def pack_aac(io_buffer:BytesIO, data:np.ndarray, rate:int):
|
||||||
|
process = subprocess.Popen([
|
||||||
|
'ffmpeg',
|
||||||
|
'-f', 's16le', # 输入16位有符号小端整数PCM
|
||||||
|
'-ar', str(rate), # 设置采样率
|
||||||
|
'-ac', '1', # 单声道
|
||||||
|
'-i', 'pipe:0', # 从管道读取输入
|
||||||
|
'-c:a', 'aac', # 音频编码器为AAC
|
||||||
|
'-b:a', '192k', # 比特率
|
||||||
|
'-vn', # 不包含视频
|
||||||
|
'-f', 'adts', # 输出AAC数据流格式
|
||||||
|
'pipe:1' # 将输出写入管道
|
||||||
|
], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
out, _ = process.communicate(input=data.tobytes())
|
||||||
|
io_buffer.write(out)
|
||||||
|
return io_buffer
|
||||||
|
|
||||||
|
def pack_audio(io_buffer:BytesIO, data:np.ndarray, rate:int, media_type:str):
|
||||||
|
if media_type == "ogg":
|
||||||
|
io_buffer = pack_ogg(io_buffer, data, rate)
|
||||||
|
elif media_type == "aac":
|
||||||
|
io_buffer = pack_aac(io_buffer, data, rate)
|
||||||
|
elif media_type == "wav":
|
||||||
|
io_buffer = pack_wav(io_buffer, data, rate)
|
||||||
|
else:
|
||||||
|
io_buffer = pack_raw(io_buffer, data, rate)
|
||||||
|
io_buffer.seek(0)
|
||||||
|
return io_buffer
|
||||||
|
|
||||||
|
# create the audio from text on specific speaker
|
||||||
|
def check_params(req:dict):
|
||||||
|
text:str = req.get("text", "")
|
||||||
|
text_lang:str = req.get("text_lang", "")
|
||||||
|
ref_audio_path:str = req.get("ref_audio_path", "")
|
||||||
|
streaming_mode:bool = req.get("streaming_mode", False)
|
||||||
|
media_type:str = req.get("media_type", "wav")
|
||||||
|
prompt_lang:str = req.get("prompt_lang", "")
|
||||||
|
text_split_method:str = req.get("text_split_method", "cut5")
|
||||||
|
|
||||||
|
if ref_audio_path in [None, ""]:
|
||||||
|
print("ref_audio_path is required")
|
||||||
|
return False
|
||||||
|
if text in [None, ""]:
|
||||||
|
print("text is required")
|
||||||
|
return False
|
||||||
|
|
||||||
|
if (text_lang in [None, ""]) :
|
||||||
|
print("text_lang is required")
|
||||||
|
return False
|
||||||
|
elif text_lang.lower() not in tts_config.languages:
|
||||||
|
print(f"text_lang: {text_lang} is not supported in version {tts_config.version}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
if (prompt_lang in [None, ""]) :
|
||||||
|
print("prompt_lang is required")
|
||||||
|
return False
|
||||||
|
elif prompt_lang.lower() not in tts_config.languages:
|
||||||
|
print(f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
if media_type not in ["wav", "raw", "ogg", "aac"]:
|
||||||
|
print(f"media_type: {media_type} is not supported")
|
||||||
|
return False
|
||||||
|
elif media_type == "ogg" and not streaming_mode:
|
||||||
|
print("ogg format is not supported in non-streaming mode")
|
||||||
|
return False
|
||||||
|
|
||||||
|
if text_split_method not in cut_method_names:
|
||||||
|
print(f"text_split_method:{text_split_method} is not supported")
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=32000):
|
||||||
|
# This will create a wave header then append the frame input
|
||||||
|
# It should be first on a streaming wav file
|
||||||
|
# Other frames better should not have it (else you will hear some artifacts each chunk start)
|
||||||
|
wav_buf = BytesIO()
|
||||||
|
with wave.open(wav_buf, "wb") as vfout:
|
||||||
|
vfout.setnchannels(channels)
|
||||||
|
vfout.setsampwidth(sample_width)
|
||||||
|
vfout.setframerate(sample_rate)
|
||||||
|
vfout.writeframes(frame_input)
|
||||||
|
|
||||||
|
wav_buf.seek(0)
|
||||||
|
return wav_buf.read()
|
||||||
|
|
||||||
|
def tts_handle(req: dict):
    """
    Text-to-speech handler.

    Args:
        req (dict):
        {
            "text": "",                   # str.(required) text to be synthesized
            "text_lang": "",              # str.(required) language of the text to be synthesized
            "ref_audio_path": "",         # str.(required) reference audio path
            "aux_ref_audio_paths": [],    # list.(optional) auxiliary reference audio paths for multi-speaker synthesis
            "prompt_text": "",            # str.(optional) prompt text for the reference audio
            "prompt_lang": "",            # str.(required) language of the prompt text for the reference audio
            "top_k": 5,                   # int. top k sampling
            "top_p": 1,                   # float. top p sampling
            "temperature": 1,             # float. temperature for sampling
            "text_split_method": "cut5",  # str. text split method, see text_segmentation_method.py for details.
            "batch_size": 1,              # int. batch size for inference
            "batch_threshold": 0.75,      # float. threshold for batch splitting.
            "split_bucket": True,         # bool. whether to split the batch into multiple buckets.
            "speed_factor": 1.0,          # float. control the speed of the synthesized audio.
            "fragment_interval": 0.3,     # float. control the interval of the audio fragments.
            "seed": -1,                   # int. random seed for reproducibility.
            "media_type": "wav",          # str. media type of the output audio, supports "wav", "raw", "ogg", "aac".
            "streaming_mode": False,      # bool. whether to return a streaming response.
            "parallel_infer": True,       # bool.(optional) whether to use parallel inference.
            "repetition_penalty": 1.35    # float.(optional) repetition penalty for the T2S model.
        }

    Returns:
        In streaming mode, a generator yielding packed audio chunks;
        otherwise the packed audio bytes. Returns None on invalid
        parameters or on failure.
    """

    streaming_mode = req.get("streaming_mode", False)
    return_fragment = req.get("return_fragment", False)
    media_type = req.get("media_type", "wav")

    check_res = check_params(req)
    if not check_res:
        return None

    if streaming_mode or return_fragment:
        req["return_fragment"] = True

    try:
        tts_generator = tts_pipeline.run(req)

        if streaming_mode:
            def streaming_generator(tts_generator: Generator, media_type: str):
                # Emit one WAV header, then stream raw PCM frames.
                if media_type == "wav":
                    yield wave_header_chunk()
                    media_type = "raw"
                for sr, chunk in tts_generator:
                    yield pack_audio(BytesIO(), chunk, sr, media_type).getvalue()

            return streaming_generator(tts_generator, media_type)

        else:
            sr, audio_data = next(tts_generator)
            audio_data = pack_audio(BytesIO(), audio_data, sr, media_type).getvalue()
            return audio_data
    except Exception as e:
        print("tts failed, Exception:", str(e))
        return None

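# --- Illustrative usage sketch (not part of the original file) ---
# A minimal non-streaming call to tts_handle. The model weights are
# assumed to be loaded into tts_pipeline already, "en" is assumed to be
# a supported language, and the path and texts below are placeholders.
def _example_tts_handle():
    req = {
        "text": "Hello world",
        "text_lang": "en",
        "ref_audio_path": "ref/speaker.wav",    # hypothetical path
        "prompt_text": "reference transcript",  # hypothetical transcript
        "prompt_lang": "en",
        "media_type": "wav",
        "streaming_mode": False,
    }
    wav_bytes = tts_handle(req)
    if wav_bytes is not None:
        with open("example.wav", "wb") as f:
            f.write(wav_bytes)
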
def tts_get_endpoint(text, speaker_id="firefly"):
    speaker = speakers[speaker_id]
    # Load the per-speaker SoVITS and GPT weights before running inference.
    tts_pipeline.init_vits_weights(speaker["sovits_model"])
    tts_pipeline.init_t2s_weights(speaker["gpt_model"])
    req = {
        "text": text,
        "text_lang": speaker["target_language"],
        "ref_audio_path": speaker["ref_audio"],
        "aux_ref_audio_paths": None,
        "prompt_text": speaker["ref_text"],
        "prompt_lang": speaker["ref_language"],
        "top_k": 15,
        "top_p": 1.0,
        "temperature": 1.0,
        "text_split_method": "cut0",
        "batch_size": 1,
        "batch_threshold": 0.75,
        "speed_factor": 0.75,
        "split_bucket": True,
        "fragment_interval": 0.3,
        "seed": -1,
        "media_type": "wav",
        "streaming_mode": False,
        "parallel_infer": True,
        "repetition_penalty": 1.35,
    }
    return tts_handle(req)

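# --- Illustrative sketch (not part of the original file) ---
# The shape of a `speakers` registry entry that tts_get_endpoint expects;
# every path and text below is a placeholder, not the project's actual data.
_example_speakers = {
    "firefly": {
        "gpt_model": "models/firefly-gpt.ckpt",       # hypothetical path
        "sovits_model": "models/firefly-sovits.pth",  # hypothetical path
        "ref_audio": "ref/firefly.wav",               # hypothetical path
        "ref_text": "参考音频的文本",                   # hypothetical transcript
        "ref_language": "zh",
        "target_language": "zh",
    }
}
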
def save_wav(filename, audio_data, sample_rate):
    # Expects raw 16-bit mono PCM frames; wraps them in a WAV header.
    with wave.open(filename, 'wb') as wav_file:
        wav_file.setnchannels(1)
        wav_file.setsampwidth(2)
        wav_file.setframerate(sample_rate)
        wav_file.writeframes(audio_data)

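# --- Illustrative usage sketch (not part of the original file) ---
# save_wav takes raw PCM frames, not a complete WAV file. One second of
# 16-bit silence at 32 kHz serves as placeholder data here.
def _example_save_wav():
    silence = b"\x00\x00" * 32000
    save_wav("silence.wav", silence, 32000)
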
if __name__ == "__main__":
    audio = tts_get_endpoint("我是「罗浮」云骑将军景元。不必拘谨,「将军」只是一时的身份,你称呼我景元便可")
    # tts_get_endpoint requests media_type "wav", so `audio` is already a
    # complete WAV file; write the bytes directly rather than wrapping
    # them in a second header via save_wav.
    if audio is not None:
        with open("./output.wav", "wb") as f:
            f.write(audio)