Merge branch 'RVC-Boss:main' into dev

This commit is contained in:
刘洋 2024-06-27 03:05:09 +08:00 committed by GitHub
commit 7e5c48eba2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
50 changed files with 120018 additions and 114286 deletions

.gitignore vendored

@ -10,5 +10,3 @@ reference
GPT_weights
SoVITS_weights
TEMP


@ -34,9 +34,6 @@ RUN if [ "$IMAGE_TYPE" != "elite" ]; then \
fi
# Copy the rest of the application
COPY . /workspace
# Copy the rest of the application
COPY . /workspace


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/bucketsampler.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/bucket_sampler.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import math
import random


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/data_module.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/data_module.py
# reference: https://github.com/lifeiteng/vall-e
from pytorch_lightning import LightningDataModule
from AR.data.bucket_sampler import DistributedBucketSampler
from AR.data.dataset import Text2SemanticDataset


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/t2s_dataset.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/dataset.py
# reference: https://github.com/lifeiteng/vall-e
import pdb
import sys
@ -63,7 +64,7 @@ class Text2SemanticDataset(Dataset):
# get dict
self.path2 = phoneme_path # "%s/2-name2text.txt"%exp_dir#phoneme_path
self.path3 = "%s/3-bert" % (
os.path.basename(phoneme_path)
os.path.dirname(phoneme_path)
) # "%s/3-bert"%exp_dir#bert_dir
self.path6 = semantic_path # "%s/6-name2semantic.tsv"%exp_dir#semantic_path
assert os.path.exists(self.path2)
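The one-line fix above swaps `os.path.basename` for `os.path.dirname` when locating the BERT feature directory. A minimal sketch of the difference, using a hypothetical `phoneme_path` (not from this commit):

```python
import os

phoneme_path = "logs/exp1/2-name2text.txt"  # hypothetical experiment layout

# old: builds the path from the file name, producing a directory that never exists
print("%s/3-bert" % os.path.basename(phoneme_path))  # 2-name2text.txt/3-bert
# new: builds it from the containing directory, i.e. "%s/3-bert" % exp_dir
print("%s/3-bert" % os.path.dirname(phoneme_path))   # logs/exp1/3-bert
```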


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_lightning_module.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os, sys
now_dir = os.getcwd()


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_lightning_module.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
# reference: https://github.com/lifeiteng/vall-e
import os, sys
now_dir = os.getcwd()


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_model.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import torch
from tqdm import tqdm


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/t2s_model.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
import torch
from tqdm import tqdm


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/utils.py\
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
# reference: https://github.com/lifeiteng/vall-e
import torch
import torch.nn.functional as F
from typing import Tuple


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/model/lr_schedulers.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/modules/lr_schedulers.py
# reference: https://github.com/lifeiteng/vall-e
import math
import torch


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/text_processing/phonemizer.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py
# reference: https://github.com/lifeiteng/vall-e
import itertools
import re
from typing import Dict


@ -1,4 +1,5 @@
# modified from https://github.com/feng-yufei/shared_debugging_code/blob/main/text_processing/symbols.py
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py
# reference: https://github.com/lifeiteng/vall-e
PAD = "_"
PUNCTUATION = ';:,.!?¡¿—…"«»“” '
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"


@ -0,0 +1,55 @@
import argparse
import os
import soundfile as sf

from tools.i18n.i18n import I18nAuto
from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav

i18n = I18nAuto()

def synthesize(GPT_model_path, SoVITS_model_path, ref_audio_path, ref_text_path, ref_language, target_text_path, target_language, output_path):
    # Read reference text
    with open(ref_text_path, 'r', encoding='utf-8') as file:
        ref_text = file.read()

    # Read target text
    with open(target_text_path, 'r', encoding='utf-8') as file:
        target_text = file.read()

    # Change model weights
    change_gpt_weights(gpt_path=GPT_model_path)
    change_sovits_weights(sovits_path=SoVITS_model_path)

    # Synthesize audio
    synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
                                   prompt_text=ref_text,
                                   prompt_language=i18n(ref_language),
                                   text=target_text,
                                   text_language=i18n(target_language), top_p=1, temperature=1)

    result_list = list(synthesis_result)

    if result_list:
        last_sampling_rate, last_audio_data = result_list[-1]
        output_wav_path = os.path.join(output_path, "output.wav")
        sf.write(output_wav_path, last_audio_data, last_sampling_rate)
        print(f"Audio saved to {output_wav_path}")

def main():
    parser = argparse.ArgumentParser(description="GPT-SoVITS Command Line Tool")
    parser.add_argument('--gpt_model', required=True, help="Path to the GPT model file")
    parser.add_argument('--sovits_model', required=True, help="Path to the SoVITS model file")
    parser.add_argument('--ref_audio', required=True, help="Path to the reference audio file")
    parser.add_argument('--ref_text', required=True, help="Path to the reference text file")
    parser.add_argument('--ref_language', required=True, choices=["中文", "英文", "日文"], help="Language of the reference audio")
    parser.add_argument('--target_text', required=True, help="Path to the target text file")
    parser.add_argument('--target_language', required=True, choices=["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"], help="Language of the target text")
    parser.add_argument('--output_path', required=True, help="Path to the output directory")

    args = parser.parse_args()

    synthesize(args.gpt_model, args.sovits_model, args.ref_audio, args.ref_text, args.ref_language, args.target_text, args.target_language, args.output_path)

if __name__ == '__main__':
    main()
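For reference, a hypothetical way to drive the new CLI from Python; the module path assumes the file lands at `GPT_SoVITS/inference_cli.py` (consistent with its `GPT_SoVITS.inference_webui` import), and every path below is a placeholder:

```python
from GPT_SoVITS.inference_cli import synthesize

# All paths are placeholders; substitute your own models and data.
synthesize(
    GPT_model_path="GPT_weights/my_model-e15.ckpt",
    SoVITS_model_path="SoVITS_weights/my_model_e8_s800.pth",
    ref_audio_path="ref/ref.wav",
    ref_text_path="ref/ref.txt",
    ref_language="中文",
    target_text_path="input/target.txt",
    target_language="中英混合",
    output_path="output",  # output.wav is written into this directory
)
```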


@ -1,3 +1,4 @@
import os
import sys
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit
@ -7,16 +8,16 @@ import soundfile as sf
from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav
from inference_webui import gpt_path, sovits_path, change_gpt_weights, change_sovits_weights, get_tts_wav
class GPTSoVITSGUI(QMainWindow):
GPT_Path = gpt_path
SoVITS_Path = sovits_path
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
self.setWindowTitle('GPT-SoVITS GUI')
self.setGeometry(800, 450, 950, 850)
@ -71,6 +72,7 @@ class GPTSoVITSGUI(QMainWindow):
self.GPT_model_label = QLabel("选择GPT模型:")
self.GPT_model_input = QLineEdit()
self.GPT_model_input.setPlaceholderText("拖拽或选择文件")
self.GPT_model_input.setText(self.GPT_Path)
self.GPT_model_input.setReadOnly(True)
self.GPT_model_button = QPushButton("选择GPT模型文件")
self.GPT_model_button.clicked.connect(self.select_GPT_model)
@ -78,6 +80,7 @@ class GPTSoVITSGUI(QMainWindow):
self.SoVITS_model_label = QLabel("选择SoVITS模型:")
self.SoVITS_model_input = QLineEdit()
self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件")
self.SoVITS_model_input.setText(self.SoVITS_Path)
self.SoVITS_model_input.setReadOnly(True)
self.SoVITS_model_button = QPushButton("选择SoVITS模型文件")
self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model)
@ -91,25 +94,25 @@ class GPTSoVITSGUI(QMainWindow):
self.ref_text_label = QLabel("参考音频文本:")
self.ref_text_input = QLineEdit()
self.ref_text_input.setPlaceholderText("拖拽或选择文件")
self.ref_text_input.setReadOnly(True)
self.ref_text_input.setPlaceholderText("直接输入文字或上传文本")
self.ref_text_button = QPushButton("上传文本")
self.ref_text_button.clicked.connect(self.upload_ref_text)
self.language_label = QLabel("参考音频语言:")
self.language_combobox = QComboBox()
self.language_combobox.addItems(["中文", "英文", "日文"])
self.ref_language_label = QLabel("参考音频语言:")
self.ref_language_combobox = QComboBox()
self.ref_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
self.ref_language_combobox.setCurrentText("多语种混合")
self.target_text_label = QLabel("合成目标文本:")
self.target_text_input = QLineEdit()
self.target_text_input.setPlaceholderText("拖拽或选择文件")
self.target_text_input.setReadOnly(True)
self.target_text_input.setPlaceholderText("直接输入文字或上传文本")
self.target_text_button = QPushButton("上传文本")
self.target_text_button.clicked.connect(self.upload_target_text)
self.language_label_02 = QLabel("合成音频语言:")
self.language_combobox_02 = QComboBox()
self.language_combobox_02.addItems(["中文", "英文", "日文"])
self.target_language_label = QLabel("合成音频语言:")
self.target_language_combobox = QComboBox()
self.target_language_combobox.addItems(["中文", "英文", "日文", "中英混合", "日英混合", "多语种混合"])
self.target_language_combobox.setCurrentText("多语种混合")
self.output_label = QLabel("输出音频路径:")
self.output_input = QLineEdit()
@ -140,11 +143,9 @@ class GPTSoVITSGUI(QMainWindow):
main_layout = QVBoxLayout()
input_layout = QGridLayout()
input_layout = QGridLayout(self)
input_layout.setSpacing(10)
self.setLayout(input_layout)
input_layout.addWidget(license_label, 0, 0, 1, 3)
input_layout.addWidget(self.GPT_model_label, 1, 0)
@ -159,14 +160,14 @@ class GPTSoVITSGUI(QMainWindow):
input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2)
input_layout.addWidget(self.ref_audio_button, 6, 2)
input_layout.addWidget(self.language_label, 7, 0)
input_layout.addWidget(self.language_combobox, 8, 0, 1, 1)
input_layout.addWidget(self.ref_language_label, 7, 0)
input_layout.addWidget(self.ref_language_combobox, 8, 0, 1, 1)
input_layout.addWidget(self.ref_text_label, 9, 0)
input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2)
input_layout.addWidget(self.ref_text_button, 10, 2)
input_layout.addWidget(self.language_label_02, 11, 0)
input_layout.addWidget(self.language_combobox_02, 12, 0, 1, 1)
input_layout.addWidget(self.target_language_label, 11, 0)
input_layout.addWidget(self.target_language_combobox, 12, 0, 1, 1)
input_layout.addWidget(self.target_text_label, 13, 0)
input_layout.addWidget(self.target_text_input, 14, 0, 1, 2)
input_layout.addWidget(self.target_text_button, 14, 2)
@ -198,10 +199,8 @@ class GPTSoVITSGUI(QMainWindow):
def dropEvent(self, event):
if event.mimeData().hasUrls():
file_paths = [url.toLocalFile() for url in event.mimeData().urls()]
if len(file_paths) == 1:
self.update_ref_audio(file_paths[0])
self.update_input_paths(self.ref_audio_input, file_paths[0])
else:
self.update_ref_audio(", ".join(file_paths))
@ -211,21 +210,11 @@ class GPTSoVITSGUI(QMainWindow):
widget.installEventFilter(self)
def eventFilter(self, obj, event):
if event.type() == QEvent.DragEnter:
if event.type() in (QEvent.DragEnter, QEvent.Drop):
mime_data = event.mimeData()
if mime_data.hasUrls():
event.acceptProposedAction()
elif event.type() == QEvent.Drop:
mime_data = event.mimeData()
if mime_data.hasUrls():
file_paths = [url.toLocalFile() for url in mime_data.urls()]
if len(file_paths) == 1:
self.update_input_paths(obj, file_paths[0])
else:
self.update_input_paths(obj, ", ".join(file_paths))
event.acceptProposedAction()
return super().eventFilter(obj, event)
def select_GPT_model(self):
@ -239,24 +228,9 @@ class GPTSoVITSGUI(QMainWindow):
self.SoVITS_model_input.setText(file_path)
def select_ref_audio(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
options |= QFileDialog.ShowDirsOnly
file_dialog = QFileDialog()
file_dialog.setOptions(options)
file_dialog.setFileMode(QFileDialog.AnyFile)
file_dialog.setNameFilter("Audio Files (*.wav *.mp3)")
if file_dialog.exec_():
file_paths = file_dialog.selectedFiles()
if len(file_paths) == 1:
self.update_ref_audio(file_paths[0])
self.update_input_paths(self.ref_audio_input, file_paths[0])
else:
self.update_ref_audio(", ".join(file_paths))
file_path, _ = QFileDialog.getOpenFileName(self, "选择参考音频文件", "", "Audio Files (*.wav *.mp3)")
if file_path:
self.update_ref_audio(file_path)
def upload_ref_text(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
@ -264,7 +238,6 @@ class GPTSoVITSGUI(QMainWindow):
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
self.ref_text_input.setText(content)
self.update_input_paths(self.ref_text_input, file_path)
def upload_target_text(self):
file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
@ -272,7 +245,6 @@ class GPTSoVITSGUI(QMainWindow):
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
self.target_text_input.setText(content)
self.update_input_paths(self.target_text_input, file_path)
def select_output_path(self):
options = QFileDialog.Options()
@ -290,9 +262,6 @@ class GPTSoVITSGUI(QMainWindow):
def update_ref_audio(self, file_path):
self.ref_audio_input.setText(file_path)
def update_input_paths(self, input_box, file_path):
input_box.setText(file_path)
def clear_output(self):
self.output_text.clear()
@ -300,22 +269,26 @@ class GPTSoVITSGUI(QMainWindow):
GPT_model_path = self.GPT_model_input.text()
SoVITS_model_path = self.SoVITS_model_input.text()
ref_audio_path = self.ref_audio_input.text()
language_combobox = self.language_combobox.currentText()
language_combobox = self.ref_language_combobox.currentText()
language_combobox = i18n(language_combobox)
ref_text = self.ref_text_input.text()
language_combobox_02 = self.language_combobox_02.currentText()
language_combobox_02 = i18n(language_combobox_02)
target_language_combobox = self.target_language_combobox.currentText()
target_language_combobox = i18n(target_language_combobox)
target_text = self.target_text_input.text()
output_path = self.output_input.text()
if GPT_model_path != self.GPT_Path:
change_gpt_weights(gpt_path=GPT_model_path)
self.GPT_Path = GPT_model_path
if SoVITS_model_path != self.SoVITS_Path:
change_sovits_weights(sovits_path=SoVITS_model_path)
self.SoVITS_Path = SoVITS_model_path
synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
prompt_text=ref_text,
prompt_language=language_combobox,
text=target_text,
text_language=language_combobox_02)
text_language=target_language_combobox)
result_list = list(synthesis_result)
@ -329,12 +302,9 @@ class GPTSoVITSGUI(QMainWindow):
self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000)
self.output_text.append("处理结果:\n" + result)
def main():
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = GPTSoVITSGUI()
mainWin.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()


@ -16,6 +16,7 @@ logging.getLogger("asyncio").setLevel(logging.ERROR)
logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
import pdb
import torch
if os.path.exists("./gweight.txt"):
with open("./gweight.txt", 'r', encoding="utf-8") as file:
@ -48,11 +49,12 @@ is_share = os.environ.get("is_share", "False")
is_share = eval(is_share)
if "_CUDA_VISIBLE_DEVICES" in os.environ:
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
is_half = eval(os.environ.get("is_half", "True"))
is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
punctuation = set(['!', '?', '…', ',', '.', '-'," "])
import gradio as gr
from transformers import AutoModelForMaskedLM, AutoTokenizer
import numpy as np
import librosa, torch
import librosa
from feature_extractor import cnhubert
cnhubert.cnhubert_base_path = cnhubert_base_path
@ -68,7 +70,7 @@ from tools.i18n.i18n import I18nAuto
i18n = I18nAuto()
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # ensure this is set even when the inference UI is launched directly.
# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # ensure this is set even when the inference UI is launched directly.
if torch.cuda.is_available():
device = "cuda"
@ -257,9 +259,13 @@ def get_phones_and_bert(text,language):
elif language in {"zh", "ja","auto"}:
textlist=[]
langlist=[]
LangSegment.setfilters(["zh","ja","en"])
LangSegment.setfilters(["zh","ja","en","ko"])
if language == "auto":
for tmp in LangSegment.getTexts(text):
if tmp["lang"] == "ko":
langlist.append("zh")
textlist.append(tmp["text"])
else:
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
else:
@ -317,6 +323,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
print(i18n("实际输入的参考文本:"), prompt_text)
text = text.strip("\n")
text = replace_consecutive_punctuation(text)
if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
print(i18n("实际输入的目标文本:"), text)
@ -361,6 +368,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
text = text.replace("\n\n", "\n")
print(i18n("实际输入的目标文本(切句后):"), text)
texts = text.split("\n")
texts = process_text(texts)
texts = merge_short_text_in_array(texts, 5)
audio_opt = []
if not ref_free:
@ -458,6 +466,7 @@ def cut1(inp):
opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
else:
opts = [inp]
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
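The line added to `cut1` (and mirrored in the other cut functions below) drops segments made up entirely of punctuation. A tiny standalone illustration of the `issubset` test, using the same `punctuation` set defined near the top of this file:

```python
punctuation = set(['!', '?', '…', ',', '.', '-', ' '])

segments = ["hello there", "...", "ok", "?!"]
kept = [s for s in segments if not set(s).issubset(punctuation)]
print(kept)  # ['hello there', 'ok']
```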
@ -482,17 +491,21 @@ def cut2(inp):
if len(opts) > 1 and len(opts[-1]) < 50: ## if the last segment is too short, merge it with the previous one
opts[-2] = opts[-2] + opts[-1]
opts = opts[:-1]
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
def cut3(inp):
inp = inp.strip("\n")
return "\n".join(["%s" % item for item in inp.strip("。").split("。")])
opts = ["%s" % item for item in inp.strip("。").split("。")]
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
def cut4(inp):
inp = inp.strip("\n")
return "\n".join(["%s" % item for item in inp.strip(".").split(".")])
opts = ["%s" % item for item in inp.strip(".").split(".")]
opts = [item for item in opts if not set(item).issubset(punctuation)]
return "\n".join(opts)
# contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
@ -506,8 +519,8 @@ def cut5(inp):
# keep the text intact when the sentence has no punctuation, or none at the end
if len(items)%2 == 1:
mergeitems.append(items[-1])
opt = "\n".join(mergeitems)
return opt
opt = [item for item in mergeitems if not set(item).issubset(punctuation)]
return "\n".join(opt)
def custom_sort_key(s):
@ -517,6 +530,24 @@ def custom_sort_key(s):
parts = [int(part) if part.isdigit() else part for part in parts]
return parts
def process_text(texts):
_text=[]
if all(text in [None, " ", "\n",""] for text in texts):
raise ValueError(i18n("请输入有效文本"))
for text in texts:
if text in [None, " ", ""]:
pass
else:
_text.append(text)
return _text
def replace_consecutive_punctuation(text):
punctuations = ''.join(re.escape(p) for p in punctuation)
pattern = f'([{punctuations}])([{punctuations}])+'
result = re.sub(pattern, r'\1', text)
return result
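A quick, self-contained sanity check of the two helpers added above (re-stated here so the snippet runs on its own):

```python
import re

punctuation = set(['!', '?', '…', ',', '.', '-', ' '])

def replace_consecutive_punctuation(text):
    # collapse runs of punctuation down to their first character
    punctuations = ''.join(re.escape(p) for p in punctuation)
    pattern = f'([{punctuations}])([{punctuations}])+'
    return re.sub(pattern, r'\1', text)

def process_text(texts):
    # reject input that is entirely empty, then drop the empty segments
    if all(text in [None, " ", "\n", ""] for text in texts):
        raise ValueError("invalid input text")
    return [t for t in texts if t not in [None, " ", ""]]

print(replace_consecutive_punctuation("really??!!"))  # really?
print(process_text(["hello", " ", "", "world"]))      # ['hello', 'world']
```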
def change_choices():
SoVITS_names, GPT_names = get_weights_names()
@ -579,7 +610,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
interactive=True,
)
with gr.Row():
gr.Markdown("gpt采样参数(无参考文本时不要太低)")
gr.Markdown(value=i18n("gpt采样参数(无参考文本时不要太低)"))
top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k"),value=5,interactive=True)
top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p"),value=1,interactive=True)
temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature"),value=1,interactive=True)
@ -608,10 +639,11 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
button5.click(cut5, [text_inp], [text_opt])
gr.Markdown(value=i18n("后续将支持转音素、手工修改音素、语音合成分步执行。"))
app.queue(concurrency_count=511, max_size=1022).launch(
if __name__ == '__main__':
app.queue(concurrency_count=511, max_size=1022).launch(
server_name="0.0.0.0",
inbrowser=True,
share=is_share,
server_port=infer_ttswebui,
quiet=True,
)
)


@ -15,6 +15,7 @@ from module.mrte_model import MRTE
from module.quantize import ResidualVectorQuantizer
from text import symbols
from torch.cuda.amp import autocast
import contextlib
class StochasticDurationPredictor(nn.Module):
@ -890,9 +891,11 @@ class SynthesizerTrn(nn.Module):
self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 1, stride=1)
self.quantizer = ResidualVectorQuantizer(dimension=ssl_dim, n_q=1, bins=1024)
if freeze_quantizer:
self.ssl_proj.requires_grad_(False)
self.quantizer.requires_grad_(False)
self.freeze_quantizer = freeze_quantizer
# if freeze_quantizer:
# self.ssl_proj.requires_grad_(False)
# self.quantizer.requires_grad_(False)
#self.quantizer.eval()
# self.enc_p.text_embedding.requires_grad_(False)
# self.enc_p.encoder_text.requires_grad_(False)
# self.enc_p.mrte.requires_grad_(False)
@ -904,6 +907,11 @@ class SynthesizerTrn(nn.Module):
ge = self.ref_enc(y * y_mask, y_mask)
with autocast(enabled=False):
maybe_no_grad = torch.no_grad() if self.freeze_quantizer else contextlib.nullcontext()
with maybe_no_grad:
if self.freeze_quantizer:
self.ssl_proj.eval()
self.quantizer.eval()
ssl = self.ssl_proj(ssl)
quantized, codes, commit_loss, quantized_list = self.quantizer(
ssl, layers=[0]
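The refactor above replaces hard `requires_grad_(False)` calls with a context manager chosen at runtime: `torch.no_grad()` when the quantizer is frozen, `contextlib.nullcontext()` otherwise, so one forward path serves both cases. A standalone sketch of the idiom:

```python
import contextlib
import torch

def forward(x, freeze=False):
    maybe_no_grad = torch.no_grad() if freeze else contextlib.nullcontext()
    with maybe_no_grad:
        y = x * 2  # gradients flow only when freeze is False
    return y

x = torch.ones(3, requires_grad=True)
print(forward(x, freeze=False).requires_grad)  # True
print(forward(x, freeze=True).requires_grad)   # False
```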


@ -49,8 +49,8 @@ if os.path.exists(txt_path) == False:
os.makedirs(bert_dir, exist_ok=True)
if torch.cuda.is_available():
device = "cuda:0"
elif torch.backends.mps.is_available():
device = "mps"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir)
@ -117,9 +117,12 @@ if os.path.exists(txt_path) == False:
try:
wav_name, spk_name, language, text = line.split("|")
# todo.append([name,text,"zh"])
if language in language_v1_to_language_v2.keys():
todo.append(
[wav_name, text, language_v1_to_language_v2.get(language, language)]
)
else:
print(f"\033[33m[Warning] The {language = } of {wav_name} is not supported for training.\033[0m")
except:
print(line, traceback.format_exc())


@ -50,8 +50,8 @@ maxx=0.95
alpha=0.5
if torch.cuda.is_available():
device = "cuda:0"
elif torch.backends.mps.is_available():
device = "mps"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
model=cnhubert.get_model()
@ -82,7 +82,7 @@ def name2go(wav_name,wav_path):
tensor_wav16 = tensor_wav16.to(device)
ssl=model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"].transpose(1,2).cpu()#torch.Size([1, 768, 215])
if np.isnan(ssl.detach().numpy()).sum()!= 0:
nan_fails.append(wav_name)
nan_fails.append((wav_name,wav_path))
print("nan filtered:%s"%wav_name)
return
wavfile.write(
@ -90,7 +90,7 @@ def name2go(wav_name,wav_path):
32000,
tmp_audio32.astype("int16"),
)
my_save(ssl,hubert_path )
my_save(ssl,hubert_path)
with open(inp_text,"r",encoding="utf8")as f:
lines=f.read().strip("\n").split("\n")
@ -113,8 +113,8 @@ for line in lines[int(i_part)::int(all_parts)]:
if(len(nan_fails)>0 and is_half==True):
is_half=False
model=model.float()
for wav_name in nan_fails:
for wav in nan_fails:
try:
name2go(wav_name)
name2go(wav[0],wav[1])
except:
print(wav[0],traceback.format_exc())


@ -40,8 +40,8 @@ if os.path.exists(semantic_path) == False:
if torch.cuda.is_available():
device = "cuda"
elif torch.backends.mps.is_available():
device = "mps"
# elif torch.backends.mps.is_available():
# device = "mps"
else:
device = "cpu"
hps = utils.get_hparams_from_file(s2config_path)


@ -118,16 +118,16 @@ def main(args):
os.environ["MASTER_ADDR"]="localhost"
trainer: Trainer = Trainer(
max_epochs=config["train"]["epochs"],
accelerator="gpu",
accelerator="gpu" if torch.cuda.is_available() else "cpu",
# val_check_interval=9999999999999999999999,### do not run validation
# check_val_every_n_epoch=None,
limit_val_batches=0,
devices=-1,
devices=-1 if torch.cuda.is_available() else 1,
benchmark=False,
fast_dev_run=False,
strategy = "auto" if torch.backends.mps.is_available() else DDPStrategy(
strategy = DDPStrategy(
process_group_backend="nccl" if platform.system() != "Windows" else "gloo"
), # mps does not support multi-node training
) if torch.cuda.is_available() else "auto",
precision=config["train"]["precision"],
logger=logger,
num_sanity_val_steps=0,


@ -41,15 +41,15 @@ torch.set_float32_matmul_precision("medium") # lowest precision but fastest (i.e.
# from config import pretrained_s2G,pretrained_s2D
global_step = 0
device = "cpu" # devices other than cuda will be added after mps is optimized
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available() or torch.backends.mps.is_available(), "Only GPU training is allowed."
if torch.backends.mps.is_available():
n_gpus = 1
else:
if torch.cuda.is_available():
n_gpus = torch.cuda.device_count()
else:
n_gpus = 1
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(randint(20000, 55555))
@ -73,7 +73,7 @@ def run(rank, n_gpus, hps):
writer_eval = SummaryWriter(log_dir=os.path.join(hps.s2_ckpt_dir, "eval"))
dist.init_process_group(
backend = "gloo" if os.name == "nt" or torch.backends.mps.is_available() else "nccl",
backend = "gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl",
init_method="env://",
world_size=n_gpus,
rank=rank,
@ -137,9 +137,9 @@ def run(rank, n_gpus, hps):
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model,
).to("mps")
).to(device)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to("mps")
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) if torch.cuda.is_available() else MultiPeriodDiscriminator(hps.model.use_spectral_norm).to(device)
for name, param in net_g.named_parameters():
if not param.requires_grad:
print(name, "not requires_grad")
@ -187,8 +187,8 @@ def run(rank, n_gpus, hps):
net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
else:
net_g = net_g.to("mps")
net_d = net_d.to("mps")
net_g = net_g.to(device)
net_d = net_d.to(device)
try: # automatically resume if a checkpoint can be loaded
_, _, _, epoch_str = utils.load_checkpoint(
@ -305,7 +305,7 @@ def train_and_evaluate(
y_lengths,
text,
text_lengths,
) in tqdm(enumerate(train_loader)):
) in enumerate(tqdm(train_loader)):
if torch.cuda.is_available():
spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(
rank, non_blocking=True
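The switch from `tqdm(enumerate(train_loader))` to `enumerate(tqdm(train_loader))` matters because `enumerate` returns a generator with no `__len__`, so tqdm cannot show a total or percentage; wrapping the loader itself restores the full progress bar. A minimal illustration:

```python
from tqdm import tqdm

data = list(range(100))
for i, x in tqdm(enumerate(data)):  # no total known: bare iteration counter
    pass
for i, x in enumerate(tqdm(data)):  # len(data) known: proper progress bar
    pass
```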
@ -320,12 +320,12 @@ def train_and_evaluate(
rank, non_blocking=True
)
else:
spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps")
y, y_lengths = y.to("mps"), y_lengths.to("mps")
ssl = ssl.to("mps")
spec, spec_lengths = spec.to(device), spec_lengths.to(device)
y, y_lengths = y.to(device), y_lengths.to(device)
ssl = ssl.to(device)
ssl.requires_grad = False
# ssl_lengths = ssl_lengths.cuda(rank, non_blocking=True)
text, text_lengths = text.to("mps"), text_lengths.to("mps")
text, text_lengths = text.to(device), text_lengths.to(device)
with autocast(enabled=hps.train.fp16_run):
(
@ -532,10 +532,10 @@ def evaluate(hps, generator, eval_loader, writer_eval):
ssl = ssl.cuda()
text, text_lengths = text.cuda(), text_lengths.cuda()
else:
spec, spec_lengths = spec.to("mps"), spec_lengths.to("mps")
y, y_lengths = y.to("mps"), y_lengths.to("mps")
ssl = ssl.to("mps")
text, text_lengths = text.to("mps"), text_lengths.to("mps")
spec, spec_lengths = spec.to(device), spec_lengths.to(device)
y, y_lengths = y.to(device), y_lengths.to(device)
ssl = ssl.to(device)
text, text_lengths = text.to(device), text_lengths.to(device)
for test in [0, 1]:
y_hat, mask, *_ = generator.module.infer(
ssl, spec, spec_lengths, text, text_lengths, test=test

File diff suppressed because it is too large


@ -1 +1,2 @@
CHATGPT CH AE1 T JH IY1 P IY1 T IY1
JSON JH EY1 S AH0 N

Binary file not shown.


@ -1,18 +1,26 @@
import pickle
import os
import re
import wordsegment
from g2p_en import G2p
from string import punctuation
from text import symbols
import unicodedata
from builtins import str as unicode
from g2p_en.expand import normalize_numbers
from nltk.tokenize import TweetTokenizer
word_tokenize = TweetTokenizer().tokenize
from nltk import pos_tag
current_file_path = os.path.dirname(__file__)
CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
CMU_DICT_FAST_PATH = os.path.join(current_file_path, "cmudict-fast.rep")
CMU_DICT_HOT_PATH = os.path.join(current_file_path, "engdict-hot.rep")
CACHE_PATH = os.path.join(current_file_path, "engdict_cache.pickle")
_g2p = G2p()
NAMECACHE_PATH = os.path.join(current_file_path, "namedict_cache.pickle")
arpa = {
"AH0",
@ -90,7 +98,7 @@ arpa = {
def replace_phs(phs):
rep_map = {";": ",", ":": ",", "'": "-", '"': "-"}
rep_map = {"'": "-"}
phs_new = []
for ph in phs:
if ph in symbols:
@ -112,7 +120,7 @@ def read_dict():
if line_index >= start_line:
line = line.strip()
word_split = line.split(" ")
word = word_split[0]
word = word_split[0].lower()
syllable_split = word_split[1].split(" - ")
g2p_dict[word] = []
@ -132,16 +140,11 @@ def read_dict_new():
line = f.readline()
line_index = 1
while line:
if line_index >= 49:
if line_index >= 57:
line = line.strip()
word_split = line.split(" ")
word = word_split[0]
syllable_split = word_split[1].split(" - ")
g2p_dict[word] = []
for syllable in syllable_split:
phone_split = syllable.split(" ")
g2p_dict[word].append(phone_split)
word = word_split[0].lower()
g2p_dict[word] = [word_split[1].split(" ")]
line_index = line_index + 1
line = f.readline()
@ -153,14 +156,16 @@ def read_dict_new():
if line_index >= 0:
line = line.strip()
word_split = line.split(" ")
word = word_split[0]
word = word_split[0].lower()
if word not in g2p_dict:
g2p_dict[word] = []
g2p_dict[word].append(word_split[1:])
g2p_dict[word] = [word_split[1:]]
line_index = line_index + 1
line = f.readline()
return g2p_dict
def hot_reload_hot(g2p_dict):
with open(CMU_DICT_HOT_PATH) as f:
line = f.readline()
line_index = 1
@ -168,10 +173,9 @@ def read_dict_new():
if line_index >= 0:
line = line.strip()
word_split = line.split(" ")
word = word_split[0]
#if word not in g2p_dict:
g2p_dict[word] = []
g2p_dict[word].append(word_split[1:])
word = word_split[0].lower()
# custom pronunciation entries directly override the dictionary
g2p_dict[word] = [word_split[1:]]
line_index = line_index + 1
line = f.readline()
@ -192,43 +196,167 @@ def get_dict():
g2p_dict = read_dict_new()
cache_dict(g2p_dict, CACHE_PATH)
g2p_dict = hot_reload_hot(g2p_dict)
return g2p_dict
eng_dict = get_dict()
def get_namedict():
if os.path.exists(NAMECACHE_PATH):
with open(NAMECACHE_PATH, "rb") as pickle_file:
name_dict = pickle.load(pickle_file)
else:
name_dict = {}
return name_dict
def text_normalize(text):
# todo: eng text normalize
return text.replace(";", ",")
# adapt Chinese and g2p_en punctuation
rep_map = {
"[;:]": ",",
'["]': "'",
"。": ".",
"！": "!",
"？": "?",
}
for p, r in rep_map.items():
text = re.sub(p, r, text)
# text formatting logic taken from g2p_en
# with added uppercase compatibility
text = unicode(text)
text = normalize_numbers(text)
text = ''.join(char for char in unicodedata.normalize('NFD', text)
if unicodedata.category(char) != 'Mn') # Strip accents
text = re.sub("[^ A-Za-z'.,?!\-]", "", text)
text = re.sub(r"(?i)i\.e\.", "that is", text)
text = re.sub(r"(?i)e\.g\.", "for example", text)
return text
class en_G2p(G2p):
def __init__(self):
super().__init__()
# initialize the word segmenter
wordsegment.load()
# extend the outdated dictionary and add a name dictionary
self.cmu = get_dict()
self.namedict = get_namedict()
# drop a few abbreviations whose pronunciations are wrong
for word in ["AE", "AI", "AR", "IOS", "HUD", "OS"]:
del self.cmu[word.lower()]
# fix heteronyms
self.homograph2features["read"] = (['R', 'IY1', 'D'], ['R', 'EH1', 'D'], 'VBP')
self.homograph2features["complex"] = (['K', 'AH0', 'M', 'P', 'L', 'EH1', 'K', 'S'], ['K', 'AA1', 'M', 'P', 'L', 'EH0', 'K', 'S'], 'JJ')
def __call__(self, text):
# tokenization
words = word_tokenize(text)
tokens = pos_tag(words) # tuples of (word, tag)
# steps
prons = []
for o_word, pos in tokens:
# replicate g2p_en's lowercasing logic
word = o_word.lower()
if re.search("[a-z]", word) is None:
pron = [word]
# handle single letters first
elif len(word) == 1:
# fix the pronunciation of a standalone "A"; the original-case o_word is needed to detect the capital
if o_word == "A":
pron = ['EY1']
else:
pron = self.cmu[word][0]
# original g2p_en heteronym handling
elif word in self.homograph2features: # Check homograph
pron1, pron2, pos1 = self.homograph2features[word]
if pos.startswith(pos1):
pron = pron1
# pos1 longer than pos only occurs for "read"
elif len(pos) < len(pos1) and pos == pos1[:len(pos)]:
pron = pron1
else:
pron = pron2
else:
# recursive lookup / prediction
pron = self.qryword(o_word)
prons.extend(pron)
prons.extend([" "])
return prons[:-1]
def qryword(self, o_word):
word = o_word.lower()
# dictionary lookup, except for single letters
if len(word) > 1 and word in self.cmu: # lookup CMU dict
return self.cmu[word][0]
# consult the name dictionary only when the word is title-case
if o_word.istitle() and word in self.namedict:
return self.namedict[word][0]
# spell out OOV words of length <= 3 letter by letter
if len(word) <= 3:
phones = []
for w in word:
# fix the pronunciation of a standalone "a"; no uppercase case can occur here
if w == "a":
phones.extend(['EY1'])
else:
phones.extend(self.cmu[w][0])
return phones
# try to split off a possessive 's
if re.match(r"^([a-z]+)('s)$", word):
phones = self.qryword(word[:-2])[:]
# after voiceless consonants P T K F TH HH, 's is pronounced ['S']
if phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']:
phones.extend(['S'])
# after sibilants S Z SH ZH CH JH, 's is pronounced ['IH1', 'Z'] or ['AH0', 'Z']
elif phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']:
phones.extend(['AH0', 'Z'])
# after voiced consonants B D G DH V M N NG L R W Y, 's is pronounced ['Z']
# AH0 AH1 AH2 EY0 EY1 EY2 AE0 AE1 AE2 EH0 EH1 EH2 OW0 OW1 OW2 UH0 UH1 UH2 IY0 IY1 IY2 AA0 AA1 AA2 AO0 AO1 AO2
# ER ER0 ER1 ER2 UW0 UW1 UW2 AY0 AY1 AY2 AW0 AW1 AW2 OY0 OY1 OY2 IH IH0 IH1 IH2: after these vowels, 's is pronounced ['Z']
else:
phones.extend(['Z'])
return phones
# try word segmentation to handle compound words
comps = wordsegment.segment(word.lower())
# if it cannot be segmented, send it back for model prediction
if len(comps)==1:
return self.predict(word)
# recursively process the segmented parts
return [phone for comp in comps for phone in self.qryword(comp)]
_g2p = en_G2p()
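The compound-word branch of `qryword` leans on the `wordsegment` package: `segment()` splits a concatenated string into corpus-known words, and anything it cannot split comes back as a single token, which is then sent to `self.predict`. For example:

```python
import wordsegment

wordsegment.load()
print(wordsegment.segment("thisisatest"))  # ['this', 'is', 'a', 'test']
```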
def g2p(text):
phones = []
words = re.split(r"([,;.\-\?\!\s+])", text)
for w in words:
if w.upper() in eng_dict:
phns = eng_dict[w.upper()]
for ph in phns:
phones += ph
else:
phone_list = list(filter(lambda p: p != " ", _g2p(w)))
for ph in phone_list:
if ph in arpa:
phones.append(ph)
else:
phones.append(ph)
# run g2p_en on the whole passage and strip tokens that are not valid ARPA phones
phone_list = _g2p(text)
phones = [ph if ph != "<unk>" else "UNK" for ph in phone_list if ph not in [" ", "<pad>", "UW", "</s>", "<s>"]]
return replace_phs(phones)
if __name__ == "__main__":
# print(get_dict())
print(g2p("hello"))
print(g2p("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
# all_phones = set()
# for k, syllables in eng_dict.items():
# for group in syllables:
# for ph in group:
# all_phones.add(ph)
# print(all_phones)
print(g2p(text_normalize("e.g. I used openai's AI tool to draw a picture.")))
print(g2p(text_normalize("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")))
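The homograph override in `en_G2p.__init__` makes the reading of "read" depend on the POS tag (the present-tense phones when the tag starts with VBP, the past-tense phones otherwise). Assuming the module imports as `text.english`, a quick check; the exact output depends on the bundled dictionaries and the NLTK tagger, so the comments are indicative:

```python
from text.english import g2p

print(g2p("I read books every day."))   # "read" tagged as present: R IY1 D
print(g2p("Yesterday I read a book."))  # "read" tagged as past: R EH1 D
```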

Binary file not shown.


@ -106,6 +106,29 @@ def replace_default_num(match):
return verbalize_digit(number, alt_one=True)
# arithmetic: addition, subtraction, multiplication, division
RE_ASMD = re.compile(
r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))([\+\-\×÷=])((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
asmd_map = {
'+': '加',
'-': '减',
'×': '乘',
'÷': '除',
'=': '等于'
}
def replace_asmd(match) -> str:
"""
Args:
match (re.Match)
Returns:
str
"""
result = match.group(1) + asmd_map[match.group(8)] + match.group(9)
return result
# numeric expressions
# pure decimals
RE_DECIMAL_NUM = re.compile(r'(-?)((\d+)(\.\d+))' r'|(\.(\d+))')
@ -155,7 +178,13 @@ def replace_number(match) -> str:
# match.group(1) and match.group(8) are copy from RE_NUMBER
RE_RANGE = re.compile(
r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))[-~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
r"""
(?<![\d\+\-\×÷=])      # lookbehind: no digit or arithmetic operator immediately before the range
((-?)((\d+)(\.\d+)?))  # start of the range: a positive or negative integer or decimal
[-~]                   # range separator
((-?)((\d+)(\.\d+)?))  # end of the range: a positive or negative integer or decimal
(?![\d\+\-\×÷=])       # lookahead: no digit or arithmetic operator immediately after the range
""", re.VERBOSE)
def replace_range(match) -> str:
@ -165,7 +194,7 @@ def replace_range(match) -> str:
Returns:
str
"""
first, second = match.group(1), match.group(8)
first, second = match.group(1), match.group(6)
first = RE_NUMBER.sub(replace_number, first)
second = RE_NUMBER.sub(replace_number, second)
result = f"{first}到{second}"
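Because the rewritten pattern drops the old alternation for bare decimals, the group numbering shifts: the range start is still `group(1)` but the range end is now `group(6)` instead of `group(8)`. A standalone check of both the groups and the new lookarounds:

```python
import re

RE_RANGE = re.compile(
    r"""
    (?<![\d\+\-\×÷=])
    ((-?)((\d+)(\.\d+)?))
    [-~]
    ((-?)((\d+)(\.\d+)?))
    (?![\d\+\-\×÷=])
    """, re.VERBOSE)

m = RE_RANGE.search("气温为3-8度")
print(m.group(1), m.group(6))    # 3 8
print(RE_RANGE.search("1+2-3"))  # None: the lookarounds reject arithmetic
```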


@ -34,6 +34,7 @@ from .num import RE_PERCENTAGE
from .num import RE_POSITIVE_QUANTIFIERS
from .num import RE_RANGE
from .num import RE_TO_RANGE
from .num import RE_ASMD
from .num import replace_default_num
from .num import replace_frac
from .num import replace_negative_num
@ -42,6 +43,7 @@ from .num import replace_percentage
from .num import replace_positive_quantifier
from .num import replace_range
from .num import replace_to_range
from .num import replace_asmd
from .phonecode import RE_MOBILE_PHONE
from .phonecode import RE_NATIONAL_UNIFORM_NUMBER
from .phonecode import RE_TELEPHONE
@ -67,7 +69,7 @@ class TextNormalizer():
if lang == "zh":
text = text.replace(" ", "")
# filter out special characters
text = re.sub(r'[——《》【】<=>{}()#&@“”^_|\\]', '', text)
text = re.sub(r'[——《》【】<>{}()#&@“”^_|\\]', '', text)
text = self.SENTENCE_SPLITOR.sub(r'\1\n', text)
text = text.strip()
sentences = [sentence.strip() for sentence in re.split(r'\n+', text)]
@ -113,7 +115,7 @@ class TextNormalizer():
sentence = sentence.replace('ψ', '普赛').replace('Ψ', '普赛')
sentence = sentence.replace('ω', '欧米伽').replace('Ω', '欧米伽')
# re filter special characters, have one more character "-" than line 68
sentence = re.sub(r'[-——《》【】<=>{}()#&@“”^_|\\]', '', sentence)
sentence = re.sub(r'[-——《》【】<=>{}()#&@“”^_|\\]', '', sentence)
return sentence
def normalize_sentence(self, sentence: str) -> str:
@ -142,6 +144,11 @@ class TextNormalizer():
sentence = RE_NATIONAL_UNIFORM_NUMBER.sub(replace_phone, sentence)
sentence = RE_RANGE.sub(replace_range, sentence)
# handle arithmetic: addition, subtraction, multiplication, division
while RE_ASMD.search(sentence):
sentence = RE_ASMD.sub(replace_asmd, sentence)
sentence = RE_INTEGER.sub(replace_negative_num, sentence)
sentence = RE_DECIMAL_NUM.sub(replace_number, sentence)
sentence = RE_POSITIVE_QUANTIFIERS.sub(replace_positive_quantifier,
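Each `RE_ASMD.sub` pass consumes its matches left to right, so in a chain like `1+2+3` the middle operand is claimed by the first match and the second `+` survives the pass; the `while` loop simply re-applies the substitution until nothing matches. A condensed illustration using the definitions from `num.py` above:

```python
import re

RE_ASMD = re.compile(
    r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))([\+\-\×÷=])((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
asmd_map = {'+': '加', '-': '减', '×': '乘', '÷': '除', '=': '等于'}

def replace_asmd(match):
    return match.group(1) + asmd_map[match.group(8)] + match.group(9)

sentence = "1+2+3=6"
while RE_ASMD.search(sentence):
    sentence = RE_ASMD.sub(replace_asmd, sentence)
print(sentence)  # 1加2加3等于6
```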

README.md

@ -8,23 +8,16 @@ A Powerful Few-shot Voice Conversion and Text-to-Speech WebUI.<br><br>
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Discord](https://img.shields.io/discord/1198701940511617164?color=%23738ADB&label=Discord&style=for-the-badge)](https://discord.gg/dnrgs5GHfG)
[**English**](./README.md) | [**中文简体**](./docs/cn/README.md) | [**日本語**](./docs/ja/README.md) | [**한국어**](./docs/ko/README.md)
**English** | [**中文简体**](./docs/cn/README.md) | [**日本語**](./docs/ja/README.md) | [**한국어**](./docs/ko/README.md) | [**Türkçe**](./docs/tr/README.md)
</div>
---
> Check out our [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) here!
Unseen speakers few-shot fine-tuning demo:
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
For users in China region, you can use AutoDL Cloud Docker to experience the full functionality online: https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official
## Features:
1. **Zero-shot TTS:** Input a 5-second vocal sample and experience instant text-to-speech conversion.
@ -35,19 +28,36 @@ For users in China region, you can use AutoDL Cloud Docker to experience the ful
4. **WebUI Tools:** Integrated tools include voice accompaniment separation, automatic training set segmentation, Chinese ASR, and text labeling, assisting beginners in creating training datasets and GPT/SoVITS models.
## Environment Preparation
**Check out our [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw) here!**
If you are a Windows user (tested with win>=10) you can install directly via the prezip. Just download the [prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true), unzip it and double-click go-webui.bat to start GPT-SoVITS-WebUI.
Unseen speakers few-shot fine-tuning demo:
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
**User guide: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
## Installation
For users in China region, you can [click here](https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official) to use AutoDL Cloud Docker to experience the full functionality online.
### Tested Environments
- Python 3.9, PyTorch 2.0.1, CUDA 11
- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3
- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple silicon, GPU)
- Python 3.9, PyTorch 2.2.2, macOS 14.4.1 (Apple silicon)
- Python 3.9, PyTorch 2.2.2, CPU devices
_Note: numba==0.56.4 require py<3.11_
_Note: numba==0.56.4 requires py<3.11_
### Quick Install with Conda
### Windows
If you are a Windows user (tested with win>=10), you can download [the 0206fix3 package](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta-fast-inference-branch.7z?download=true) or [the 0217fix2 package](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta0217fix2.7z?download=true) and double-click on _go-webui.bat_ to start GPT-SoVITS-WebUI.
Users in China region can download [the 0206fix3 package](https://www.icloud.com.cn/iclouddrive/075NNKIRC2zqnWn-9rhD63WGA#GPT-SoVITS-beta0206fix3) or [the 0217fix2 package](https://www.icloud.com.cn/iclouddrive/091QHaIbZMDZYQg7IX3g2kCqg#GPT-SoVITS-beta0217fix2) by clicking the links and then selecting "Download a copy." (Log out if you encounter errors while downloading.)
_Note: The inference speed of version 0206 is faster, while the inference quality of the new 0217 version is better. You can choose according to your needs._
### Linux
```bash
conda create -n GPTSoVits python=3.9
@ -55,15 +65,30 @@ conda activate GPTSoVits
bash install.sh
```
### macOS
**Note: The models trained with GPUs on Macs result in significantly lower quality compared to those trained on other devices, so we are temporarily using CPUs instead.**
1. Install Xcode command-line tools by running `xcode-select --install`
2. Install FFmpeg by running `brew install ffmpeg` or `conda install ffmpeg`.
3. Install the program by running the following commands:
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### Install Manually
#### Pip Packages
#### Install Dependencies
```bash
pip install -r requirements.txt
```
#### FFmpeg
#### Install FFmpeg
##### Conda Users
@ -79,57 +104,10 @@ sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### MacOS Users
```bash
brew install ffmpeg
```
##### Windows Users
Download and place [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) and [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) in the GPT-SoVITS root.
### Pretrained Models
Download pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS/pretrained_models`.
For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`.
Users in China region can download these two models by entering the links below and clicking "Download a copy"
- [GPT-SoVITS Models](https://www.icloud.com.cn/iclouddrive/056y_Xog_HXpALuVUjscIwTtg#GPT-SoVITS_Models)
- [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/damo_asr/models`.
### For Mac Users
If you are a Mac user, make sure you meet the following conditions for training and inferencing with GPU:
- Mac computers with Apple silicon
- macOS 12.3 or later
- Xcode command-line tools installed by running `xcode-select --install`
_Other Macs can do inference with CPU only._
Then install by using the following commands:
#### Create Environment
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
```
#### Install Requirements
```bash
pip install -r requirements.txt
pip uninstall torch torchaudio
pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```
### Using Docker
#### docker-compose.yaml configuration
@ -157,6 +135,28 @@ As above, modify the corresponding parameters based on your actual situation, th
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
## Pretrained Models
Download pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS/pretrained_models`.
For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`.
Users in China region can download these two models by entering the links below and clicking "Download a copy"
- [GPT-SoVITS Models](https://www.icloud.com.cn/iclouddrive/056y_Xog_HXpALuVUjscIwTtg#GPT-SoVITS_Models)
- [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/asr/models`.
For English or Japanese ASR (additionally), download models from [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) and place them in `tools/asr/models`. Also, [other models](https://huggingface.co/Systran) may have a similar effect with a smaller disk footprint.
Users in China region can download this model by entering the links below
- [Faster Whisper Large V3](https://www.icloud.com/iclouddrive/0c4pQxFs7oWyVU1iMTq2DbmLA#faster-whisper-large-v3) (clicking "Download a copy")
- [Faster Whisper Large V3](https://hf-mirror.com/Systran/faster-whisper-large-v3) (HuggingFace mirror site)
## Dataset Format
The TTS annotation .list file format:
@ -182,7 +182,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [ ] **High Priority:**
- [x] Localization in Japanese and English.
- [ ] User guide.
- [x] User guide.
- [x] Japanese and English dataset fine tune training.
- [ ] **Features:**
@ -197,7 +197,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [ ] better sovits base model (enhanced audio quality)
- [ ] model mix
## (Optional) If you need, here will provide the command line operation mode
## (Additional) Method for running from the command line
Use the command line to open the WebUI for UVR5
```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
@ -218,35 +218,42 @@ python audio_slicer.py \
```
This is how dataset ASR processing is done using the command line (Chinese only)
```
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
python tools/asr/funasr_asr.py -i <input> -o <output>
```
ASR processing is performed through Faster_Whisper (ASR labeling for languages other than Chinese)
(No progress bars; GPU performance may cause delays)
```
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
```
A custom save path for the output .list file is supported
## Credits
Special thanks to the following projects and contributors:
### Theoretical Research
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)
### Pretrained Models
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
### Text Frontend for Inference
- [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
- [LangSegment](https://github.com/juntaosun/LangSegment)
### WebUI Tools
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
- [faster-whisper](https://github.com/SYSTRAN/faster-whisper)
- [FunASR](https://github.com/alibaba-damo-academy/FunASR)
## Thanks to all contributors for their efforts

api.py

@ -13,11 +13,14 @@
`-dt` - `default reference audio text`
`-dl` - `default reference audio language, "中文","英文","日文","zh","en","ja"`
`-d` - `inference device, "cuda","cpu","mps"`
`-d` - `inference device, "cuda","cpu"`
`-a` - `bind address, default "127.0.0.1"`
`-p` - `bind port, default 9880, configurable in config.py`
`-fp` - `override config.py, use full precision`
`-hp` - `override config.py, use half precision`
`-sm` - `streaming mode, off by default, "close","c", "normal","n", "keepalive","k"`
`-mt` - `returned audio encoding format, ogg by default for streaming, wav otherwise, "wav", "ogg", "aac"`
`-cp` - `text split punctuation, empty by default, passed as a string like ",.,。"`
`-hb` - `cnhubert path`
`-b` - `bert path`
@ -39,6 +42,18 @@ POST:
}
```
Use the reference audio specified by the launch arguments and set the split punctuation:
GET:
`http://127.0.0.1:9880?text=先帝创业未半而中道崩殂今天下三分益州疲弊此诚危急存亡之秋也&text_language=zh&cut_punc=,。`
POST:
```json
{
"text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
"text_language": "zh",
"cut_punc": ",。",
}
```
Manually specify the reference audio used for this inference:
GET:
`http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三&prompt_language=zh&text=先帝创业未半而中道崩殂今天下三分益州疲弊此诚危急存亡之秋也&text_language=zh`
@ -103,7 +118,7 @@ RESP: none
import argparse
import os
import os,re
import sys
now_dir = os.getcwd()
@ -111,6 +126,7 @@ sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))
import signal
import LangSegment
from time import time as ttime
import torch
import librosa
@ -129,35 +145,8 @@ from text.cleaner import clean_text
from module.mel_processing import spectrogram_torch
from my_utils import load_audio
import config as global_config
g_config = global_config.Config()
# AVAILABLE_COMPUTE = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(description="GPT-SoVITS api")
parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径")
parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径")
parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径")
parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本")
parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种")
parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu / mps")
parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0")
parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880")
parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度")
parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度")
# boolean flags are used as `python ./api.py -fp ...`
# in that case full_precision==True, half_precision==False
parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="覆盖config.cnhubert_path")
parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="覆盖config.bert_path")
args = parser.parse_args()
sovits_path = args.sovits_path
gpt_path = args.gpt_path
import logging
import subprocess
class DefaultRefer:
@ -170,50 +159,6 @@ class DefaultRefer:
return is_full(self.path, self.text, self.language)
default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language)
device = args.device
port = args.port
host = args.bind_addr
if sovits_path == "":
sovits_path = g_config.pretrained_sovits_path
print(f"[WARN] 未指定SoVITS模型路径, fallback后当前值: {sovits_path}")
if gpt_path == "":
gpt_path = g_config.pretrained_gpt_path
print(f"[WARN] 未指定GPT模型路径, fallback后当前值: {gpt_path}")
# default reference audio, used when the caller provides no or incomplete reference-audio parameters
if default_refer.path == "" or default_refer.text == "" or default_refer.language == "":
default_refer.path, default_refer.text, default_refer.language = "", "", ""
print("[INFO] 未指定默认参考音频")
else:
print(f"[INFO] 默认参考音频路径: {default_refer.path}")
print(f"[INFO] 默认参考音频文本: {default_refer.text}")
print(f"[INFO] 默认参考音频语种: {default_refer.language}")
is_half = g_config.is_half
if args.full_precision:
is_half = False
if args.half_precision:
is_half = True
if args.full_precision and args.half_precision:
is_half = g_config.is_half # fallback when both flags are given
print(f"[INFO] 半精: {is_half}")
cnhubert_base_path = args.hubert_path
bert_path = args.bert_path
cnhubert.cnhubert_base_path = cnhubert_base_path
tokenizer = AutoTokenizer.from_pretrained(bert_path)
bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
if is_half:
bert_model = bert_model.half().to(device)
else:
bert_model = bert_model.to(device)
def is_empty(*items): # returns False if any item is non-empty
for item in items:
if item is not None and item != "":
@ -227,17 +172,19 @@ def is_full(*items): # returns False if any item is empty
return False
return True
def change_sovits_weights(sovits_path):
global vq_model, hps
dict_s2 = torch.load(sovits_path, map_location="cpu")
hps = dict_s2["config"]
hps = DictToAttrRecursive(hps)
hps.model.semantic_frame_rate = "25hz"
model_params_dict = vars(hps.model)
vq_model = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model
**model_params_dict
)
if ("pretrained" not in sovits_path):
del vq_model.enc_q
@ -246,9 +193,9 @@ def change_sovits_weights(sovits_path):
else:
vq_model = vq_model.to(device)
vq_model.eval()
print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
with open("./sweight.txt", "w", encoding="utf-8") as f:
f.write(sovits_path)
vq_model.load_state_dict(dict_s2["weight"], strict=False)
def change_gpt_weights(gpt_path):
global hz, max_sec, t2s_model, config
hz = 50
@ -262,8 +209,7 @@ def change_gpt_weights(gpt_path):
t2s_model = t2s_model.to(device)
t2s_model.eval()
total = sum([param.nelement() for param in t2s_model.parameters()])
print("Number of parameter: %.2fM" % (total / 1e6))
with open("./gweight.txt", "w", encoding="utf-8") as f: f.write(gpt_path)
logger.info("Number of parameter: %.2fM" % (total / 1e6))
def get_bert_feature(text, word2ph):
@ -283,9 +229,81 @@ def get_bert_feature(text, word2ph):
return phone_level_feature.T
n_semantic = 1024
dict_s2 = torch.load(sovits_path, map_location="cpu")
hps = dict_s2["config"]
def clean_text_inf(text, language):
phones, word2ph, norm_text = clean_text(text, language)
phones = cleaned_text_to_sequence(phones)
return phones, word2ph, norm_text
def get_bert_inf(phones, word2ph, norm_text, language):
language=language.replace("all_","")
if language == "zh":
bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
else:
bert = torch.zeros(
(1024, len(phones)),
dtype=torch.float16 if is_half else torch.float32,
).to(device)
return bert
def get_phones_and_bert(text,language):
if language in {"en","all_zh","all_ja"}:
language = language.replace("all_","")
if language == "en":
LangSegment.setfilters(["en"])
formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
else:
# 因无法区别中日文汉字,以用户输入为准
formattext = text
while " " in formattext:
formattext = formattext.replace(" ", " ")
phones, word2ph, norm_text = clean_text_inf(formattext, language)
if language == "zh":
bert = get_bert_feature(norm_text, word2ph).to(device)
else:
bert = torch.zeros(
(1024, len(phones)),
dtype=torch.float16 if is_half else torch.float32,
).to(device)
elif language in {"zh", "ja","auto"}:
textlist=[]
langlist=[]
LangSegment.setfilters(["zh","ja","en","ko"])
if language == "auto":
for tmp in LangSegment.getTexts(text):
if tmp["lang"] == "ko":
langlist.append("zh")
textlist.append(tmp["text"])
else:
langlist.append(tmp["lang"])
textlist.append(tmp["text"])
else:
for tmp in LangSegment.getTexts(text):
if tmp["lang"] == "en":
langlist.append(tmp["lang"])
else:
# 因无法区别中日文汉字,以用户输入为准
langlist.append(language)
textlist.append(tmp["text"])
# logger.info(textlist)
# logger.info(langlist)
phones_list = []
bert_list = []
norm_text_list = []
for i in range(len(textlist)):
lang = langlist[i]
phones, word2ph, norm_text = clean_text_inf(textlist[i], lang)
bert = get_bert_inf(phones, word2ph, norm_text, lang)
phones_list.append(phones)
norm_text_list.append(norm_text)
bert_list.append(bert)
bert = torch.cat(bert_list, dim=1)
phones = sum(phones_list, [])
norm_text = ''.join(norm_text_list)
return phones, bert.to(torch.float16 if is_half else torch.float32), norm_text
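For orientation, here is a minimal sketch of what the `LangSegment` calls above return; the sample sentence and its segmentation are illustrative only, since results depend on the installed LangSegment version.

```python
import LangSegment

# Same filter set as the mixed-language branch of get_phones_and_bert.
LangSegment.setfilters(["zh", "ja", "en", "ko"])

# Each segment is a dict with "lang" and "text" keys,
# e.g. {"lang": "zh", "text": "你好,"}; "ko" segments are
# remapped to "zh" by the caller above.
for seg in LangSegment.getTexts("你好, hello world"):
    print(seg["lang"], seg["text"])
```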
class DictToAttrRecursive:
@ -298,39 +316,6 @@ class DictToAttrRecursive:
setattr(self, key, value)
hps = DictToAttrRecursive(hps)
hps.model.semantic_frame_rate = "25hz"
dict_s1 = torch.load(gpt_path, map_location="cpu")
config = dict_s1["config"]
ssl_model = cnhubert.get_model()
if is_half:
ssl_model = ssl_model.half().to(device)
else:
ssl_model = ssl_model.to(device)
vq_model = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model)
if is_half:
vq_model = vq_model.half().to(device)
else:
vq_model = vq_model.to(device)
vq_model.eval()
print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
hz = 50
max_sec = config['data']['max_sec']
t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
t2s_model.load_state_dict(dict_s1["weight"])
if is_half:
t2s_model = t2s_model.half()
t2s_model = t2s_model.to(device)
t2s_model.eval()
total = sum([param.nelement() for param in t2s_model.parameters()])
print("Number of parameter: %.2fM" % (total / 1e6))
def get_spepc(hps, filename):
audio = load_audio(filename, int(hps.data.sampling_rate))
audio = torch.FloatTensor(audio)
@ -341,17 +326,124 @@ def get_spepc(hps, filename):
return spec
dict_language = {
"中文": "zh",
"英文": "en",
"日文": "ja",
"ZH": "zh",
"EN": "en",
"JA": "ja",
"zh": "zh",
"en": "en",
"ja": "ja"
}
def pack_audio(audio_bytes, data, rate):
if media_type == "ogg":
audio_bytes = pack_ogg(audio_bytes, data, rate)
elif media_type == "aac":
audio_bytes = pack_aac(audio_bytes, data, rate)
else:
# wav无法流式, 先暂存raw
audio_bytes = pack_raw(audio_bytes, data, rate)
return audio_bytes
def pack_ogg(audio_bytes, data, rate):
# Author: AkagawaTsurunaki
# Issue:
# A stack overflow occurs intermittently when the function `sf_writef_short`
# of `libsndfile_64bit.dll` is called through the Python library `soundfile`.
# Note:
# This is an issue related to `libsndfile`, not this project itself.
# It happens when a large audio tensor (about 499804 frames on my PC)
# is generated and then converted to an ogg file.
# Related:
# https://github.com/RVC-Boss/GPT-SoVITS/issues/1199
# https://github.com/libsndfile/libsndfile/issues/1023
# https://github.com/bastibe/python-soundfile/issues/396
# Suggestion:
# Splitting the audio data into smaller segments would also avoid the stack overflow.
def handle_pack_ogg():
with sf.SoundFile(audio_bytes, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file:
audio_file.write(data)
import threading
# See: https://docs.python.org/3/library/threading.html
# threading.stack_size() requires at least 32768 bytes.
# If a stack overflow still occurs, increase `stack_size`.
# stack_size = n * 4096, where n should be a positive integer.
# Here we chose n = 4096, i.e. a 16 MiB thread stack.
stack_size = 4096 * 4096
try:
threading.stack_size(stack_size)
pack_ogg_thread = threading.Thread(target=handle_pack_ogg)
pack_ogg_thread.start()
pack_ogg_thread.join()
except RuntimeError as e:
# If changing the thread stack size is unsupported, a RuntimeError is raised.
print("RuntimeError: {}".format(e))
print("Changing the thread stack size is unsupported.")
except ValueError as e:
# If the specified stack size is invalid, a ValueError is raised and the stack size is unmodified.
print("ValueError: {}".format(e))
print("The specified stack size is invalid.")
return audio_bytes
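Following the suggestion in the comment above, here is a hedged sketch of the alternative workaround (the function name and segment size are made up for illustration): writing the tensor in fixed-size slices keeps each libsndfile call far below the frame count where the overflow was observed.

```python
def write_ogg_in_segments(audio_bytes, data, rate, seg_frames=100000):
    # Write in slices so that no single libsndfile call handles the
    # ~499804-frame buffers where the stack overflow was observed.
    with sf.SoundFile(audio_bytes, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file:
        for start in range(0, len(data), seg_frames):
            audio_file.write(data[start:start + seg_frames])
```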
def pack_raw(audio_bytes, data, rate):
audio_bytes.write(data.tobytes())
return audio_bytes
def pack_wav(audio_bytes, rate):
data = np.frombuffer(audio_bytes.getvalue(),dtype=np.int16)
wav_bytes = BytesIO()
sf.write(wav_bytes, data, rate, format='wav')
return wav_bytes
def pack_aac(audio_bytes, data, rate):
process = subprocess.Popen([
'ffmpeg',
'-f', 's16le', # 输入16位有符号小端整数PCM
'-ar', str(rate), # 设置采样率
'-ac', '1', # 单声道
'-i', 'pipe:0', # 从管道读取输入
'-c:a', 'aac', # 音频编码器为AAC
'-b:a', '192k', # 比特率
'-vn', # 不包含视频
'-f', 'adts', # 输出AAC数据流格式
'pipe:1' # 将输出写入管道
], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, _ = process.communicate(input=data.tobytes())
audio_bytes.write(out)
return audio_bytes
def read_clean_buffer(audio_bytes):
audio_chunk = audio_bytes.getvalue()
audio_bytes.truncate(0)
audio_bytes.seek(0)
return audio_bytes, audio_chunk
def cut_text(text, punc):
punc_list = [p for p in punc if p in {",", ".", ";", "?", "!", "、", "，", "。", "？", "！", "；", "：", "…"}]
if len(punc_list) > 0:
punds = r"[" + "".join(punc_list) + r"]"
text = text.strip("\n")
items = re.split(f"({punds})", text)
mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])]
# 在句子不存在符号或句尾无符号的时候保证文本完整
if len(items)%2 == 1:
mergeitems.append(items[-1])
text = "\n".join(mergeitems)
while "\n\n" in text:
text = text.replace("\n\n", "\n")
return text
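A small worked example of `cut_text`, following directly from the regex logic above; note that the leading space of each following segment is preserved.

```python
# "." and "?" are the requested cut points; any other punctuation stays inline.
print(cut_text("First. Second? Third", ".?"))
# -> "First.\n Second?\n Third"
```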
def only_punc(text):
return not any(t.isalnum() for t in text)  # isalpha is subsumed by isalnum
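Two quick checks of this guard, following from `str.isalnum`:

```python
print(only_punc("……!?"))  # True: nothing alphanumeric survives
print(only_punc("好。"))   # False: contains a CJK character
```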
def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language):
@ -374,25 +466,19 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language)
codes = vq_model.extract_latent(ssl_content)
prompt_semantic = codes[0, 0]
t1 = ttime()
prompt_language = dict_language[prompt_language]
text_language = dict_language[text_language]
phones1, word2ph1, norm_text1 = clean_text(prompt_text, prompt_language)
phones1 = cleaned_text_to_sequence(phones1)
prompt_language = dict_language[prompt_language.lower()]
text_language = dict_language[text_language.lower()]
phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language)
texts = text.split("\n")
audio_opt = []
audio_bytes = BytesIO()
for text in texts:
phones2, word2ph2, norm_text2 = clean_text(text, text_language)
phones2 = cleaned_text_to_sequence(phones2)
if (prompt_language == "zh"):
bert1 = get_bert_feature(norm_text1, word2ph1).to(device)
else:
bert1 = torch.zeros((1024, len(phones1)), dtype=torch.float16 if is_half == True else torch.float32).to(
device)
if (text_language == "zh"):
bert2 = get_bert_feature(norm_text2, word2ph2).to(device)
else:
bert2 = torch.zeros((1024, len(phones2))).to(bert1)
# 简单防止纯符号引发参考音频泄露
if only_punc(text):
continue
audio_opt = []
phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language)
bert = torch.cat([bert1, bert2], 1)
all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
@ -426,8 +512,17 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language)
audio_opt.append(audio)
audio_opt.append(zero_wav)
t4 = ttime()
print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16)
audio_bytes = pack_audio(audio_bytes,(np.concatenate(audio_opt, 0) * 32768).astype(np.int16),hps.data.sampling_rate)
# logger.info("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
if stream_mode == "normal":
audio_bytes, audio_chunk = read_clean_buffer(audio_bytes)
yield audio_chunk
if not stream_mode == "normal":
if media_type == "wav":
audio_bytes = pack_wav(audio_bytes,hps.data.sampling_rate)
yield audio_bytes.getvalue()
def handle_control(command):
@ -449,15 +544,16 @@ def handle_change(path, text, language):
if language != "" or language is not None:
default_refer.language = language
print(f"[INFO] 当前默认参考音频路径: {default_refer.path}")
print(f"[INFO] 当前默认参考音频文本: {default_refer.text}")
print(f"[INFO] 当前默认参考音频语种: {default_refer.language}")
print(f"[INFO] is_ready: {default_refer.is_ready()}")
logger.info(f"当前默认参考音频路径: {default_refer.path}")
logger.info(f"当前默认参考音频文本: {default_refer.text}")
logger.info(f"当前默认参考音频语种: {default_refer.language}")
logger.info(f"is_ready: {default_refer.is_ready()}")
return JSONResponse({"code": 0, "message": "Success"}, status_code=200)
def handle(refer_wav_path, prompt_text, prompt_language, text, text_language):
def handle(refer_wav_path, prompt_text, prompt_language, text, text_language, cut_punc):
if (
refer_wav_path == "" or refer_wav_path is None
or prompt_text == "" or prompt_text is None
@ -471,27 +567,141 @@ def handle(refer_wav_path, prompt_text, prompt_language, text, text_language):
if not default_refer.is_ready():
return JSONResponse({"code": 400, "message": "未指定参考音频且接口无预设"}, status_code=400)
with torch.no_grad():
gen = get_tts_wav(
refer_wav_path, prompt_text, prompt_language, text, text_language
)
sampling_rate, audio_data = next(gen)
if cut_punc is None:
text = cut_text(text, default_cut_punc)
else:
text = cut_text(text, cut_punc)
wav = BytesIO()
sf.write(wav, audio_data, sampling_rate, format="wav")
wav.seek(0)
torch.cuda.empty_cache()
if device == "mps":
print('executed torch.mps.empty_cache()')
torch.mps.empty_cache()
return StreamingResponse(wav, media_type="audio/wav")
return StreamingResponse(get_tts_wav(refer_wav_path, prompt_text, prompt_language, text, text_language), media_type="audio/"+media_type)
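For reference, a client-side sketch of consuming this endpoint in streaming mode. The root route and port 9880 are assumed from the defaults configured below, the file paths and texts are placeholders, and the server must have been launched with `-sm normal`.

```python
import requests

params = {
    "refer_wav_path": "ref.wav",            # placeholder reference clip
    "prompt_text": "reference transcript",  # placeholder transcript
    "prompt_language": "zh",
    "text": "text to synthesize",
    "text_language": "zh",
}
# With stream_mode enabled the server yields audio chunk by chunk,
# so iterate over the response instead of reading it all at once.
with requests.get("http://127.0.0.1:9880/", params=params, stream=True) as resp:
    with open("out.ogg", "wb") as f:  # ogg/aac are the streaming formats
        for chunk in resp.iter_content(chunk_size=None):
            f.write(chunk)
```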
# --------------------------------
# 初始化部分
# --------------------------------
dict_language = {
"中文": "all_zh",
"英文": "en",
"日文": "all_ja",
"中英混合": "zh",
"日英混合": "ja",
"多语种混合": "auto", #多语种启动切分识别语种
"all_zh": "all_zh",
"en": "en",
"all_ja": "all_ja",
"zh": "zh",
"ja": "ja",
"auto": "auto",
}
# logger
logging.config.dictConfig(uvicorn.config.LOGGING_CONFIG)
logger = logging.getLogger('uvicorn')
# 获取配置
g_config = global_config.Config()
# 获取参数
parser = argparse.ArgumentParser(description="GPT-SoVITS api")
parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径")
parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径")
parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径")
parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本")
parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种")
parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu")
parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0")
parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880")
parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度")
parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度")
# bool值的用法为 `python ./api.py -fp ...`
# 此时 full_precision==True, half_precision==False
parser.add_argument("-sm", "--stream_mode", type=str, default="close", help="流式返回模式, close / normal / keepalive")
parser.add_argument("-mt", "--media_type", type=str, default="wav", help="音频编码格式, wav / ogg / aac")
parser.add_argument("-cp", "--cut_punc", type=str, default="", help="文本切分符号设定, 符号范围,.;?!、,。?!;:…")
# 切割常用分句符为 `python ./api.py -cp ".?!。?!"`
parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="覆盖config.cnhubert_path")
parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="覆盖config.bert_path")
args = parser.parse_args()
sovits_path = args.sovits_path
gpt_path = args.gpt_path
device = args.device
port = args.port
host = args.bind_addr
cnhubert_base_path = args.hubert_path
bert_path = args.bert_path
default_cut_punc = args.cut_punc
# 应用参数配置
default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language)
# 模型路径检查
if sovits_path == "":
sovits_path = g_config.pretrained_sovits_path
logger.warn(f"未指定SoVITS模型路径, fallback后当前值: {sovits_path}")
if gpt_path == "":
gpt_path = g_config.pretrained_gpt_path
logger.warn(f"未指定GPT模型路径, fallback后当前值: {gpt_path}")
# 指定默认参考音频, 调用方 未提供/未给全 参考音频参数时使用
if default_refer.path == "" or default_refer.text == "" or default_refer.language == "":
default_refer.path, default_refer.text, default_refer.language = "", "", ""
logger.info("未指定默认参考音频")
else:
logger.info(f"默认参考音频路径: {default_refer.path}")
logger.info(f"默认参考音频文本: {default_refer.text}")
logger.info(f"默认参考音频语种: {default_refer.language}")
# 获取半精度
is_half = g_config.is_half
if args.full_precision:
is_half = False
if args.half_precision:
is_half = True
if args.full_precision and args.half_precision:
is_half = g_config.is_half # 炒饭fallback
logger.info(f"半精: {is_half}")
# 流式返回模式
if args.stream_mode.lower() in ["normal","n"]:
stream_mode = "normal"
logger.info("流式返回已开启")
else:
stream_mode = "close"
# 音频编码格式
if args.media_type.lower() in ["aac","ogg"]:
media_type = args.media_type.lower()
elif stream_mode == "close":
media_type = "wav"
else:
media_type = "ogg"
logger.info(f"编码格式: {media_type}")
# 初始化模型
cnhubert.cnhubert_base_path = cnhubert_base_path
tokenizer = AutoTokenizer.from_pretrained(bert_path)
bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
ssl_model = cnhubert.get_model()
if is_half:
bert_model = bert_model.half().to(device)
ssl_model = ssl_model.half().to(device)
else:
bert_model = bert_model.to(device)
ssl_model = ssl_model.to(device)
change_sovits_weights(sovits_path)
change_gpt_weights(gpt_path)
# --------------------------------
# 接口部分
# --------------------------------
app = FastAPI()
#clark新增-----2024-02-21
#可在启动后动态修改模型以此满足同一个api不同的朗读者请求
@app.post("/set_model")
async def set_model(request: Request):
json_post_raw = await request.json()
@ -499,11 +709,11 @@ async def set_model(request: Request):
gpt_path=json_post_raw.get("gpt_model_path")
global sovits_path
sovits_path=json_post_raw.get("sovits_model_path")
print("gptpath"+gpt_path+";vitspath"+sovits_path)
logger.info("gptpath"+gpt_path+";vitspath"+sovits_path)
change_sovits_weights(sovits_path)
change_gpt_weights(gpt_path)
return "ok"
# 新增-----end------
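A minimal sketch of switching models at runtime through the endpoint above; host and port are the defaults, and the two weight paths are placeholders.

```python
import requests

payload = {
    "gpt_model_path": "GPT_weights/speaker.ckpt",       # placeholder
    "sovits_model_path": "SoVITS_weights/speaker.pth",  # placeholder
}
resp = requests.post("http://127.0.0.1:9880/set_model", json=payload)
print(resp.text)  # "ok" on success
```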
@app.post("/control")
async def control(request: Request):
@ -544,6 +754,7 @@ async def tts_endpoint(request: Request):
json_post_raw.get("prompt_language"),
json_post_raw.get("text"),
json_post_raw.get("text_language"),
json_post_raw.get("cut_punc"),
)
@ -554,8 +765,9 @@ async def tts_endpoint(
prompt_language: str = None,
text: str = None,
text_language: str = None,
cut_punc: str = None,
):
return handle(refer_wav_path, prompt_text, prompt_language, text, text_language)
return handle(refer_wav_path, prompt_text, prompt_language, text, text_language, cut_punc)
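And the POST form of the same request, mirroring the JSON keys read in `tts_endpoint` above (route and port assumed as before; paths and texts are placeholders):

```python
import requests

payload = {
    "refer_wav_path": "ref.wav",
    "prompt_text": "reference transcript",
    "prompt_language": "zh",
    "text": "text to synthesize",
    "text_language": "zh",
    "cut_punc": ".?!。?!",  # optional; the -cp default is used when omitted
}
resp = requests.post("http://127.0.0.1:9880/", json=payload)
with open("out.wav", "wb") as f:  # wav when stream_mode is off
    f.write(resp.content)
```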
if __name__ == "__main__":

View File

@ -67,6 +67,7 @@
"!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n",
"# @title UVR5 pretrains 安装uvr5模型\n",
"%cd /content/GPT-SoVITS/tools/uvr5\n",
"%rm -r uvr5_weights\n",
"!git clone https://huggingface.co/Delik/uvr5_weights\n",
"!git config core.sparseCheckout true\n",
"!mv /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/GPT-SoVITS/* /content/GPT-SoVITS/GPT_SoVITS/pretrained_models/"

View File

@ -127,7 +127,7 @@
### 20240221更新
1-数据处理添加语音降噪选项
1-数据处理添加语音降噪选项(降噪为只剩16k采样率,除非底噪很大,先不急着用哦)。
2-中文日文前端处理优化 https://github.com/RVC-Boss/GPT-SoVITS/pull/559 https://github.com/RVC-Boss/GPT-SoVITS/pull/556 https://github.com/RVC-Boss/GPT-SoVITS/pull/532 https://github.com/RVC-Boss/GPT-SoVITS/pull/507 https://github.com/RVC-Boss/GPT-SoVITS/pull/509
@ -135,9 +135,62 @@
4-colab修复不开启公网url
### 20240306更新
1-推理加速50%(RTX3090+pytorch2.2.1+cu11.8+win10+py39 tested) https://github.com/RVC-Boss/GPT-SoVITS/pull/672
2-如果用faster whisper非中文ASR不再需要先下中文funasr模型
3-修复uvr5去混响模型 是否混响 反的 https://github.com/RVC-Boss/GPT-SoVITS/pull/610
4-faster whisper如果无cuda可用自动cpu推理 https://github.com/RVC-Boss/GPT-SoVITS/pull/675
5-修改is_half的判断使在Mac上能正常CPU推理 https://github.com/RVC-Boss/GPT-SoVITS/pull/573
### 202403/202404/202405更新
2个重点
1-修复sovits训练未冻结vq的问题(可能造成效果下降)
2-增加一个快速推理分支
以下都是小修补
1-修复无参考文本模式问题
2-优化中英文文本前端
3-api格式优化
4-cmd格式问题修复
5-训练数据处理阶段不支持的语言提示报错
6-nan自动转fp32阶段的hubert提取bug修复
### 20240610
小问题修复:
1-完善纯标点、多标点文本输入的判断逻辑 https://github.com/RVC-Boss/GPT-SoVITS/pull/1168 https://github.com/RVC-Boss/GPT-SoVITS/pull/1169
2-uvr5中的mdxnet去混响cmd格式修复兼容路径带空格 [#501a74a](https://github.com/RVC-Boss/GPT-SoVITS/commit/501a74ae96789a26b48932babed5eb4e9483a232)
3-s2训练进度条逻辑修复 https://github.com/RVC-Boss/GPT-SoVITS/pull/1159
大问题修复:
4-修复了webui的GPT中文微调没读到bert导致和推理不一致训练太多可能效果还会变差的问题。如果大量数据微调的建议重新微调模型得到质量优化 [#99f09c8](https://github.com/RVC-Boss/GPT-SoVITS/commit/99f09c8bdc155c1f4272b511940717705509582a)
todolist
1-中文多音字推理优化
1-中文多音字推理优化(有没有人来测试的欢迎把测试结果写在pr评论区里) https://github.com/RVC-Boss/GPT-SoVITS/pull/488
v2底模训练已经合了,下个版本发布就要合了
2-正在尝试解决低音质参考音频导致音质较差的问题,v2再试试,如果能解决就发了,节点暂定高考后吧

View File

@ -8,21 +8,16 @@
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Discord](https://img.shields.io/discord/1198701940511617164?color=%23738ADB&label=Discord&style=for-the-badge)](https://discord.gg/dnrgs5GHfG)
[**English**](../../README.md) | [**中文简体**](./README.md) | [**日本語**](../ja/README.md) | [**한국어**](../ko/README.md)
[**English**](../../README.md) | **中文简体** | [**日本語**](../ja/README.md) | [**한국어**](../ko/README.md) | [**Türkçe**](../tr/README.md)
</div>
---
> 查看我们的介绍视频 [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw)
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
中国地区用户可使用 AutoDL 云端镜像进行体验https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official
## 功能:
1. **零样本文本到语音(TTS):** 输入 5 秒的声音样本,即刻体验文本到语音转换。
@ -33,46 +28,36 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-
4. **WebUI 工具:** 集成工具包括声音伴奏分离、自动训练集分割、中文自动语音识别(ASR)和文本标注,协助初学者创建训练数据集和 GPT/SoVITS 模型。
## 环境准备
**查看我们的介绍视频 [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw)**
如果你是 Windows 用户(已在 win>=10 上测试),可以直接通过预打包文件安装。只需下载[预打包文件](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true),解压后双击 go-webui.bat 即可启动 GPT-SoVITS-WebUI。
未见过的说话者 few-shot 微调演示:
### 测试通过的 Python 和 PyTorch 版本
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
- Python 3.9、PyTorch 2.0.1 和 CUDA 11
- Python 3.10.13, PyTorch 2.1.2 和 CUDA 12.3
- Python 3.9、PyTorch 2.3.0.dev20240122 和 macOS 14.3(Apple 芯片,GPU)
**用户手册: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
_注意: numba==0.56.4 需要 python<3.11_
## 安装
### Mac 用户
中国地区用户可[点击此处](https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official)使用 AutoDL 云端镜像进行体验。
如果你是 Mac 用户,请先确保满足以下条件以使用 GPU 进行训练和推理:
### 测试通过的环境
- 搭载 Apple 芯片的 Mac
- macOS 12.3 或更高版本
- 已通过运行`xcode-select --install`安装 Xcode command-line tools
- Python 3.9,PyTorch 2.0.1,CUDA 11
- Python 3.10.13,PyTorch 2.1.2,CUDA 12.3
- Python 3.9,PyTorch 2.2.2,macOS 14.4.1(Apple 芯片)
- Python 3.9,PyTorch 2.2.2,CPU 设备
_其他 Mac 仅支持使用 CPU 进行推理_
_注: numba==0.56.4 需要 python<3.11_
然后使用以下命令安装:
### Windows
#### 创建环境
如果你是 Windows 用户(已在 win>=10 上测试),可以下载[0206fix3 整合包](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta-fast-inference-branch.7z?download=true)或[0217fix2 整合包](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta0217fix2.7z?download=true),解压后双击 go-webui.bat 即可启动 GPT-SoVITS-WebUI。
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
```
中国地区用户可以通过点击链接并选择“下载副本”来下载[0206fix3 整合包](https://www.icloud.com.cn/iclouddrive/075NNKIRC2zqnWn-9rhD63WGA#GPT-SoVITS-beta0206fix3)或[0217fix2 整合包](https://www.icloud.com.cn/iclouddrive/091QHaIbZMDZYQg7IX3g2kCqg#GPT-SoVITS-beta0217fix2)。(如果下载时遇到错误,请退出登录)
#### 安装依赖
_注:0206版本的推理速度更快,0217新版的推理效果更好,可按需选择_
```bash
pip install -r requirements.txt
pip uninstall torch torchaudio
pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```
### 使用 Conda 快速安装
### Linux
```bash
conda create -n GPTSoVits python=3.9
@ -80,15 +65,30 @@ conda activate GPTSoVits
bash install.sh
```
### 手动安装包
### macOS
#### Pip 包
**注:在 Mac 上使用 GPU 训练的模型效果显著低于其他设备训练的模型,所以我们暂时使用CPU进行训练。**
1. 运行 `xcode-select --install` 安装 Xcode command-line tools。
2. 运行 `brew install ffmpeg``conda install ffmpeg` 安装 FFmpeg。
3. 完成上述步骤后,运行以下的命令来安装本项目:
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### 手动安装
#### 安装依赖
```bash
pip install -r requirements.txt
```
#### FFmpeg
#### 安装 FFmpeg
##### Conda 使用者
@ -104,12 +104,6 @@ sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### MacOS 使用者
```bash
brew install ffmpeg
```
##### Windows 使用者
下载并将 [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) 和 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) 放置在 GPT-SoVITS 根目录下。
@ -141,11 +135,11 @@ docker compose -f "docker-compose.yaml" up -d
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
### 预训练模型
## 预训练模型
从 [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) 下载预训练模型,并将它们放置在 `GPT_SoVITS\pretrained_models` 中。
对于 UVR5人声/伴奏分离和混响移除,另外),从 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) 下载模型,并将它们放置在 `tools/uvr5/uvr5_weights` 中。
对于 UVR5人声/伴奏分离和混响移除,附加),从 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) 下载模型,并将它们放置在 `tools/uvr5/uvr5_weights` 中。
中国地区用户可以进入以下链接并点击“下载副本”下载以上两个模型:
@ -153,7 +147,15 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker
- [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
对于中文自动语音识别(另外),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/damo_asr/models` 中。
对于中文自动语音识别(附加),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/asr/models` 中。
对于英语与日语自动语音识别(附加),从 [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) 下载模型,并将它们放置在 `tools/asr/models` 中。 此外,[其他模型](https://huggingface.co/Systran)可能具有类似效果,但占用更小的磁盘空间。
中国地区用户可以通过以下链接下载:
- [Faster Whisper Large V3](https://www.icloud.com/iclouddrive/0c4pQxFs7oWyVU1iMTq2DbmLA#faster-whisper-large-v3)(点击“下载副本”)
- [Faster Whisper Large V3](https://hf-mirror.com/Systran/faster-whisper-large-v3)(Hugging Face镜像站)
## 数据集格式
@ -183,7 +185,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [ ] 用户指南。
- [x] 日语和英语数据集微调训练。
- [ ] **Features:**
- [ ] **功能:**
- [ ] 零样本声音转换5 秒)/ 少样本声音转换1 分钟)。
- [ ] TTS 语速控制。
- [ ] 增强的 TTS 情感控制。
@ -195,7 +197,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [ ] 更好的 sovits 基础模型(增强的音频质量)。
- [ ] 模型混合。
## 可选)命令行的操作方式
## 附加)命令行运行方式
使用命令行打开UVR5的WebUI
````
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
@ -216,33 +218,42 @@ python audio_slicer.py \
````
这是使用命令行完成数据集ASR处理的方式(仅限中文)
````
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
python tools/asr/funasr_asr.py -i <input> -o <output>
````
通过Faster_Whisper进行ASR处理(除中文之外的ASR标记)
(没有进度条,GPU性能可能会导致时间延迟)
````
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
````
启用自定义列表保存路径
## 致谢
特别感谢以下项目和贡献者:
### 理论研究
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)
### 预训练模型
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
### 推理用文本前端
- [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
- [LangSegment](https://github.com/juntaosun/LangSegment)
### WebUI 工具
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
- [faster-whisper](https://github.com/SYSTRAN/faster-whisper)
- [FunASR](https://github.com/alibaba-damo-academy/FunASR)
## 感谢所有贡献者的努力

View File

@ -8,19 +8,16 @@
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Discord](https://img.shields.io/discord/1198701940511617164?color=%23738ADB&label=Discord&style=for-the-badge)](https://discord.gg/dnrgs5GHfG)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](./README.md) | [**한국어**](../ko/README.md)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | **日本語** | [**한국어**](../ko/README.md) | [**Türkçe**](../tr/README.md)
</div>
---
> [デモ動画](https://www.bilibili.com/video/BV12g4y1m7Uw)をチェック!
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
## 機能:
1. **ゼロショット TTS:** 5 秒間のボーカルサンプルを入力すると、即座にテキストから音声に変換されます。
@ -31,48 +28,32 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-
4. **WebUI ツール:** 統合されたツールには、音声伴奏の分離、トレーニングセットの自動セグメンテーション、中国語 ASR、テキストラベリングが含まれ、初心者がトレーニングデータセットと GPT/SoVITS モデルを作成するのを支援します。
## 環境の準備
**[デモ動画](https://www.bilibili.com/video/BV12g4y1m7Uw)をチェック!**
Windows ユーザーであればwin>=10 にてテスト済み、prezip 経由で直接インストールできます。[prezip](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta.7z?download=true) をダウンロードして解凍し、go-webui.bat をダブルクリックするだけで GPT-SoVITS-WebUI が起動します。
未見の話者数ショット微調整デモ:
### Python と PyTorch のバージョン
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
**ユーザーマニュアル: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
## インストール
### テスト済みの環境
- Python 3.9, PyTorch 2.0.1, CUDA 11
- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3
- Python 3.9, PyTorch 2.3.0.dev20240122, macOS 14.3 (Apple silicon, GPU)
- Python 3.9, PyTorch 2.2.2, macOS 14.4.1 (Apple silicon)
- Python 3.9, PyTorch 2.2.2, CPUデバイス
_注記: numba==0.56.4 は py<3.11 が必要です_
### Mac ユーザーへ
### Windows
あなたが Mac ユーザーである場合、GPU を使用してトレーニングおよび推論を行うために以下の条件を満たしていることを確認してください:
Windows ユーザーの場合win>=10 でテスト済み)、[0206fix3 パッケージ](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta-fast-inference-branch.7z?download=true) または [0217fix2 パッケージ](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta0217fix2.7z?download=true) をダウンロードして、解凍後に _go-webui.bat_ をダブルクリックするだけで GPT-SoVITS-WebUI を起動できます。
- Apple シリコンを搭載した Mac コンピューター
- macOS 12.3 以降
- `xcode-select --install`を実行してインストールされた Xcode コマンドラインツール
_注:0206バージョンの推論速度は速いですが、0217の新バージョンの推論品質は優れています。必要に応じて選択してください。_
_その他の Mac は CPU のみで推論を行うことができます。_
次に、以下のコマンドを使用してインストールします:
#### 環境作成
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
```
#### Pip パッケージ
```bash
pip install -r requirements.txt
pip uninstall torch torchaudio
pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```
_注記: UVR5 を使用して前処理を行う場合は、[オリジナルプロジェクトの GUI をダウンロード](https://github.com/Anjok07/ultimatevocalremovergui)して、「GPU Conversion」を選択することをお勧めします。さらに、特に推論時にメモリリークの問題が発生する可能性があります。推論 webUI を再起動することでメモリを解放することができます。_
### Conda によるクイックインストール
### Linux
```bash
conda create -n GPTSoVits python=3.9
@ -80,15 +61,30 @@ conda activate GPTSoVits
bash install.sh
```
### macOS
**注:MacでGPUを使用して訓練されたモデルは、他のデバイスで訓練されたモデルと比較して著しく品質が低下するため、当面はCPUを使用して訓練します。**
1. `xcode-select --install` を実行して、Xcodeコマンドラインツールをインストールします。
2. `brew install ffmpeg` または `conda install ffmpeg` を実行して、FFmpegをインストールします。
3. 上記の手順を完了した後、以下のコマンドを実行してこのプロジェクトをインストールします。
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### 手動インストール
#### Pip パッケージ
#### 依存関係をインストールします
```bash
pip install -r requirements.txt
```
#### FFmpeg
#### FFmpegをインストールします。
##### Conda ユーザー
@ -104,12 +100,6 @@ sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### MacOS ユーザー
```bash
brew install ffmpeg
```
##### Windows ユーザー
[ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) と [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) をダウンロードし、GPT-SoVITS のルートディレクトリに置きます。
@ -141,11 +131,11 @@ docker compose -f "docker-compose.yaml" up -d
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
### 事前訓練済みモデル
## 事前訓練済みモデル
[GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) から事前訓練済みモデルをダウンロードし、`GPT_SoVITS\pretrained_models` に置きます。
中国語 ASR追加については、[Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files)、[Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files)、[Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) からモデルをダウンロードし、`tools/damo_asr/models` に置いてください。
中国語 ASR追加については、[Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files)、[Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files)、[Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) からモデルをダウンロードし、`tools/asr/models` に置いてください。
UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally) の場合は、[UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) からモデルをダウンロードして `tools/uvr5/uvr5_weights` に置きます。
@ -189,7 +179,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [ ] より良い sovits ベースモデル(音質向上)
- [ ] モデルミックス
## (オプション) 必要に応じて、コマンドライン操作モードが提供されます。
## (追加の) コマンドラインから実行する方法
コマンド ラインを使用して UVR5 の WebUI を開きます
```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
@ -210,33 +200,42 @@ python audio_slicer.py \
```
コマンドラインを使用してデータセット ASR 処理を行う方法です (中国語のみ)
```
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
python tools/asr/funasr_asr.py -i <input> -o <output>
```
ASR処理はFaster_Whisperを通じて実行されます(中国語を除くASRマーキング)
(進行状況バーは表示されません。GPU のパフォーマンスにより時間遅延が発生する可能性があります)
```
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
```
カスタムリストの保存パスが有効になっています
## クレジット
以下のプロジェクトとコントリビューターに感謝します:
特に以下のプロジェクトと貢献者に感謝します:
### 理論研究
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)
### 事前学習モデル
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
### 推論用テキストフロントエンド
- [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
- [LangSegment](https://github.com/juntaosun/LangSegment)
### WebUI ツール
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
- [faster-whisper](https://github.com/SYSTRAN/faster-whisper)
- [FunASR](https://github.com/alibaba-damo-academy/FunASR)
## すべてのコントリビューターに感謝します

View File

@ -89,6 +89,21 @@
1. 참조 텍스트 입력을 지원합니다.
2. 프론트엔드에 있던 중국어 텍스트 입력 버그를 수정하였습니다.
todolist :
### 20240221 업데이트
1. 중국어 다음음자 추론 최적화
1. 데이터 처리에 음성 노이즈 감소 옵션을 추가하였습니다. (노이즈 감소는 16k 샘플링률만 남기며, 노이즈가 크지 않다면 사용하지 마십시오.)
2. 중국어 및 일본어 프론트엔드 처리를 최적화하였습니다. https://github.com/RVC-Boss/GPT-SoVITS/pull/559 https://github.com/RVC-Boss/GPT-SoVITS/pull/556 https://github.com/RVC-Boss/GPT-SoVITS/pull/532 https://github.com/RVC-Boss/GPT-SoVITS/pull/507 https://github.com/RVC-Boss/GPT-SoVITS/pull/509
3. Mac에서 CPU 추론이 더 빨라졌으므로 추론 장치를 mps에서 CPU로 변경하였습니다.
4. colab에서 공용 URL을 열지 않는 문제를 수정하였습니다.
### 20240306 업데이트
1. 추론 속도를 50% 빠르게 하였습니다. (RTX3090+pytorch2.2.1+cu11.8+win10+py39 테스트 완료) https://github.com/RVC-Boss/GPT-SoVITS/pull/672
2. faster whisper를 사용할 때 중국어 ASR을 먼저 다운로드할 필요가 없습니다.
3. uvr5의 잔향 제거 모델이 잔향이 있는지 여부를 반대로 반환하는 문제를 수정하였습니다.
4. faster whisper가 CUDA를 사용할 수 없는 경우 자동으로 CPU 추론을 사용하도록 수정하였습니다.
5. is_half의 판단을 수정하여 Mac에서 CPU 추론이 정상적으로 작동하도록 수정하였습니다.
todolist
1. 중국어 다양한 발음 단어 추론 최적화(테스트 결과를 작성하시는 분은 pr 코멘트 영역에 작성해주시면 감사하겠습니다)

View File

@ -8,21 +8,16 @@
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Discord](https://img.shields.io/discord/1198701940511617164?color=%23738ADB&label=Discord&style=for-the-badge)](https://discord.gg/dnrgs5GHfG)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](../ja/README.md) | [**한국어**](./README.md)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](../ja/README.md) | **한국어** | [**Türkçe**](../tr/README.md)
</div>
---
> 데모 비디오를 확인하세요! [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw)
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
중국 지역의 사용자는 AutoDL 클라우드 이미지를 사용하여 체험할 수 있습니다: https://www.codewithgpu.com/i/RVC-Boss/GPT-SoVITS/GPT-SoVITS-Official
## 기능:
1. **제로샷 텍스트 음성 변환 (TTS):** 5초의 음성 샘플을 입력하면 즉시 텍스트를 음성으로 변환할 수 있습니다.
@ -33,46 +28,32 @@ https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-
4. **WebUI 도구:** 음성 반주 분리, 자동 훈련 데이터셋 분할, 중국어 자동 음성 인식(ASR) 및 텍스트 주석 등의 도구를 통합하여 초보자가 훈련 데이터셋과 GPT/SoVITS 모델을 생성하는 데 도움을 줍니다.
## 환경 준비
**데모 비디오를 확인하세요! [demo video](https://www.bilibili.com/video/BV12g4y1m7Uw)**
Windows 사용자는 (win>=10 에서 테스트되었습니다) 미리 빌드된 파일을 다운로드하여 설치할 수 있습니다. 다운로드 후 GPT-SoVITS-WebUI를 시작하려면 압축을 풀고 go-webui.bat을 두 번 클릭하면 됩니다.
보지 못한 발화자의 퓨샷(few-shot) 파인튜닝 데모:
### 테스트된 Python 및 PyTorch 버전
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
- Python 3.9, PyTorch 2.0.1 및 CUDA 11
- Python 3.10.13, PyTorch 2.1.2 및 CUDA 12.3
- Python 3.9, PyTorch 2.3.0.dev20240122 및 macOS 14.3 (Apple 칩, GPU)
**사용자 설명서: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
## 설치
### 테스트 통과 환경
- Python 3.9, PyTorch 2.0.1, CUDA 11
- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3
- Python 3.9, PyTorch 2.2.2, macOS 14.4.1 (Apple silicon)
- Python 3.9, PyTorch 2.2.2, CPU 장치
_참고: numba==0.56.4 는 python<3.11 필요로 합니다._
### MacOS 사용자
### Windows
MacOS 사용자는 GPU를 사용하여 훈련 및 추론을 하려면 다음 조건을 충족해야 합니다:
Windows 사용자라면 (win>=10에서 테스트됨), [0206fix3 패키지](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta-fast-inference-branch.7z?download=true) 또는 [0217fix2 패키지](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta0217fix2.7z?download=true)를 다운로드하고 압축을 풀어 _go-webui.bat_ 파일을 더블 클릭하면 GPT-SoVITS-WebUI를 시작할 수 있습니다.
- Apple 칩이 장착된 Mac
- macOS 12.3 이상
- `xcode-select --install`을 실행하여 Xcode command-line tools를 설치했습니다.
_참고: 0206 버전은 추론 속도가 더 빠르지만, 0217 새 버전은 추론 품질이 더 좋습니다. 필요에 따라 선택할 수 있습니다._
_다른 Mac은 CPU를 사용하여 추론만 지원합니다._
그런 다음 다음 명령을 사용하여 설치합니다:
#### 환경 설정
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
```
#### 의존성 모듈 설치
```bash
pip install -r requirements.txt
pip uninstall torch torchaudio
pip3 install --pre torch torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
```
### Conda를 사용한 간편 설치
### Linux
```bash
conda create -n GPTSoVits python=3.9
@ -80,15 +61,30 @@ conda activate GPTSoVits
bash install.sh
```
### macOS
**주의: Mac에서 GPU로 훈련된 모델은 다른 OS에서 훈련된 모델에 비해 품질이 낮습니다. 해당 문제를 해결하기 전까지 MacOS에선 CPU를 사용하여 훈련을 진행합니다.**
1. `xcode-select --install`을 실행하여 Xcode 커맨드라인 도구를 설치하세요.
2. `brew install ffmpeg` 또는 `conda install ffmpeg`을 실행하여 FFmpeg를 설치하세요.
3. 위의 단계를 완료한 후, 다음 명령어를 실행하여 이 프로젝트를 설치하세요.
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### 수동 설치
#### Pip 패키지
#### 의존성 설치
```bash
pip install -r requirements.txt
```
#### FFmpeg
#### FFmpeg 설치
##### Conda 사용자
@ -104,12 +100,6 @@ sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### MacOS 사용자
```bash
brew install ffmpeg
```
##### Windows 사용자
[ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe)와 [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe)를 GPT-SoVITS root 디렉토리에 넣습니다.
@ -144,11 +134,11 @@ docker compose -f "docker-compose.yaml" up -d
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
### 사전 훈련된 모델
## 사전 훈련된 모델
[GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS)에서 사전 훈련된 모델을 다운로드하고 `GPT_SoVITS\pretrained_models`에 넣습니다.
중국어 자동 음성 인식(ASR), 음성 반주 분리 및 음성 제거를 위해 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files) 및 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files)을 다운로드하고 `tools/damo_asr/models`에 넣습니다.
중국어 자동 음성 인식(ASR), 음성 반주 분리 및 음성 제거를 위해 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files) 및 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files)을 다운로드하고 `tools/asr/models`에 넣습니다.
UVR5(음성/반주 분리 및 잔향 제거)를 위해 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights)에서 모델을 다운로드하고 `tools/uvr5/uvr5_weights`에 넣습니다.
@ -193,7 +183,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
- [ ] 더 나은 sovits 기본 모델 (향상된 오디오 품질).
- [ ] 모델 블렌딩.
## (선택 사항) 필요한 경우 여기에서 명령줄 작업 모드를 제공합니다.
## (추가적인) 명령줄에서 실행하는 방법
명령줄을 사용하여 UVR5용 WebUI 열기
```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
@ -214,33 +204,43 @@ python audio_slicer.py \
```
명령줄을 사용하여 데이터 세트 ASR 처리를 수행하는 방법입니다(중국어만 해당).
```
python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
python tools/asr/funasr_asr.py -i <input> -o <output>
```
ASR 처리는 Faster_Whisper(중국어를 제외한 ASR 마킹)를 통해 수행됩니다.
(진행률 표시줄 없음, GPU 성능으로 인해 시간 지연이 발생할 수 있음)
```
python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
```
사용자 정의 목록 저장 경로가 활성화되었습니다.
## 감사의 말
특별히 다음 프로젝트와 기여자에게 감사드립니다:
다음 프로젝트와 기여자에게 특별히 감사드립니다:
### 이론 연구
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)
### 사전 학습 모델
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
### 추론용 텍스트 프론트엔드
- [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
- [LangSegment](https://github.com/juntaosun/LangSegment)
### WebUI 도구
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
- [faster-whisper](https://github.com/SYSTRAN/faster-whisper)
- [FunASR](https://github.com/alibaba-damo-academy/FunASR)
## 모든 기여자들에게 감사드립니다 ;)

258
docs/tr/README.md Normal file
View File

@ -0,0 +1,258 @@
<div align="center">
<h1>GPT-SoVITS-WebUI</h1>
Güçlü Birkaç Örnekli Ses Dönüştürme ve Metinden Konuşmaya Web Arayüzü.<br><br>
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange)](https://github.com/RVC-Boss/GPT-SoVITS)
<img src="https://counter.seku.su/cmoe?name=gptsovits&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Boss/GPT-SoVITS/blob/main/colab_webui.ipynb)
[![License](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Boss/GPT-SoVITS/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Models%20Repo-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/GPT-SoVITS/tree/main)
[![Discord](https://img.shields.io/discord/1198701940511617164?color=%23738ADB&label=Discord&style=for-the-badge)](https://discord.gg/dnrgs5GHfG)
[**English**](../../README.md) | [**中文简体**](../cn/README.md) | [**日本語**](../ja/README.md) | [**한국어**](../ko/README.md) | **Türkçe**
</div>
---
## Özellikler:
1. **Sıfır Örnekli Metinden Konuşmaya:** 5 saniyelik bir vokal örneği girin ve anında metinden konuşmaya dönüşümünü deneyimleyin.
2. **Birkaç Örnekli Metinden Konuşmaya:** Daha iyi ses benzerliği ve gerçekçiliği için modeli yalnızca 1 dakikalık eğitim verisiyle ince ayarlayın.
3. **Çapraz Dil Desteği:** Eğitim veri setinden farklı dillerde çıkarım, şu anda İngilizce, Japonca ve Çinceyi destekliyor.
4. **Web Arayüzü Araçları:** Entegre araçlar arasında vokal eşliğinde ayırma, otomatik eğitim seti segmentasyonu, Çince ASR ve metin etiketleme bulunur ve yeni başlayanların eğitim veri setleri ve GPT/SoVITS modelleri oluşturmalarına yardımcı olur.
**[Demo videomuzu](https://www.bilibili.com/video/BV12g4y1m7Uw) buradan izleyin!**
Görünmeyen konuşmacılar birkaç örnekli ince ayar demosu:
https://github.com/RVC-Boss/GPT-SoVITS/assets/129054828/05bee1fa-bdd8-4d85-9350-80c060ab47fb
**Kullanıcı Kılavuzu: [简体中文](https://www.yuque.com/baicaigongchang1145haoyuangong/ib3g1e) | [English](https://rentry.co/GPT-SoVITS-guide#/)**
## Kurulum
### Test Edilmiş Ortamlar
- Python 3.9, PyTorch 2.0.1, CUDA 11
- Python 3.10.13, PyTorch 2.1.2, CUDA 12.3
- Python 3.9, PyTorch 2.2.2, macOS 14.4.1 (Apple silikon)
- Python 3.9, PyTorch 2.2.2, CPU cihazları
_Not: numba==0.56.4, py<3.11 gerektirir_
### Windows
Eğer bir Windows kullanıcısıysanız (win>=10 ile test edilmiştir), [0206fix3 paketini](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta-fast-inference-branch.7z?download=true) veya [0217fix2 paketini](https://huggingface.co/lj1995/GPT-SoVITS-windows-package/resolve/main/GPT-SoVITS-beta0217fix2.7z?download=true) indirip _go-webui.bat_ dosyasına çift tıklayarak GPT-SoVITS-WebUI'yi başlatabilirsiniz.
_Not: 0206 sürümünün çıkarım hızı daha hızlıdır, 0217 yeni sürümünün çıkarım kalitesi ise daha iyidir. İhtiyacınıza göre seçim yapabilirsiniz._
### Linux
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
bash install.sh
```
### macOS
**Not: Mac'lerde GPU'larla eğitilen modeller, diğer cihazlarda eğitilenlere göre önemli ölçüde daha düşük kalitede sonuç verir, bu nedenle geçici olarak CPU'lar kullanıyoruz.**
1. `xcode-select --install` komutunu çalıştırarak Xcode komut satırı araçlarını yükleyin
2. `brew install ffmpeg` veya `conda install ffmpeg` komutunu çalıştırarak FFmpeg'i yükleyin.
3. Aşağıdaki komutları çalıştırarak programı yükleyin:
```bash
conda create -n GPTSoVits python=3.9
conda activate GPTSoVits
pip install -r requirements.txt
```
### El ile Yükleme
#### Bağımlılıkları Yükleme
```bash
pip install -r requirements.txt
```
#### FFmpeg'i Yükleme
##### Conda Kullanıcıları
```bash
conda install ffmpeg
```
##### Ubuntu/Debian Kullanıcıları
```bash
sudo apt install ffmpeg
sudo apt install libsox-dev
conda install -c conda-forge 'ffmpeg<7'
```
##### Windows Kullanıcıları
[ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe) ve [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe) dosyalarını indirin ve GPT-SoVITS kök dizinine yerleştirin.
### Docker Kullanarak
#### docker-compose.yaml yapılandırması
0. Görüntü etiketleri hakkında: Kod tabanındaki hızlı güncellemeler ve görüntüleri paketleme ve test etme işleminin yavaş olması nedeniyle, lütfen şu anda paketlenmiş en son görüntüleri kontrol etmek için [Docker Hub](https://hub.docker.com/r/breakstring/gpt-sovits) adresini kontrol edin ve durumunuza göre seçim yapın veya alternatif olarak, kendi ihtiyaçlarınıza göre bir Dockerfile kullanarak yerel olarak oluşturun.
1. Ortam Değişkenleri
- is_half: Yarım hassasiyet/çift hassasiyeti kontrol eder. Bu genellikle "SSL çıkarma" adımı sırasında 4-cnhubert/5-wav32k dizinleri altındaki içeriğin doğru şekilde oluşturulmamasının nedenidir. Gerçek durumunuza göre True veya False olarak ayarlayın.
2. Birim Yapılandırması: Kapsayıcı içindeki uygulamanın kök dizini /workspace olarak ayarlanmıştır. Varsayılan docker-compose.yaml, içerik yükleme/indirme için bazı pratik örnekler listeler.
3. shm_size: Windows üzerinde Docker Desktop için varsayılan kullanılabilir bellek çok küçüktür, bu da anormal işlemlere neden olabilir. Kendi durumunuza göre ayarlayın.
4. Dağıtım bölümü altında, GPU ile ilgili ayarlar sisteminize ve gerçek koşullara göre dikkatlice ayarlanmalıdır.
#### docker compose ile çalıştırma
```
docker compose -f "docker-compose.yaml" up -d
```
#### docker komutu ile çalıştırma
Yukarıdaki gibi, ilgili parametreleri gerçek durumunuza göre değiştirin, ardından aşağıdaki komutu çalıştırın:
```
docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-DockerTest\output:/workspace/output --volume=G:\GPT-SoVITS-DockerTest\logs:/workspace/logs --volume=G:\GPT-SoVITS-DockerTest\SoVITS_weights:/workspace/SoVITS_weights --workdir=/workspace -p 9880:9880 -p 9871:9871 -p 9872:9872 -p 9873:9873 -p 9874:9874 --shm-size="16G" -d breakstring/gpt-sovits:xxxxx
```
## Önceden Eğitilmiş Modeller
Önceden eğitilmiş modelleri [GPT-SoVITS Modelleri](https://huggingface.co/lj1995/GPT-SoVITS) adresinden indirin ve `GPT_SoVITS/pretrained_models` dizinine yerleştirin.
UVR5 (Vokal/Eşlik Ayırma ve Yankı Giderme, ayrıca) için, modelleri [UVR5 Ağırlıkları](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) adresinden indirin ve `tools/uvr5/uvr5_weights` dizinine yerleştirin.
Çin bölgesindeki kullanıcılar, aşağıdaki bağlantıları girerek ve "Bir kopya indir"i tıklayarak bu iki modeli indirebilirler
- [GPT-SoVITS Modelleri](https://www.icloud.com.cn/iclouddrive/056y_Xog_HXpALuVUjscIwTtg#GPT-SoVITS_Models)
- [UVR5 Ağırlıkları](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
Çince ASR (ayrıca) için, modelleri [Damo ASR Modeli](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Modeli](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), ve [Damo Punc Modeli](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) adreslerinden indirin ve `tools/asr/models` dizinine yerleştirin.
İngilizce veya Japonca ASR (ayrıca) için, modelleri [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) adresinden indirin ve `tools/asr/models` dizinine yerleştirin. Ayrıca, [diğer modeller](https://huggingface.co/Systran) daha küçük disk alanı kaplamasıyla benzer etkiye sahip olabilir.
Çin bölgesindeki kullanıcılar, aşağıdaki bağlantıları girerek bu modeli indirebilirler
- [Faster Whisper Large V3](https://www.icloud.com/iclouddrive/0c4pQxFs7oWyVU1iMTq2DbmLA#faster-whisper-large-v3) ("Bir kopya indir"i tıklayarak)
- [Faster Whisper Large V3](https://hf-mirror.com/Systran/faster-whisper-large-v3) (HuggingFace ayna sitesi)
## Veri Seti Formatı
TTS açıklama .list dosya formatı:
```
vocal_path|speaker_name|language|text
```
Dil sözlüğü:
- 'zh': Çince
- 'ja': Japonca
- 'en': İngilizce
Örnek:
```
D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
```
## Yapılacaklar Listesi
- [ ] **Yüksek Öncelikli:**
- [x] Japonca ve İngilizceye yerelleştirme.
- [x] Kullanıcı kılavuzu.
- [x] Japonca ve İngilizce veri seti ince ayar eğitimi.
- [ ] **Özellikler:**
- [ ] Sıfır örnekli ses dönüştürme (5s) / birkaç örnekli ses dönüştürme (1dk).
- [ ] Metinden konuşmaya konuşma hızı kontrolü.
- [ ] Gelişmiş metinden konuşmaya duygu kontrolü.
- [ ] SoVITS token girdilerini kelime dağarcığı olasılık dağılımına değiştirme denemesi.
- [ ] İngilizce ve Japonca metin ön ucunu iyileştirme.
- [ ] Küçük ve büyük boyutlu metinden konuşmaya modelleri geliştirme.
- [x] Colab betikleri.
- [ ] Eğitim veri setini genişletmeyi dene (2k saat -> 10k saat).
- [ ] daha iyi sovits temel modeli (geliştirilmiş ses kalitesi)
- [ ] model karışımı
## (Ekstra) Komut satırından çalıştırma yöntemi
UVR5 için Web Arayüzünü açmak için komut satırını kullanın
```
python tools/uvr5/webui.py "<infer_device>" <is_half> <webui_port_uvr5>
```
Bir tarayıcı açamıyorsanız, UVR işleme için aşağıdaki formatı izleyin. Bu, ses işleme için mdxnet kullanır.
```
python mdxnet.py --model --input_root --output_vocal --output_ins --agg_level --format --device --is_half_precision
```
Veri setinin ses segmentasyonu komut satırı kullanılarak bu şekilde yapılır
```
python audio_slicer.py \
--input_path "<orijinal_ses_dosyası_veya_dizininin_yolu>" \
--output_root "<alt_bölümlere_ayrılmış_ses_kliplerinin_kaydedileceği_dizin>" \
--threshold <ses_eşiği> \
--min_length <her_bir_alt_klibin_minimum_süresi> \
--min_interval <bitişik_alt_klipler_arasındaki_en_kısa_zaman_aralığı>
--hop_size <ses_eğrisini_hesaplamak_için_adım_boyutu>
```
Veri seti ASR işleme komut satırı kullanılarak bu şekilde yapılır (Yalnızca Çince)
```
python tools/asr/funasr_asr.py -i <girdi> -o <çıktı>
```
ASR işleme Faster_Whisper aracılığıyla gerçekleştirilir (Çince dışındaki ASR işaretleme)
(İlerleme çubukları yok, GPU performansı zaman gecikmelerine neden olabilir)
```
python ./tools/asr/fasterwhisper_asr.py -i <girdi> -o <çıktı> -l <dil>
```
Özel bir liste kaydetme yolu etkinleştirildi
## Katkı Verenler
Özellikle aşağıdaki projelere ve katkıda bulunanlara teşekkür ederiz:
### Teorik Araştırma
- [ar-vits](https://github.com/innnky/ar-vits)
- [SoundStorm](https://github.com/yangdongchao/SoundStorm/tree/master/soundstorm/s1/AR)
- [vits](https://github.com/jaywalnut310/vits)
- [TransferTTS](https://github.com/hcy71o/TransferTTS/blob/master/models.py#L556)
- [contentvec](https://github.com/auspicious3000/contentvec/)
- [hifi-gan](https://github.com/jik876/hifi-gan)
- [fish-speech](https://github.com/fishaudio/fish-speech/blob/main/tools/llama/generate.py#L41)
### Pretrained Models
- [Chinese Speech Pretrain](https://github.com/TencentGameMate/chinese_speech_pretrain)
- [Chinese-Roberta-WWM-Ext-Large](https://huggingface.co/hfl/chinese-roberta-wwm-ext-large)
### Text Frontend for Inference
- [paddlespeech zh_normalization](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization)
- [LangSegment](https://github.com/juntaosun/LangSegment)
### WebUI Tools
- [ultimatevocalremovergui](https://github.com/Anjok07/ultimatevocalremovergui)
- [audio-slicer](https://github.com/openvpi/audio-slicer)
- [SubFix](https://github.com/cronrpc/SubFix)
- [FFmpeg](https://github.com/FFmpeg/FFmpeg)
- [gradio](https://github.com/gradio-app/gradio)
- [faster-whisper](https://github.com/SYSTRAN/faster-whisper)
- [FunASR](https://github.com/alibaba-damo-academy/FunASR)
## Thanks to all contributors for their efforts
<a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
<img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
</a>
View File
@ -54,11 +54,11 @@
"source": [
"# @title Download pretrained models 下载预训练模型\n",
"!mkdir -p /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!mkdir -p /kaggle/working/GPT-SoVITS/tools/damo_asr/models\n",
"!mkdir -p /kaggle/working/GPT-SoVITS/tools/asr/models\n",
"!mkdir -p /kaggle/working/GPT-SoVITS/tools/uvr5\n",
"%cd /kaggle/working/GPT-SoVITS/GPT_SoVITS/pretrained_models\n",
"!git clone https://huggingface.co/lj1995/GPT-SoVITS\n",
"%cd /kaggle/working/GPT-SoVITS/tools/damo_asr/models\n",
"%cd /kaggle/working/GPT-SoVITS/tools/asr/models\n",
"!git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git\n",
"!git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git\n",
View File
@ -2,6 +2,18 @@
"很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.",
"UVR5已开启": "UVR5 opened ",
"UVR5已关闭": "UVR5 closed",
"输入文件夹路径": "Input folder path",
"输出文件夹路径": "Output folder path",
"ASR 模型": "ASR model",
"ASR 模型尺寸": "ASR model size",
"ASR 语言设置": "ASR language",
"模型切换": "Model switch",
"是否开启dpo训练选项(实验性)": "Enable DPO training (experimental feature)",
"开启无参考文本模式。不填参考文本亦相当于开启。": "Enable no reference mode. If you don't fill 'Text for reference audio', no reference mode will be enabled.",
"使用无参考文本模式时建议使用微调的GPT": "Please use your trained GPT model if you don't use reference audio.",
"后续将支持转音素、手工修改音素、语音合成分步执行。": " Step-to-step phoneme transformation and modification coming soon!",
"gpt采样参数(无参考文本时不要太低)": "GPT parameters:",
"按标点符号切": "Slice by every punct",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible. <br>If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory <b>Agreement-LICENSE</b> for details.",
"0-前置数据集获取工具": "0-Fetch dataset",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)",
View File
@ -2,6 +2,18 @@
"很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练",
"UVR5已开启": "UVR5已开启",
"UVR5已关闭": "UVR5已关闭",
"输入文件夹路径": "输入文件夹路径",
"输出文件夹路径": "输出文件夹路径",
"ASR 模型": "ASR 模型",
"ASR 模型尺寸": "ASR 模型尺寸",
"ASR 语言设置": "ASR 语言设置",
"模型切换": "模型切换",
"是否开启dpo训练选项(实验性)": "是否开启dpo训练选项(实验性)",
"开启无参考文本模式。不填参考文本亦相当于开启。": "开启无参考文本模式。不填参考文本亦相当于开启。",
"使用无参考文本模式时建议使用微调的GPT": "使用无参考文本模式时建议使用微调的GPT",
"后续将支持转音素、手工修改音素、语音合成分步执行。": "后续将支持转音素、手工修改音素、语音合成分步执行。",
"gpt采样参数(无参考文本时不要太低)": "gpt采样参数(无参考文本时不要太低)",
"按标点符号切": "按标点符号切",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.",
"0-前置数据集获取工具": "0-前置数据集获取工具",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人声伴奏分离&去混响去延迟工具",
View File
@ -1,4 +1,4 @@
numpy
numpy==1.23.4
scipy
tensorboard
librosa==0.9.2
@ -25,3 +25,4 @@ jieba_fast
jieba
LangSegment>=0.2.0
Faster_Whisper
wordsegment
0
tools/__init__.py Normal file
View File
View File
@ -1,17 +1,15 @@
import argparse
import os
os.environ["HF_ENDPOINT"]="https://hf-mirror.com"
import traceback
import requests
from glob import glob
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import torch
from faster_whisper import WhisperModel
from tqdm import tqdm
from tools.asr.config import check_fw_local_models
from tools.asr.funasr_asr import only_asr
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
language_code_list = [
"af", "am", "ar", "as", "az",
@ -36,7 +34,7 @@ language_code_list = [
"vi", "yi", "yo", "zh", "yue",
"auto"]
def execute_asr(input_folder, output_folder, model_size, language,precision):
def execute_asr(input_folder, output_folder, model_size, language, precision):
if '-local' in model_size:
model_size = model_size[:-6]
model_path = f'tools/asr/models/faster-whisper-{model_size}'
@ -45,21 +43,23 @@ def execute_asr(input_folder, output_folder, model_size, language,precision):
if language == 'auto':
language = None # leave the language unset; the model automatically outputs the most probable one
print("loading faster whisper model:",model_size,model_path)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
try:
model = WhisperModel(model_path, device="cuda", compute_type=precision)
model = WhisperModel(model_path, device=device, compute_type=precision)
except:
return print(traceback.format_exc())
input_file_names = os.listdir(input_folder)
input_file_names.sort()
output = []
output_file_name = os.path.basename(input_folder)
output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for file in tqdm(glob(os.path.join(input_folder, '**/*.wav'), recursive=True)):
for file_name in tqdm(input_file_names):
try:
file_path = os.path.join(input_folder, file_name)
segments, info = model.transcribe(
audio = file,
audio = file_path,
beam_size = 5,
vad_filter = True,
vad_parameters = dict(min_silence_duration_ms=700),
@ -67,16 +67,23 @@ def execute_asr(input_folder, output_folder, model_size, language,precision):
text = ''
if info.language == "zh":
print("检测为中文文本,转funasr处理")
text = only_asr(file)
print("检测为中文文本, 转 FunASR 处理")
if("only_asr"not in globals()):
from tools.asr.funasr_asr import \
only_asr # #如果用英文就不需要导入下载模型
text = only_asr(file_path)
if text == '':
for segment in segments:
text += segment.text
output.append(f"{file}|{output_file_name}|{info.language.upper()}|{text}")
output.append(f"{file_path}|{output_file_name}|{info.language.upper()}|{text}")
except:
return print(traceback.format_exc())
output_folder = output_folder or "output/asr_opt"
os.makedirs(output_folder, exist_ok=True)
output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
with open(output_file_path, "w", encoding="utf-8") as f:
f.write("\n".join(output))
print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
View File
@ -38,10 +38,11 @@ def execute_asr(input_folder, output_folder, model_size, language):
output = []
output_file_name = os.path.basename(input_folder)
for name in tqdm(input_file_names):
for file_name in tqdm(input_file_names):
try:
text = model.generate(input="%s/%s"%(input_folder, name))[0]["text"]
output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}")
file_path = os.path.join(input_folder, file_name)
text = model.generate(input=file_path)[0]["text"]
output.append(f"{file_path}|{output_file_name}|{language.upper()}|{text}")
except:
print(traceback.format_exc())
View File
@ -28,4 +28,4 @@ def load_audio(file, sr):
def clean_path(path_str):
if platform.system() == 'Windows':
path_str = path_str.replace('/', '\\')
return path_str.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
return path_str.strip(" ").strip('"').strip("\n").strip('"').strip(" ").strip("\u202a")
View File
@ -220,7 +220,7 @@ class Predictor:
opt_path_other = path_other[:-4] + ".%s" % format
if os.path.exists(path_vocal):
os.system(
"ffmpeg -i %s -vn %s -q:a 2 -y" % (path_vocal, opt_path_vocal)
"ffmpeg -i '%s' -vn '%s' -q:a 2 -y" % (path_vocal, opt_path_vocal)
)
if os.path.exists(opt_path_vocal):
try:
@ -229,7 +229,7 @@ class Predictor:
pass
if os.path.exists(path_other):
os.system(
"ffmpeg -i %s -vn %s -q:a 2 -y" % (path_other, opt_path_other)
"ffmpeg -i '%s' -vn '%s' -q:a 2 -y" % (path_other, opt_path_other)
)
if os.path.exists(opt_path_other):
try:
@ -252,5 +252,5 @@ class MDXNetDereverb:
self.pred = Predictor(self)
self.device = cpu
def _path_audio_(self, input, vocal_root, others_root, format, is_hp3=False):
def _path_audio_(self, input, others_root, vocal_root, format, is_hp3=False):
self.pred.prediction(input, vocal_root, others_root, format)
2
tools/uvr5/uvr5_weights/.gitignore vendored Normal file
View File
@ -0,0 +1,2 @@
*
!.gitignore
View File
@ -73,8 +73,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
os.path.basename(inp_path),
)
os.system(
"ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y"
% (inp_path, tmp_path)
f'ffmpeg -i "{inp_path}" -vn -acodec pcm_s16le -ac 2 -ar 44100 "{tmp_path}" -y'
)
inp_path = tmp_path
try:
View File
@ -1,6 +1,6 @@
import os,shutil,sys,pdb,re
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.insert(0, now_dir)
import json,yaml,warnings,torch
import platform
import psutil
@ -55,7 +55,7 @@ from scipy.io import wavfile
from tools.my_utils import load_audio
from multiprocessing import cpu_count
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # fall back to the CPU for steps MPS does not support
# os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # fall back to the CPU for steps MPS does not support
n_cpu=cpu_count()
@ -73,18 +73,19 @@ if torch.cuda.is_available() or ngpu != 0:
if_gpu_ok = True # at least one usable NVIDIA GPU
gpu_infos.append("%s\t%s" % (i, gpu_name))
mem.append(int(torch.cuda.get_device_properties(i).total_memory/ 1024/ 1024/ 1024+ 0.4))
# check whether MPS acceleration is supported
if torch.backends.mps.is_available():
if_gpu_ok = True
gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # in testing, using system memory as VRAM does not run out of memory
# # check whether MPS acceleration is supported
# if torch.backends.mps.is_available():
# if_gpu_ok = True
# gpu_infos.append("%s\t%s" % ("0", "Apple GPU"))
# mem.append(psutil.virtual_memory().total/ 1024 / 1024 / 1024) # in testing, using system memory as VRAM does not run out of memory
if if_gpu_ok and len(gpu_infos) > 0:
gpu_info = "\n".join(gpu_infos)
default_batch_size = min(mem) // 2
else:
gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
default_batch_size = 1
gpu_info = ("%s\t%s" % ("0", "CPU"))
gpu_infos.append("%s\t%s" % ("0", "CPU"))
default_batch_size = int(psutil.virtual_memory().total/ 1024 / 1024 / 1024 / 2)
gpus = "-".join([i[0] for i in gpu_infos])
pretrained_sovits_name="GPT_SoVITS/pretrained_models/s2G488k.pth"
@ -417,7 +418,10 @@ def open1a(inp_text,inp_wav_dir,exp_name,gpu_numbers,bert_pretrained_dir):
with open(path_text, "w", encoding="utf8") as f:
f.write("\n".join(opt) + "\n")
ps1a=[]
yield "文本进程结束",{"__type__":"update","visible":True},{"__type__":"update","visible":False}
if len("".join(opt)) > 0:
yield "文本进程成功", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
else:
yield "文本进程失败", {"__type__": "update", "visible": True}, {"__type__": "update", "visible": False}
else:
yield "已有正在进行的文本任务,需先终止才能开启下一次任务", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
@ -582,7 +586,7 @@ def open1abc(inp_text,inp_wav_dir,exp_name,gpu_numbers1a,gpu_numbers1Ba,gpu_numb
os.remove(txt_path)
with open(path_text, "w",encoding="utf8") as f:
f.write("\n".join(opt) + "\n")
assert len("".join(opt)) > 0, "1Aa-文本获取进程失败"
yield "进度1a-done", {"__type__": "update", "visible": False}, {"__type__": "update", "visible": True}
ps1abc=[]
#############################1b