Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git (synced 2025-10-10 01:00:00 +08:00)

commit adad163625
Merge branch 'main' of https://github.com/KakaruHayate/GPT-SoVITS-musa
GPT_SoVITS/text/engdict-hot.rep

@@ -1 +1,2 @@
 CHATGPT CH AE1 T JH IY1 P IY1 T IY1
+JSON JH EY1 S AH0 N
GPT_SoVITS/text/english.py

@@ -8,11 +8,19 @@ from string import punctuation
 
 from text import symbols
 
+import unicodedata
+from builtins import str as unicode
+from g2p_en.expand import normalize_numbers
+from nltk.tokenize import TweetTokenizer
+word_tokenize = TweetTokenizer().tokenize
+from nltk import pos_tag
 
 current_file_path = os.path.dirname(__file__)
 CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
 CMU_DICT_FAST_PATH = os.path.join(current_file_path, "cmudict-fast.rep")
 CMU_DICT_HOT_PATH = os.path.join(current_file_path, "engdict-hot.rep")
 CACHE_PATH = os.path.join(current_file_path, "engdict_cache.pickle")
+NAMECACHE_PATH = os.path.join(current_file_path, "namedict_cache.pickle")
 
 arpa = {
     "AH0",
@@ -155,6 +163,9 @@ def read_dict_new():
             line_index = line_index + 1
             line = f.readline()
 
+    return g2p_dict
+
+def hot_reload_hot(g2p_dict):
     with open(CMU_DICT_HOT_PATH) as f:
         line = f.readline()
         line_index = 1
@@ -185,10 +196,19 @@ def get_dict():
         g2p_dict = read_dict_new()
         cache_dict(g2p_dict, CACHE_PATH)
 
+    g2p_dict = hot_reload_hot(g2p_dict)
+
     return g2p_dict
 
 
-eng_dict = get_dict()
+def get_namedict():
+    if os.path.exists(NAMECACHE_PATH):
+        with open(NAMECACHE_PATH, "rb") as pickle_file:
+            name_dict = pickle.load(pickle_file)
+    else:
+        name_dict = {}
+
+    return name_dict
+
+
 def text_normalize(text):
@@ -204,6 +224,16 @@ def text_normalize(text):
     for p, r in rep_map.items():
         text = re.sub(p, r, text)
 
+    # text formatting adapted from g2p_en,
+    # with uppercase compatibility added
+    text = unicode(text)
+    text = normalize_numbers(text)
+    text = ''.join(char for char in unicodedata.normalize('NFD', text)
+                   if unicodedata.category(char) != 'Mn')  # Strip accents
+    text = re.sub("[^ A-Za-z'.,?!\-]", "", text)
+    text = re.sub(r"(?i)i\.e\.", "that is", text)
+    text = re.sub(r"(?i)e\.g\.", "for example", text)
+
     return text
 
 
@@ -213,37 +243,106 @@ class en_G2p(G2p):
         # initialize the word segmenter
         wordsegment.load()
 
-        # extend the outdated dictionary
+        # extend the outdated dictionary, and add the name dictionary
         self.cmu = get_dict()
+        self.namedict = get_namedict()
 
         # drop a few abbreviations whose pronunciations come out wrong
         for word in ["AE", "AI", "AR", "IOS", "HUD", "OS"]:
             del self.cmu[word.lower()]
 
-        # a standalone "A" is read "EY1", not "AH0"
-        self.cmu['a'] = [['EY1']]
+        # fix homographs
+        self.homograph2features["read"] = (['R', 'IY1', 'D'], ['R', 'EH1', 'D'], 'VBP')
+        self.homograph2features["complex"] = (['K', 'AH0', 'M', 'P', 'L', 'EH1', 'K', 'S'], ['K', 'AA1', 'M', 'P', 'L', 'EH0', 'K', 'S'], 'JJ')
 
 
-    def predict(self, word):
-        # a lowercase OOV of length <= 3 is read letter by letter
-        if (len(word) <= 3):
-            return [phone for w in word for phone in self(w)]
+    def __call__(self, text):
+        # tokenization
+        words = word_tokenize(text)
+        tokens = pos_tag(words)  # tuples of (word, tag)
+
+        # steps
+        prons = []
+        for o_word, pos in tokens:
+            # reproduce g2p_en's lowercasing logic
+            word = o_word.lower()
+
+            if re.search("[a-z]", word) is None:
+                pron = [word]
+            # push single letters out first
+            elif len(word) == 1:
+                # fix for a standalone "A"; the original-case o_word is needed to detect uppercase
+                if o_word == "A":
+                    pron = ['EY1']
+                else:
+                    pron = self.cmu[word][0]
+            # g2p_en's original homograph handling
+            elif word in self.homograph2features:  # Check homograph
+                pron1, pron2, pos1 = self.homograph2features[word]
+                if pos.startswith(pos1):
+                    pron = pron1
+                # pos1 longer than pos only happens for "read"
+                elif len(pos) < len(pos1) and pos == pos1[:len(pos)]:
+                    pron = pron1
+                else:
+                    pron = pron2
+            else:
+                # recursive lookup / prediction
+                pron = self.qryword(o_word)
+
+            prons.extend(pron)
+            prons.extend([" "])
+
+        return prons[:-1]
+
+
+    def qryword(self, o_word):
+        word = o_word.lower()
+
+        # dictionary lookup, except for single letters
+        if len(word) > 1 and word in self.cmu:  # lookup CMU dict
+            return self.cmu[word][0]
+
+        # consult the name dictionary only when the word is title-cased
+        if o_word.istitle() and word in self.namedict:
+            return self.namedict[word][0]
+
+        # an OOV of length <= 3 is read letter by letter
+        if len(word) <= 3:
+            phones = []
+            for w in word:
+                # fix for a standalone "a"; no uppercase can occur here
+                if w == "a":
+                    phones.extend(['EY1'])
+                else:
+                    phones.extend(self.cmu[w][0])
+            return phones
 
         # try to split off a possessive 's
         if re.match(r"^([a-z]+)('s)$", word):
-            phone = self(word[:-2])
-            phone.extend(['Z'])
-            return phone
+            phones = self.qryword(word[:-2])
+            # voiceless consonant endings P T K F TH HH: 's is ['S']
+            if phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']:
+                phones.extend(['S'])
+            # sibilant endings S Z SH ZH CH JH: 's is ['IH1', 'Z'] or ['AH0', 'Z']
+            elif phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']:
+                phones.extend(['AH0', 'Z'])
+            # voiced consonant endings B D G DH V M N NG L R W Y: 's is ['Z']
+            # vowel endings (AH0 AH1 AH2 EY0 EY1 EY2 AE0 AE1 AE2 EH0 EH1 EH2 OW0 OW1 OW2 UH0 UH1 UH2 IY0 IY1 IY2 AA0 AA1 AA2 AO0 AO1 AO2
+            # ER ER0 ER1 ER2 UW0 UW1 UW2 AY0 AY1 AY2 AW0 AW1 AW2 OY0 OY1 OY2 IH IH0 IH1 IH2): 's is also ['Z']
+            else:
+                phones.extend(['Z'])
+            return phones
 
         # try word segmentation to handle compound words
         comps = wordsegment.segment(word.lower())
 
         # if it cannot be segmented, send it back for prediction
         if len(comps)==1:
-            return super().predict(word)
+            return self.predict(word)
 
         # if it can be segmented, recurse over the parts
-        return [phone for comp in comps for phone in self(comp)]
+        return [phone for comp in comps for phone in self.qryword(comp)]
 
 
 _g2p = en_G2p()
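For orientation, the two homograph entries added above follow g2p_en's `(pron1, pron2, pos1)` convention: `pron1` is chosen when the word's POS tag and `pos1` match by prefix in either direction. A minimal sketch of that selection rule (the POS tags are illustrative values from `nltk.pos_tag`, not output of the real pipeline):

```python
# Sketch of the homograph selection rule used by en_G2p above.
pron_present, pron_past, pos1 = (['R', 'IY1', 'D'], ['R', 'EH1', 'D'], 'VBP')

def pick_pron(pos):
    # pos is an nltk POS tag such as 'VBP' (present) or 'VBD' (past)
    if pos.startswith(pos1):
        return pron_present
    # pos1 longer than pos only happens for "read" (e.g. pos == 'VB')
    if len(pos) < len(pos1) and pos == pos1[:len(pos)]:
        return pron_present
    return pron_past

print(pick_pron('VBP'))  # ['R', 'IY1', 'D'] - "they read books every day"
print(pick_pron('VBD'))  # ['R', 'EH1', 'D'] - "they read that book yesterday"
```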
@@ -258,12 +357,6 @@ def g2p(text):
 
 
 if __name__ == "__main__":
-    # print(get_dict())
     print(g2p("hello"))
-    print(g2p("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
-    # all_phones = set()
-    # for k, syllables in eng_dict.items():
-    #     for group in syllables:
-    #         for ph in group:
-    #             all_phones.add(ph)
-    # print(all_phones)
+    print(g2p(text_normalize("e.g. I used openai's AI tool to draw a picture.")))
+    print(g2p(text_normalize("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")))
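The possessive branch added to `qryword` encodes the standard English 's allophony. A self-contained sketch of just that rule, with hand-written ARPABET inputs for illustration (these phone lists are not pulled from the real dictionary):

```python
# Standalone sketch of the possessive-'s rule from qryword above.
def possessive_suffix(phones):
    if phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']:    # voiceless ending -> 'S'
        return phones + ['S']
    if phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']:  # sibilant ending -> 'AH0 Z'
        return phones + ['AH0', 'Z']
    return phones + ['Z']                                 # voiced consonant or vowel -> 'Z'

print(possessive_suffix(['K', 'AE1', 'T']))         # cat's    -> [..., 'S']
print(possessive_suffix(['JH', 'AO1', 'R', 'JH']))  # george's -> [..., 'AH0', 'Z']
print(possessive_suffix(['D', 'AO1', 'G']))         # dog's    -> [..., 'Z']
```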
GPT_SoVITS/text/namedict_cache.pickle (BIN, new file)
Binary file not shown.
README.md (14 lines changed)

@@ -141,7 +141,15 @@ Users in China region can download these two models by entering the links below
 
 - [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
 
-For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/damo_asr/models`.
+For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/asr/models`.
+
+For English or Japanese ASR (additionally), download models from [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) and place them in `tools/asr/models`. Also, [other models](https://huggingface.co/Systran) may have a similar effect with a smaller disk footprint.
+
+Users in China region can download this model by entering the links below
+
+- [Faster Whisper Large V3](https://www.icloud.com/iclouddrive/0c4pQxFs7oWyVU1iMTq2DbmLA#faster-whisper-large-v3) (clicking "Download a copy")
+
+- [Faster Whisper Large V3](https://hf-mirror.com/Systran/faster-whisper-large-v3) (HuggingFace mirror site)
 
 ## Dataset Format
 
@@ -204,13 +212,13 @@ python audio_slicer.py \
 ```
 This is how dataset ASR processing is done using the command line (Chinese only)
 ```
-python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
+python tools/asr/funasr_asr.py -i <input> -o <output>
 ```
 ASR processing is performed through Faster_Whisper (ASR marking for languages other than Chinese)
 
 (No progress bars; GPU performance may cause time delays)
 ```
-python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
+python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
 ```
 A custom list save path is enabled
api.py (515 lines changed)

@@ -18,6 +18,9 @@
 `-p` - `port to bind, 9880 by default, can be set in config.py`
 `-fp` - `override config.py and use full precision`
 `-hp` - `override config.py and use half precision`
+`-sm` - `streaming return mode, disabled by default: "close","c", "normal","n", "keepalive","k"`
+`-mt` - `returned audio encoding, ogg by default when streaming, wav otherwise: "wav", "ogg", "aac"`
+`-cp` - `text split punctuation, empty by default, passed as a string such as ",.,。"`
 
 `-hb` - `cnhubert path`
 `-b` - `bert path`
@@ -39,6 +42,18 @@ POST:
 }
 ```
 
+Use the reference audio given by the startup arguments and set the split punctuation:
+GET:
+    `http://127.0.0.1:9880?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh&cut_punc=,。`
+POST:
+```json
+{
+    "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
+    "text_language": "zh",
+    "cut_punc": ",。"
+}
+```
+
 Manually specify the reference audio for the current inference:
 GET:
     `http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh`
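For reference, a minimal client for the new `cut_punc` field might look like the sketch below (the host, port, and output filename are assumptions; the endpoint and JSON fields are the ones documented above):

```python
# Minimal client sketch for the TTS endpoint with the new cut_punc field.
import requests

resp = requests.post(
    "http://127.0.0.1:9880",
    json={
        "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。",
        "text_language": "zh",
        "cut_punc": ",。",
    },
    stream=True,  # the server replies with a StreamingResponse
)
with open("out.wav", "wb") as f:  # wav is the non-streaming default media type
    for chunk in resp.iter_content(chunk_size=4096):
        f.write(chunk)
```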
@@ -103,14 +118,10 @@ RESP: none
 
 
 import argparse
-import os
+import os,re
 import sys
 
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-sys.path.append("%s/GPT_SoVITS" % (now_dir))
-
 import signal
+import LangSegment
 from time import time as ttime
 import torch
 import librosa
@@ -129,35 +140,8 @@ from text.cleaner import clean_text
 from module.mel_processing import spectrogram_torch
 from my_utils import load_audio
 import config as global_config
+import logging
+import subprocess
-g_config = global_config.Config()
-
-# AVAILABLE_COMPUTE = "cuda" if torch.cuda.is_available() else "cpu"
-
-parser = argparse.ArgumentParser(description="GPT-SoVITS api")
-
-parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径")
-parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径")
-
-parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径")
-parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本")
-parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种")
-
-parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu")
-parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0")
-parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880")
-parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度")
-parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度")
-# the boolean flags are used as `python ./api.py -fp ...`
-# in which case full_precision==True and half_precision==False
-
-parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="覆盖config.cnhubert_path")
-parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="覆盖config.bert_path")
-
-args = parser.parse_args()
-
-sovits_path = args.sovits_path
-gpt_path = args.gpt_path
-
-
 class DefaultRefer:
@@ -170,50 +154,6 @@ class DefaultRefer:
         return is_full(self.path, self.text, self.language)
 
 
-default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language)
-
-device = args.device
-port = args.port
-host = args.bind_addr
-
-if sovits_path == "":
-    sovits_path = g_config.pretrained_sovits_path
-    print(f"[WARN] 未指定SoVITS模型路径, fallback后当前值: {sovits_path}")
-if gpt_path == "":
-    gpt_path = g_config.pretrained_gpt_path
-    print(f"[WARN] 未指定GPT模型路径, fallback后当前值: {gpt_path}")
-
-# default reference audio, used when the caller provides none (or an incomplete set)
-if default_refer.path == "" or default_refer.text == "" or default_refer.language == "":
-    default_refer.path, default_refer.text, default_refer.language = "", "", ""
-    print("[INFO] 未指定默认参考音频")
-else:
-    print(f"[INFO] 默认参考音频路径: {default_refer.path}")
-    print(f"[INFO] 默认参考音频文本: {default_refer.text}")
-    print(f"[INFO] 默认参考音频语种: {default_refer.language}")
-
-is_half = g_config.is_half
-if args.full_precision:
-    is_half = False
-if args.half_precision:
-    is_half = True
-if args.full_precision and args.half_precision:
-    is_half = g_config.is_half  # fallback when both flags are set
-
-print(f"[INFO] 半精: {is_half}")
-
-cnhubert_base_path = args.hubert_path
-bert_path = args.bert_path
-
-cnhubert.cnhubert_base_path = cnhubert_base_path
-tokenizer = AutoTokenizer.from_pretrained(bert_path)
-bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
-if is_half:
-    bert_model = bert_model.half().to(device)
-else:
-    bert_model = bert_model.to(device)
-
-
 def is_empty(*items):  # returns False if any item is non-empty
     for item in items:
         if item is not None and item != "":
@@ -227,6 +167,7 @@ def is_full(*items):  # returns False if any item is empty
             return False
     return True
 
+
 def change_sovits_weights(sovits_path):
     global vq_model, hps
     dict_s2 = torch.load(sovits_path, map_location="cpu")
@@ -246,9 +187,9 @@ def change_sovits_weights(sovits_path):
     else:
         vq_model = vq_model.to(device)
     vq_model.eval()
-    print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
-    with open("./sweight.txt", "w", encoding="utf-8") as f:
-        f.write(sovits_path)
+    vq_model.load_state_dict(dict_s2["weight"], strict=False)
+
+
 def change_gpt_weights(gpt_path):
     global hz, max_sec, t2s_model, config
     hz = 50
@@ -262,8 +203,7 @@ def change_gpt_weights(gpt_path):
     t2s_model = t2s_model.to(device)
     t2s_model.eval()
     total = sum([param.nelement() for param in t2s_model.parameters()])
-    print("Number of parameter: %.2fM" % (total / 1e6))
-    with open("./gweight.txt", "w", encoding="utf-8") as f: f.write(gpt_path)
+    logger.info("Number of parameter: %.2fM" % (total / 1e6))
 
 
 def get_bert_feature(text, word2ph):
@@ -283,9 +223,81 @@ def get_bert_feature(text, word2ph):
     return phone_level_feature.T
 
 
-n_semantic = 1024
-dict_s2 = torch.load(sovits_path, map_location="cpu")
-hps = dict_s2["config"]
+def clean_text_inf(text, language):
+    phones, word2ph, norm_text = clean_text(text, language)
+    phones = cleaned_text_to_sequence(phones)
+    return phones, word2ph, norm_text
+
+
+def get_bert_inf(phones, word2ph, norm_text, language):
+    language=language.replace("all_","")
+    if language == "zh":
+        bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
+    else:
+        bert = torch.zeros(
+            (1024, len(phones)),
+            dtype=torch.float16 if is_half == True else torch.float32,
+        ).to(device)
+
+    return bert
+
+
+def get_phones_and_bert(text,language):
+    if language in {"en","all_zh","all_ja"}:
+        language = language.replace("all_","")
+        if language == "en":
+            LangSegment.setfilters(["en"])
+            formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
+        else:
+            # Chinese and Japanese kanji cannot be told apart, so trust the user's input
+            formattext = text
+        while "  " in formattext:
+            formattext = formattext.replace("  ", " ")
+        phones, word2ph, norm_text = clean_text_inf(formattext, language)
+        if language == "zh":
+            bert = get_bert_feature(norm_text, word2ph).to(device)
+        else:
+            bert = torch.zeros(
+                (1024, len(phones)),
+                dtype=torch.float16 if is_half == True else torch.float32,
+            ).to(device)
+    elif language in {"zh", "ja","auto"}:
+        textlist=[]
+        langlist=[]
+        LangSegment.setfilters(["zh","ja","en","ko"])
+        if language == "auto":
+            for tmp in LangSegment.getTexts(text):
+                if tmp["lang"] == "ko":
+                    langlist.append("zh")
+                    textlist.append(tmp["text"])
+                else:
+                    langlist.append(tmp["lang"])
+                    textlist.append(tmp["text"])
+        else:
+            for tmp in LangSegment.getTexts(text):
+                if tmp["lang"] == "en":
+                    langlist.append(tmp["lang"])
+                else:
+                    # Chinese and Japanese kanji cannot be told apart, so trust the user's input
+                    langlist.append(language)
+                textlist.append(tmp["text"])
+        # logger.info(textlist)
+        # logger.info(langlist)
+        phones_list = []
+        bert_list = []
+        norm_text_list = []
+        for i in range(len(textlist)):
+            lang = langlist[i]
+            phones, word2ph, norm_text = clean_text_inf(textlist[i], lang)
+            bert = get_bert_inf(phones, word2ph, norm_text, lang)
+            phones_list.append(phones)
+            norm_text_list.append(norm_text)
+            bert_list.append(bert)
+        bert = torch.cat(bert_list, dim=1)
+        phones = sum(phones_list, [])
+        norm_text = ''.join(norm_text_list)
+
+    return phones,bert.to(torch.float16 if is_half == True else torch.float32),norm_text
 
 
 class DictToAttrRecursive:
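`get_phones_and_bert` above leans on LangSegment to split mixed-language input. Judging from the calls in the new code, the usage pattern is roughly as sketched below (the shape of each returned item is inferred from this code, not from LangSegment's own documentation):

```python
# Rough illustration of the LangSegment calls used by get_phones_and_bert.
import LangSegment

LangSegment.setfilters(["zh", "ja", "en", "ko"])
for tmp in LangSegment.getTexts("你好, nice to meet you"):
    # each tmp is expected to be a dict such as {"lang": "zh", "text": "你好, "}
    print(tmp["lang"], tmp["text"])
```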
@@ -298,39 +310,6 @@ class DictToAttrRecursive:
         setattr(self, key, value)
 
 
-hps = DictToAttrRecursive(hps)
-hps.model.semantic_frame_rate = "25hz"
-dict_s1 = torch.load(gpt_path, map_location="cpu")
-config = dict_s1["config"]
-ssl_model = cnhubert.get_model()
-if is_half:
-    ssl_model = ssl_model.half().to(device)
-else:
-    ssl_model = ssl_model.to(device)
-
-vq_model = SynthesizerTrn(
-    hps.data.filter_length // 2 + 1,
-    hps.train.segment_size // hps.data.hop_length,
-    n_speakers=hps.data.n_speakers,
-    **hps.model)
-if is_half:
-    vq_model = vq_model.half().to(device)
-else:
-    vq_model = vq_model.to(device)
-vq_model.eval()
-print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
-hz = 50
-max_sec = config['data']['max_sec']
-t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
-t2s_model.load_state_dict(dict_s1["weight"])
-if is_half:
-    t2s_model = t2s_model.half()
-t2s_model = t2s_model.to(device)
-t2s_model.eval()
-total = sum([param.nelement() for param in t2s_model.parameters()])
-print("Number of parameter: %.2fM" % (total / 1e6))
-
-
 def get_spepc(hps, filename):
     audio = load_audio(filename, int(hps.data.sampling_rate))
     audio = torch.FloatTensor(audio)
@@ -341,17 +320,86 @@ def get_spepc(hps, filename):
     return spec
 
 
-dict_language = {
-    "中文": "zh",
-    "英文": "en",
-    "日文": "ja",
-    "ZH": "zh",
-    "EN": "en",
-    "JA": "ja",
-    "zh": "zh",
-    "en": "en",
-    "ja": "ja"
-}
+def pack_audio(audio_bytes, data, rate):
+    if media_type == "ogg":
+        audio_bytes = pack_ogg(audio_bytes, data, rate)
+    elif media_type == "aac":
+        audio_bytes = pack_aac(audio_bytes, data, rate)
+    else:
+        # wav cannot be streamed, so buffer raw PCM for now
+        audio_bytes = pack_raw(audio_bytes, data, rate)
+
+    return audio_bytes
+
+
+def pack_ogg(audio_bytes, data, rate):
+    with sf.SoundFile(audio_bytes, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file:
+        audio_file.write(data)
+
+    return audio_bytes
+
+
+def pack_raw(audio_bytes, data, rate):
+    audio_bytes.write(data.tobytes())
+
+    return audio_bytes
+
+
+def pack_wav(audio_bytes, rate):
+    data = np.frombuffer(audio_bytes.getvalue(),dtype=np.int16)
+    wav_bytes = BytesIO()
+    sf.write(wav_bytes, data, rate, format='wav')
+
+    return wav_bytes
+
+
+def pack_aac(audio_bytes, data, rate):
+    process = subprocess.Popen([
+        'ffmpeg',
+        '-f', 's16le',     # input: 16-bit signed little-endian PCM
+        '-ar', str(rate),  # sample rate
+        '-ac', '1',        # mono
+        '-i', 'pipe:0',    # read input from the pipe
+        '-c:a', 'aac',     # encode audio as AAC
+        '-b:a', '192k',    # bitrate
+        '-vn',             # no video
+        '-f', 'adts',      # output an AAC (ADTS) stream
+        'pipe:1'           # write output to the pipe
+    ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    out, _ = process.communicate(input=data.tobytes())
+    audio_bytes.write(out)
+
+    return audio_bytes
+
+
+def read_clean_buffer(audio_bytes):
+    audio_chunk = audio_bytes.getvalue()
+    audio_bytes.truncate(0)
+    audio_bytes.seek(0)
+
+    return audio_bytes, audio_chunk
+
+
+def cut_text(text, punc):
+    punc_list = [p for p in punc if p in {",", ".", ";", "?", "!", "、", ",", "。", "?", "!", ";", ":", "…"}]
+    if len(punc_list) > 0:
+        punds = r"[" + "".join(punc_list) + r"]"
+        text = text.strip("\n")
+        items = re.split(f"({punds})", text)
+        mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])]
+        # keep the text intact when it has no punctuation, or none at its end
+        if len(items)%2 == 1:
+            mergeitems.append(items[-1])
+        text = "\n".join(mergeitems)
+
+    while "\n\n" in text:
+        text = text.replace("\n\n", "\n")
+
+    return text
+
+
+def only_punc(text):
+    return not any(t.isalnum() or t.isalpha() for t in text)
 
 
 def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language):
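One mechanism in `cut_text` above is worth spelling out: `re.split` with a capturing group returns the punctuation marks as the odd-indexed items, so zipping the even and odd slices re-attaches each mark to the sentence before it. A standalone illustration:

```python
# Illustration of the split-and-rejoin trick used in cut_text above.
import re

text = "先帝创业未半而中道崩殂,今天下三分"
punds = r"[,。]"
items = re.split(f"({punds})", text)
# items alternates text and punctuation: ['先帝创业未半而中道崩殂', ',', '今天下三分']
mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])]
if len(items) % 2 == 1:
    # keep a trailing segment that has no closing punctuation
    mergeitems.append(items[-1])
print(mergeitems)  # ['先帝创业未半而中道崩殂,', '今天下三分']
```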
@@ -374,25 +422,19 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language):
         codes = vq_model.extract_latent(ssl_content)
         prompt_semantic = codes[0, 0]
     t1 = ttime()
-    prompt_language = dict_language[prompt_language]
-    text_language = dict_language[text_language]
-    phones1, word2ph1, norm_text1 = clean_text(prompt_text, prompt_language)
-    phones1 = cleaned_text_to_sequence(phones1)
+    prompt_language = dict_language[prompt_language.lower()]
+    text_language = dict_language[text_language.lower()]
+    phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language)
     texts = text.split("\n")
-    audio_opt = []
+    audio_bytes = BytesIO()
 
     for text in texts:
-        phones2, word2ph2, norm_text2 = clean_text(text, text_language)
-        phones2 = cleaned_text_to_sequence(phones2)
-        if (prompt_language == "zh"):
-            bert1 = get_bert_feature(norm_text1, word2ph1).to(device)
-        else:
-            bert1 = torch.zeros((1024, len(phones1)), dtype=torch.float16 if is_half == True else torch.float32).to(
-                device)
-        if (text_language == "zh"):
-            bert2 = get_bert_feature(norm_text2, word2ph2).to(device)
-        else:
-            bert2 = torch.zeros((1024, len(phones2))).to(bert1)
+        # crude guard: skip punctuation-only lines to avoid leaking the reference audio
+        if only_punc(text):
+            continue
+
+        audio_opt = []
+        phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language)
         bert = torch.cat([bert1, bert2], 1)
 
         all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
@@ -426,8 +468,17 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language):
         audio_opt.append(audio)
         audio_opt.append(zero_wav)
         t4 = ttime()
-        print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
-        yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16)
+        audio_bytes = pack_audio(audio_bytes,(np.concatenate(audio_opt, 0) * 32768).astype(np.int16),hps.data.sampling_rate)
+        # logger.info("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
+        if stream_mode == "normal":
+            audio_bytes, audio_chunk = read_clean_buffer(audio_bytes)
+            yield audio_chunk
+
+    if not stream_mode == "normal":
+        if media_type == "wav":
+            audio_bytes = pack_wav(audio_bytes,hps.data.sampling_rate)
+        yield audio_bytes.getvalue()
 
 
 def handle_control(command):
@@ -449,15 +500,16 @@ def handle_change(path, text, language):
     if language != "" or language is not None:
         default_refer.language = language
 
-    print(f"[INFO] 当前默认参考音频路径: {default_refer.path}")
-    print(f"[INFO] 当前默认参考音频文本: {default_refer.text}")
-    print(f"[INFO] 当前默认参考音频语种: {default_refer.language}")
-    print(f"[INFO] is_ready: {default_refer.is_ready()}")
+    logger.info(f"当前默认参考音频路径: {default_refer.path}")
+    logger.info(f"当前默认参考音频文本: {default_refer.text}")
+    logger.info(f"当前默认参考音频语种: {default_refer.language}")
+    logger.info(f"is_ready: {default_refer.is_ready()}")
 
 
     return JSONResponse({"code": 0, "message": "Success"}, status_code=200)
 
 
-def handle(refer_wav_path, prompt_text, prompt_language, text, text_language):
+def handle(refer_wav_path, prompt_text, prompt_language, text, text_language, cut_punc):
     if (
             refer_wav_path == "" or refer_wav_path is None
             or prompt_text == "" or prompt_text is None
@@ -471,24 +523,145 @@ def handle(refer_wav_path, prompt_text, prompt_language, text, text_language):
         if not default_refer.is_ready():
             return JSONResponse({"code": 400, "message": "未指定参考音频且接口无预设"}, status_code=400)
 
-    with torch.no_grad():
-        gen = get_tts_wav(
-            refer_wav_path, prompt_text, prompt_language, text, text_language
-        )
-        sampling_rate, audio_data = next(gen)
-
-    wav = BytesIO()
-    sf.write(wav, audio_data, sampling_rate, format="wav")
-    wav.seek(0)
-
-    torch.cuda.empty_cache()
-    return StreamingResponse(wav, media_type="audio/wav")
+    if cut_punc == None:
+        text = cut_text(text,default_cut_punc)
+    else:
+        text = cut_text(text,cut_punc)
+
+    return StreamingResponse(get_tts_wav(refer_wav_path, prompt_text, prompt_language, text, text_language), media_type="audio/"+media_type)
+
+
+# --------------------------------
+# Initialization
+# --------------------------------
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+sys.path.append("%s/GPT_SoVITS" % (now_dir))
+
+dict_language = {
+    "中文": "all_zh",
+    "英文": "en",
+    "日文": "all_ja",
+    "中英混合": "zh",
+    "日英混合": "ja",
+    "多语种混合": "auto",  # multilingual: split the text and detect the language per segment
+    "all_zh": "all_zh",
+    "en": "en",
+    "all_ja": "all_ja",
+    "zh": "zh",
+    "ja": "ja",
+    "auto": "auto",
+}
+
+# logger
+logging.config.dictConfig(uvicorn.config.LOGGING_CONFIG)
+logger = logging.getLogger('uvicorn')
+
+# read the config
+g_config = global_config.Config()
+
+# parse the arguments
+parser = argparse.ArgumentParser(description="GPT-SoVITS api")
+parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径")
+parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径")
+parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径")
+parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本")
+parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种")
+parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu")
+parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0")
+parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880")
+parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度")
+parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度")
+# the boolean flags are used as `python ./api.py -fp ...`
+# in which case full_precision==True and half_precision==False
+parser.add_argument("-sm", "--stream_mode", type=str, default="close", help="流式返回模式, close / normal / keepalive")
+parser.add_argument("-mt", "--media_type", type=str, default="wav", help="音频编码格式, wav / ogg / aac")
+parser.add_argument("-cp", "--cut_punc", type=str, default="", help="文本切分符号设定, 符号范围,.;?!、,。?!;:…")
+# to cut on the common sentence-final punctuation: `python ./api.py -cp ".?!。?!"`
+parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="覆盖config.cnhubert_path")
+parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="覆盖config.bert_path")

+args = parser.parse_args()
+sovits_path = args.sovits_path
+gpt_path = args.gpt_path
+device = args.device
+port = args.port
+host = args.bind_addr
+cnhubert_base_path = args.hubert_path
+bert_path = args.bert_path
+default_cut_punc = args.cut_punc
+
+# apply the parsed arguments
+default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language)
+
+# check the model paths
+if sovits_path == "":
+    sovits_path = g_config.pretrained_sovits_path
+    logger.warn(f"未指定SoVITS模型路径, fallback后当前值: {sovits_path}")
+if gpt_path == "":
+    gpt_path = g_config.pretrained_gpt_path
+    logger.warn(f"未指定GPT模型路径, fallback后当前值: {gpt_path}")
+
+# default reference audio, used when the caller provides none (or an incomplete set)
+if default_refer.path == "" or default_refer.text == "" or default_refer.language == "":
+    default_refer.path, default_refer.text, default_refer.language = "", "", ""
+    logger.info("未指定默认参考音频")
+else:
+    logger.info(f"默认参考音频路径: {default_refer.path}")
+    logger.info(f"默认参考音频文本: {default_refer.text}")
+    logger.info(f"默认参考音频语种: {default_refer.language}")
+
+# resolve half precision
+is_half = g_config.is_half
+if args.full_precision:
+    is_half = False
+if args.half_precision:
+    is_half = True
+if args.full_precision and args.half_precision:
+    is_half = g_config.is_half  # fallback when both flags are set
+logger.info(f"半精: {is_half}")
+
+# streaming return mode
+if args.stream_mode.lower() in ["normal","n"]:
+    stream_mode = "normal"
+    logger.info("流式返回已开启")
+else:
+    stream_mode = "close"
+
+# audio encoding format
+if args.media_type.lower() in ["aac","ogg"]:
+    media_type = args.media_type.lower()
+elif stream_mode == "close":
+    media_type = "wav"
+else:
+    media_type = "ogg"
+logger.info(f"编码格式: {media_type}")
+
+# initialize the models
+cnhubert.cnhubert_base_path = cnhubert_base_path
+tokenizer = AutoTokenizer.from_pretrained(bert_path)
+bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
+ssl_model = cnhubert.get_model()
+if is_half:
+    bert_model = bert_model.half().to(device)
+    ssl_model = ssl_model.half().to(device)
+else:
+    bert_model = bert_model.to(device)
+    ssl_model = ssl_model.to(device)
+change_sovits_weights(sovits_path)
+change_gpt_weights(gpt_path)
+
+
+# --------------------------------
+# API endpoints
+# --------------------------------
 app = FastAPI()
 
-# added by clark, 2024-02-21
-# allow switching models after startup, so one API instance can serve different speakers
 @app.post("/set_model")
 async def set_model(request: Request):
     json_post_raw = await request.json()
@@ -496,11 +669,11 @@ async def set_model(request: Request):
     gpt_path=json_post_raw.get("gpt_model_path")
     global sovits_path
     sovits_path=json_post_raw.get("sovits_model_path")
-    print("gptpath"+gpt_path+";vitspath"+sovits_path)
+    logger.info("gptpath"+gpt_path+";vitspath"+sovits_path)
     change_sovits_weights(sovits_path)
     change_gpt_weights(gpt_path)
     return "ok"
-# end of the addition
 
 @app.post("/control")
 async def control(request: Request):
@@ -541,6 +714,7 @@ async def tts_endpoint(request: Request):
         json_post_raw.get("prompt_language"),
         json_post_raw.get("text"),
         json_post_raw.get("text_language"),
+        json_post_raw.get("cut_punc"),
     )
 
 
@@ -551,8 +725,9 @@ async def tts_endpoint(
         prompt_language: str = None,
         text: str = None,
         text_language: str = None,
+        cut_punc: str = None,
 ):
-    return handle(refer_wav_path, prompt_text, prompt_language, text, text_language)
+    return handle(refer_wav_path, prompt_text, prompt_language, text, text_language, cut_punc)
 
 
 if __name__ == "__main__":
docs/cn/README.md (translated from Chinese)

@@ -141,7 +141,15 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker
 
 - [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights)
 
-For Chinese automatic speech recognition (additional), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files), and place them in `tools/damo_asr/models`.
+For Chinese automatic speech recognition (additional), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files), and place them in `tools/asr/models`.
+
+For English and Japanese automatic speech recognition (additional), download models from [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) and place them in `tools/asr/models`. In addition, [other models](https://huggingface.co/Systran) may have a similar effect while taking up less disk space.
+
+Users in the China region can download via the links below:
+- [Faster Whisper Large V3](https://www.icloud.com/iclouddrive/0c4pQxFs7oWyVU1iMTq2DbmLA#faster-whisper-large-v3) (click "Download a copy")
+
+- [Faster Whisper Large V3](https://hf-mirror.com/Systran/faster-whisper-large-v3) (Hugging Face mirror site)
 
 ## Dataset Format
 
@@ -204,13 +212,13 @@ python audio_slicer.py \
 ````
 This is how dataset ASR processing is done from the command line (Chinese only)
 ````
-python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
+python tools/asr/funasr_asr.py -i <input> -o <output>
 ````
 ASR processing is performed through Faster_Whisper (ASR labeling for languages other than Chinese)
 
 (No progress bar; GPU performance may cause delays)
 ````
-python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
+python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
 ````
 A custom list save path is enabled
 ## Credits
docs/ja/README.md (translated from Japanese)

@@ -127,7 +127,7 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker
 
 Download the pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS\pretrained_models`.
 
-For Chinese ASR (additional), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files), and place them in `tools/damo_asr/models`.
+For Chinese ASR (additional), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files), and place them in `tools/asr/models`.
 
 For UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`.
 
@@ -156,7 +156,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.
 - [ ] **High priority:**
 
     - [x] Localization in Japanese and English.
-    - [ ] User guide.
+    - [] User guide.
     - [x] Fine-tuning training on Japanese and English datasets.
 
 - [ ] **Features:**
@@ -192,13 +192,13 @@ python audio_slicer.py \
 ```
 This is how dataset ASR processing is done from the command line (Chinese only)
 ```
-python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
+python tools/asr/funasr_asr.py -i <input> -o <output>
 ```
 ASR processing is performed through Faster_Whisper (ASR labeling for languages other than Chinese)
 
 (No progress bar is shown; GPU performance may cause delays)
 ```
-python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
+python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
 ```
 A custom list save path is enabled
 ## Credits
docs/ko/README.md (translated from Korean)

@@ -130,7 +130,7 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker
 
 Download the pretrained models from [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) and place them in `GPT_SoVITS\pretrained_models`.
 
-For Chinese automatic speech recognition (ASR), vocal/accompaniment separation, and voice removal, download [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files), and place them in `tools/damo_asr/models`.
+For Chinese automatic speech recognition (ASR), vocal/accompaniment separation, and voice removal, download [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files), and place them in `tools/asr/models`.
 
 For UVR5 (vocal/accompaniment separation & reverberation removal), download models from [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) and place them in `tools/uvr5/uvr5_weights`.
 
@@ -196,13 +196,13 @@ python audio_slicer.py \
 ```
 This is how dataset ASR processing is done from the command line (Chinese only).
 ```
-python tools/damo_asr/cmd-asr.py "<Path to the directory containing input audio files>"
+python tools/asr/funasr_asr.py -i <input> -o <output>
 ```
 ASR processing is performed through Faster_Whisper (ASR labeling for languages other than Chinese).
 
 (No progress bar; GPU performance may cause delays)
 ```
-python ./tools/damo_asr/WhisperASR.py -i <input> -o <output> -f <file_name.list> -l <language>
+python ./tools/asr/fasterwhisper_asr.py -i <input> -o <output> -l <language>
 ```
 A custom list save path is enabled.
 ## Acknowledgements
zh_CN locale file (entries kept verbatim; this file is translation data)

@@ -2,6 +2,18 @@
     "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练",
     "UVR5已开启": "UVR5已开启",
     "UVR5已关闭": "UVR5已关闭",
+    "输入文件夹路径": "输入文件夹路径",
+    "输出文件夹路径": "输出文件夹路径",
+    "ASR 模型": "ASR 模型",
+    "ASR 模型尺寸": "ASR 模型尺寸",
+    "ASR 语言设置": "ASR 语言设置",
+    "模型切换": "模型切换",
+    "是否开启dpo训练选项(实验性)": "是否开启dpo训练选项(实验性)",
+    "开启无参考文本模式。不填参考文本亦相当于开启。": "开启无参考文本模式。不填参考文本亦相当于开启。",
+    "使用无参考文本模式时建议使用微调的GPT": "使用无参考文本模式时建议使用微调的GPT",
+    "后续将支持转音素、手工修改音素、语音合成分步执行。": "后续将支持转音素、手工修改音素、语音合成分步执行。",
+    "gpt采样参数(无参考文本时不要太低):": "gpt采样参数(无参考文本时不要太低):",
+    "按标点符号切": "按标点符号切",
     "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.",
     "0-前置数据集获取工具": "0-前置数据集获取工具",
     "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人声伴奏分离&去混响去延迟工具",