diff --git a/GPT_SoVITS/text/engdict-hot.rep b/GPT_SoVITS/text/engdict-hot.rep index 22d4cd28..120e5ef6 100644 --- a/GPT_SoVITS/text/engdict-hot.rep +++ b/GPT_SoVITS/text/engdict-hot.rep @@ -1 +1,2 @@ -CHATGPT CH AE1 T JH IY1 P IY1 T IY1 \ No newline at end of file +CHATGPT CH AE1 T JH IY1 P IY1 T IY1 +JSON JH EY1 S AH0 N \ No newline at end of file diff --git a/GPT_SoVITS/text/english.py b/GPT_SoVITS/text/english.py index 09e20bdd..68ce7896 100644 --- a/GPT_SoVITS/text/english.py +++ b/GPT_SoVITS/text/english.py @@ -8,11 +8,19 @@ from string import punctuation from text import symbols +import unicodedata +from builtins import str as unicode +from g2p_en.expand import normalize_numbers +from nltk.tokenize import TweetTokenizer +word_tokenize = TweetTokenizer().tokenize +from nltk import pos_tag + current_file_path = os.path.dirname(__file__) CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep") CMU_DICT_FAST_PATH = os.path.join(current_file_path, "cmudict-fast.rep") CMU_DICT_HOT_PATH = os.path.join(current_file_path, "engdict-hot.rep") CACHE_PATH = os.path.join(current_file_path, "engdict_cache.pickle") +NAMECACHE_PATH = os.path.join(current_file_path, "namedict_cache.pickle") arpa = { "AH0", @@ -155,6 +163,9 @@ def read_dict_new(): line_index = line_index + 1 line = f.readline() + return g2p_dict + +def hot_reload_hot(g2p_dict): with open(CMU_DICT_HOT_PATH) as f: line = f.readline() line_index = 1 @@ -168,7 +179,7 @@ def read_dict_new(): line_index = line_index + 1 line = f.readline() - + return g2p_dict @@ -185,10 +196,19 @@ def get_dict(): g2p_dict = read_dict_new() cache_dict(g2p_dict, CACHE_PATH) + g2p_dict = hot_reload_hot(g2p_dict) + return g2p_dict -eng_dict = get_dict() +def get_namedict(): + if os.path.exists(NAMECACHE_PATH): + with open(NAMECACHE_PATH, "rb") as pickle_file: + name_dict = pickle.load(pickle_file) + else: + name_dict = {} + + return name_dict def text_normalize(text): @@ -204,6 +224,16 @@ def text_normalize(text): for p, r in rep_map.items(): text = re.sub(p, r, text) + # 来自 g2p_en 文本格式化处理 + # 增加大写兼容 + text = unicode(text) + text = normalize_numbers(text) + text = ''.join(char for char in unicodedata.normalize('NFD', text) + if unicodedata.category(char) != 'Mn') # Strip accents + text = re.sub("[^ A-Za-z'.,?!\-]", "", text) + text = re.sub(r"(?i)i\.e\.", "that is", text) + text = re.sub(r"(?i)e\.g\.", "for example", text) + return text @@ -213,37 +243,106 @@ class en_G2p(G2p): # 分词初始化 wordsegment.load() - # 扩展过时字典 + # 扩展过时字典, 添加姓名字典 self.cmu = get_dict() + self.namedict = get_namedict() # 剔除读音错误的几个缩写 for word in ["AE", "AI", "AR", "IOS", "HUD", "OS"]: del self.cmu[word.lower()] - # "A" 落单不读 "AH0" 读 "EY1" - self.cmu['a'] = [['EY1']] + # 修正多音字 + self.homograph2features["read"] = (['R', 'IY1', 'D'], ['R', 'EH1', 'D'], 'VBP') + self.homograph2features["complex"] = (['K', 'AH0', 'M', 'P', 'L', 'EH1', 'K', 'S'], ['K', 'AA1', 'M', 'P', 'L', 'EH0', 'K', 'S'], 'JJ') - def predict(self, word): - # 小写 oov 长度小于等于 3 直接读字母 - if (len(word) <= 3): - return [phone for w in word for phone in self(w)] + def __call__(self, text): + # tokenization + words = word_tokenize(text) + tokens = pos_tag(words) # tuples of (word, tag) + + # steps + prons = [] + for o_word, pos in tokens: + # 还原 g2p_en 小写操作逻辑 + word = o_word.lower() + + if re.search("[a-z]", word) is None: + pron = [word] + # 先把单字母推出去 + elif len(word) == 1: + # 单读 A 发音修正, 这里需要原格式 o_word 判断大写 + if o_word == "A": + pron = ['EY1'] + else: + pron = self.cmu[word][0] + # g2p_en 原版多音字处理 + elif word in 
self.homograph2features: # Check homograph + pron1, pron2, pos1 = self.homograph2features[word] + if pos.startswith(pos1): + pron = pron1 + # pos1比pos长仅出现在read + elif len(pos) < len(pos1) and pos == pos1[:len(pos)]: + pron = pron1 + else: + pron = pron2 + else: + # 递归查找预测 + pron = self.qryword(o_word) + + prons.extend(pron) + prons.extend([" "]) + + return prons[:-1] + + + def qryword(self, o_word): + word = o_word.lower() + + # 查字典, 单字母除外 + if len(word) > 1 and word in self.cmu: # lookup CMU dict + return self.cmu[word][0] + + # 单词仅首字母大写时查找姓名字典 + if o_word.istitle() and word in self.namedict: + return self.namedict[word][0] + + # oov 长度小于等于 3 直接读字母 + if len(word) <= 3: + phones = [] + for w in word: + # 单读 A 发音修正, 此处不存在大写的情况 + if w == "a": + phones.extend(['EY1']) + else: + phones.extend(self.cmu[w][0]) + return phones # 尝试分离所有格 if re.match(r"^([a-z]+)('s)$", word): - phone = self(word[:-2]) - phone.extend(['Z']) - return phone + phones = self.qryword(word[:-2]) + # P T K F TH HH 无声辅音结尾 's 发 ['S'] + if phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']: + phones.extend(['S']) + # S Z SH ZH CH JH 擦声结尾 's 发 ['IH1', 'Z'] 或 ['AH0', 'Z'] + elif phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']: + phones.extend(['AH0', 'Z']) + # B D G DH V M N NG L R W Y 有声辅音结尾 's 发 ['Z'] + # AH0 AH1 AH2 EY0 EY1 EY2 AE0 AE1 AE2 EH0 EH1 EH2 OW0 OW1 OW2 UH0 UH1 UH2 IY0 IY1 IY2 AA0 AA1 AA2 AO0 AO1 AO2 + # ER ER0 ER1 ER2 UW0 UW1 UW2 AY0 AY1 AY2 AW0 AW1 AW2 OY0 OY1 OY2 IH IH0 IH1 IH2 元音结尾 's 发 ['Z'] + else: + phones.extend(['Z']) + return phones # 尝试进行分词,应对复合词 comps = wordsegment.segment(word.lower()) # 无法分词的送回去预测 if len(comps)==1: - return super().predict(word) + return self.predict(word) # 可以分词的递归处理 - return [phone for comp in comps for phone in self(comp)] + return [phone for comp in comps for phone in self.qryword(comp)] _g2p = en_G2p() @@ -258,12 +357,6 @@ def g2p(text): if __name__ == "__main__": - # print(get_dict()) print(g2p("hello")) - print(g2p("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")) - # all_phones = set() - # for k, syllables in eng_dict.items(): - # for group in syllables: - # for ph in group: - # all_phones.add(ph) - # print(all_phones) + print(g2p(text_normalize("e.g. I used openai's AI tool to draw a picture."))) + print(g2p(text_normalize("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))) \ No newline at end of file diff --git a/GPT_SoVITS/text/namedict_cache.pickle b/GPT_SoVITS/text/namedict_cache.pickle new file mode 100644 index 00000000..a9a9dc39 Binary files /dev/null and b/GPT_SoVITS/text/namedict_cache.pickle differ diff --git a/README.md b/README.md index c53ab766..b5daa6b2 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,15 @@ Users in China region can download these two models by entering the links below - [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights) -For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/damo_asr/models`. 
+For Chinese ASR (additionally), download models from [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), and [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) and place them in `tools/asr/models`. + +For English or Japanese ASR (additionally), download models from [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) and place them in `tools/asr/models`. Also, [other models](https://huggingface.co/Systran) may have a similar effect with a smaller disk footprint. + +Users in China region can download this model by entering the links below + +- [Faster Whisper Large V3](https://www.icloud.com/iclouddrive/0c4pQxFs7oWyVU1iMTq2DbmLA#faster-whisper-large-v3) (clicking "Download a copy") + +- [Faster Whisper Large V3](https://hf-mirror.com/Systran/faster-whisper-large-v3) (HuggingFace mirror site) ## Dataset Format @@ -204,13 +212,13 @@ python audio_slicer.py \ ``` This is how dataset ASR processing is done using the command line(Only Chinese) ``` -python tools/damo_asr/cmd-asr.py "" +python tools/asr/funasr_asr.py -i -o ``` ASR processing is performed through Faster_Whisper(ASR marking except Chinese) (No progress bars, GPU performance may cause time delays) ``` -python ./tools/damo_asr/WhisperASR.py -i -o -f -l +python ./tools/asr/fasterwhisper_asr.py -i -o -l ``` A custom list save path is enabled diff --git a/api.py b/api.py index 34adfbe9..ea0e39d0 100644 --- a/api.py +++ b/api.py @@ -18,6 +18,9 @@ `-p` - `绑定端口, 默认9880, 可在 config.py 中指定` `-fp` - `覆盖 config.py 使用全精度` `-hp` - `覆盖 config.py 使用半精度` +`-sm` - `流式返回模式, 默认不启用, "close","c", "normal","n", "keepalive","k"` +`-mt` - `返回的音频编码格式, 流式默认ogg, 非流式默认wav, "wav", "ogg", "aac"` +`-cp` - `文本切分符号设定, 默认为空, 以",.,。"字符串的方式传入` `-hb` - `cnhubert路径` `-b` - `bert路径` @@ -39,6 +42,18 @@ POST: } ``` +使用执行参数指定的参考音频并设定分割符号: +GET: + `http://127.0.0.1:9880?text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh&cut_punc=,。` +POST: +```json +{ + "text": "先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。", + "text_language": "zh", + "cut_punc": ",。", +} +``` + 手动指定当次推理所使用的参考音频: GET: `http://127.0.0.1:9880?refer_wav_path=123.wav&prompt_text=一二三。&prompt_language=zh&text=先帝创业未半而中道崩殂,今天下三分,益州疲弊,此诚危急存亡之秋也。&text_language=zh` @@ -103,14 +118,10 @@ RESP: 无 import argparse -import os +import os,re import sys - -now_dir = os.getcwd() -sys.path.append(now_dir) -sys.path.append("%s/GPT_SoVITS" % (now_dir)) - import signal +import LangSegment from time import time as ttime import torch import librosa @@ -129,35 +140,8 @@ from text.cleaner import clean_text from module.mel_processing import spectrogram_torch from my_utils import load_audio import config as global_config - -g_config = global_config.Config() - -# AVAILABLE_COMPUTE = "cuda" if torch.cuda.is_available() else "cpu" - -parser = argparse.ArgumentParser(description="GPT-SoVITS api") - -parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径") -parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径") - -parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径") -parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") -parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") - 
-parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu") -parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0") -parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") -parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") -parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度") -# bool值的用法为 `python ./api.py -fp ...` -# 此时 full_precision==True, half_precision==False - -parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="覆盖config.cnhubert_path") -parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="覆盖config.bert_path") - -args = parser.parse_args() - -sovits_path = args.sovits_path -gpt_path = args.gpt_path +import logging +import subprocess class DefaultRefer: @@ -170,50 +154,6 @@ class DefaultRefer: return is_full(self.path, self.text, self.language) -default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language) - -device = args.device -port = args.port -host = args.bind_addr - -if sovits_path == "": - sovits_path = g_config.pretrained_sovits_path - print(f"[WARN] 未指定SoVITS模型路径, fallback后当前值: {sovits_path}") -if gpt_path == "": - gpt_path = g_config.pretrained_gpt_path - print(f"[WARN] 未指定GPT模型路径, fallback后当前值: {gpt_path}") - -# 指定默认参考音频, 调用方 未提供/未给全 参考音频参数时使用 -if default_refer.path == "" or default_refer.text == "" or default_refer.language == "": - default_refer.path, default_refer.text, default_refer.language = "", "", "" - print("[INFO] 未指定默认参考音频") -else: - print(f"[INFO] 默认参考音频路径: {default_refer.path}") - print(f"[INFO] 默认参考音频文本: {default_refer.text}") - print(f"[INFO] 默认参考音频语种: {default_refer.language}") - -is_half = g_config.is_half -if args.full_precision: - is_half = False -if args.half_precision: - is_half = True -if args.full_precision and args.half_precision: - is_half = g_config.is_half # 炒饭fallback - -print(f"[INFO] 半精: {is_half}") - -cnhubert_base_path = args.hubert_path -bert_path = args.bert_path - -cnhubert.cnhubert_base_path = cnhubert_base_path -tokenizer = AutoTokenizer.from_pretrained(bert_path) -bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) -if is_half: - bert_model = bert_model.half().to(device) -else: - bert_model = bert_model.to(device) - - def is_empty(*items): # 任意一项不为空返回False for item in items: if item is not None and item != "": @@ -227,6 +167,7 @@ def is_full(*items): # 任意一项为空返回False return False return True + def change_sovits_weights(sovits_path): global vq_model, hps dict_s2 = torch.load(sovits_path, map_location="cpu") @@ -246,9 +187,9 @@ def change_sovits_weights(sovits_path): else: vq_model = vq_model.to(device) vq_model.eval() - print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) - with open("./sweight.txt", "w", encoding="utf-8") as f: - f.write(sovits_path) + vq_model.load_state_dict(dict_s2["weight"], strict=False) + + def change_gpt_weights(gpt_path): global hz, max_sec, t2s_model, config hz = 50 @@ -262,8 +203,7 @@ def change_gpt_weights(gpt_path): t2s_model = t2s_model.to(device) t2s_model.eval() total = sum([param.nelement() for param in t2s_model.parameters()]) - print("Number of parameter: %.2fM" % (total / 1e6)) - with open("./gweight.txt", "w", encoding="utf-8") as f: f.write(gpt_path) + logger.info("Number of parameter: %.2fM" % (total / 1e6)) def 
get_bert_feature(text, word2ph): @@ -283,9 +223,81 @@ def get_bert_feature(text, word2ph): return phone_level_feature.T -n_semantic = 1024 -dict_s2 = torch.load(sovits_path, map_location="cpu") -hps = dict_s2["config"] +def clean_text_inf(text, language): + phones, word2ph, norm_text = clean_text(text, language) + phones = cleaned_text_to_sequence(phones) + return phones, word2ph, norm_text + + +def get_bert_inf(phones, word2ph, norm_text, language): + language=language.replace("all_","") + if language == "zh": + bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype) + else: + bert = torch.zeros( + (1024, len(phones)), + dtype=torch.float16 if is_half == True else torch.float32, + ).to(device) + + return bert + + +def get_phones_and_bert(text,language): + if language in {"en","all_zh","all_ja"}: + language = language.replace("all_","") + if language == "en": + LangSegment.setfilters(["en"]) + formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text)) + else: + # 因无法区别中日文汉字,以用户输入为准 + formattext = text + while " " in formattext: + formattext = formattext.replace(" ", " ") + phones, word2ph, norm_text = clean_text_inf(formattext, language) + if language == "zh": + bert = get_bert_feature(norm_text, word2ph).to(device) + else: + bert = torch.zeros( + (1024, len(phones)), + dtype=torch.float16 if is_half == True else torch.float32, + ).to(device) + elif language in {"zh", "ja","auto"}: + textlist=[] + langlist=[] + LangSegment.setfilters(["zh","ja","en","ko"]) + if language == "auto": + for tmp in LangSegment.getTexts(text): + if tmp["lang"] == "ko": + langlist.append("zh") + textlist.append(tmp["text"]) + else: + langlist.append(tmp["lang"]) + textlist.append(tmp["text"]) + else: + for tmp in LangSegment.getTexts(text): + if tmp["lang"] == "en": + langlist.append(tmp["lang"]) + else: + # 因无法区别中日文汉字,以用户输入为准 + langlist.append(language) + textlist.append(tmp["text"]) + # logger.info(textlist) + # logger.info(langlist) + phones_list = [] + bert_list = [] + norm_text_list = [] + for i in range(len(textlist)): + lang = langlist[i] + phones, word2ph, norm_text = clean_text_inf(textlist[i], lang) + bert = get_bert_inf(phones, word2ph, norm_text, lang) + phones_list.append(phones) + norm_text_list.append(norm_text) + bert_list.append(bert) + bert = torch.cat(bert_list, dim=1) + phones = sum(phones_list, []) + norm_text = ''.join(norm_text_list) + + return phones,bert.to(torch.float16 if is_half == True else torch.float32),norm_text class DictToAttrRecursive: @@ -298,39 +310,6 @@ class DictToAttrRecursive: setattr(self, key, value) -hps = DictToAttrRecursive(hps) -hps.model.semantic_frame_rate = "25hz" -dict_s1 = torch.load(gpt_path, map_location="cpu") -config = dict_s1["config"] -ssl_model = cnhubert.get_model() -if is_half: - ssl_model = ssl_model.half().to(device) -else: - ssl_model = ssl_model.to(device) - -vq_model = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) -if is_half: - vq_model = vq_model.half().to(device) -else: - vq_model = vq_model.to(device) -vq_model.eval() -print(vq_model.load_state_dict(dict_s2["weight"], strict=False)) -hz = 50 -max_sec = config['data']['max_sec'] -t2s_model = Text2SemanticLightningModule(config, "****", is_train=False) -t2s_model.load_state_dict(dict_s1["weight"]) -if is_half: - t2s_model = t2s_model.half() -t2s_model = t2s_model.to(device) -t2s_model.eval() -total = sum([param.nelement() for param in t2s_model.parameters()]) 
-print("Number of parameter: %.2fM" % (total / 1e6)) - - def get_spepc(hps, filename): audio = load_audio(filename, int(hps.data.sampling_rate)) audio = torch.FloatTensor(audio) @@ -341,17 +320,86 @@ def get_spepc(hps, filename): return spec -dict_language = { - "中文": "zh", - "英文": "en", - "日文": "ja", - "ZH": "zh", - "EN": "en", - "JA": "ja", - "zh": "zh", - "en": "en", - "ja": "ja" -} +def pack_audio(audio_bytes, data, rate): + if media_type == "ogg": + audio_bytes = pack_ogg(audio_bytes, data, rate) + elif media_type == "aac": + audio_bytes = pack_aac(audio_bytes, data, rate) + else: + # wav无法流式, 先暂存raw + audio_bytes = pack_raw(audio_bytes, data, rate) + + return audio_bytes + + +def pack_ogg(audio_bytes, data, rate): + with sf.SoundFile(audio_bytes, mode='w', samplerate=rate, channels=1, format='ogg') as audio_file: + audio_file.write(data) + + return audio_bytes + + +def pack_raw(audio_bytes, data, rate): + audio_bytes.write(data.tobytes()) + + return audio_bytes + + +def pack_wav(audio_bytes, rate): + data = np.frombuffer(audio_bytes.getvalue(),dtype=np.int16) + wav_bytes = BytesIO() + sf.write(wav_bytes, data, rate, format='wav') + + return wav_bytes + + +def pack_aac(audio_bytes, data, rate): + process = subprocess.Popen([ + 'ffmpeg', + '-f', 's16le', # 输入16位有符号小端整数PCM + '-ar', str(rate), # 设置采样率 + '-ac', '1', # 单声道 + '-i', 'pipe:0', # 从管道读取输入 + '-c:a', 'aac', # 音频编码器为AAC + '-b:a', '192k', # 比特率 + '-vn', # 不包含视频 + '-f', 'adts', # 输出AAC数据流格式 + 'pipe:1' # 将输出写入管道 + ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, _ = process.communicate(input=data.tobytes()) + audio_bytes.write(out) + + return audio_bytes + + +def read_clean_buffer(audio_bytes): + audio_chunk = audio_bytes.getvalue() + audio_bytes.truncate(0) + audio_bytes.seek(0) + + return audio_bytes, audio_chunk + + +def cut_text(text, punc): + punc_list = [p for p in punc if p in {",", ".", ";", "?", "!", "、", ",", "。", "?", "!", ";", ":", "…"}] + if len(punc_list) > 0: + punds = r"[" + "".join(punc_list) + r"]" + text = text.strip("\n") + items = re.split(f"({punds})", text) + mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])] + # 在句子不存在符号或句尾无符号的时候保证文本完整 + if len(items)%2 == 1: + mergeitems.append(items[-1]) + text = "\n".join(mergeitems) + + while "\n\n" in text: + text = text.replace("\n\n", "\n") + + return text + + +def only_punc(text): + return not any(t.isalnum() or t.isalpha() for t in text) def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language): @@ -374,25 +422,19 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) codes = vq_model.extract_latent(ssl_content) prompt_semantic = codes[0, 0] t1 = ttime() - prompt_language = dict_language[prompt_language] - text_language = dict_language[text_language] - phones1, word2ph1, norm_text1 = clean_text(prompt_text, prompt_language) - phones1 = cleaned_text_to_sequence(phones1) + prompt_language = dict_language[prompt_language.lower()] + text_language = dict_language[text_language.lower()] + phones1, bert1, norm_text1 = get_phones_and_bert(prompt_text, prompt_language) texts = text.split("\n") - audio_opt = [] + audio_bytes = BytesIO() for text in texts: - phones2, word2ph2, norm_text2 = clean_text(text, text_language) - phones2 = cleaned_text_to_sequence(phones2) - if (prompt_language == "zh"): - bert1 = get_bert_feature(norm_text1, word2ph1).to(device) - else: - bert1 = torch.zeros((1024, len(phones1)), dtype=torch.float16 if is_half == True else torch.float32).to( - 
device) - if (text_language == "zh"): - bert2 = get_bert_feature(norm_text2, word2ph2).to(device) - else: - bert2 = torch.zeros((1024, len(phones2))).to(bert1) + # 简单防止纯符号引发参考音频泄露 + if only_punc(text): + continue + + audio_opt = [] + phones2, bert2, norm_text2 = get_phones_and_bert(text, text_language) bert = torch.cat([bert1, bert2], 1) all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0) @@ -426,8 +468,17 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language) audio_opt.append(audio) audio_opt.append(zero_wav) t4 = ttime() - print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) - yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16) + audio_bytes = pack_audio(audio_bytes,(np.concatenate(audio_opt, 0) * 32768).astype(np.int16),hps.data.sampling_rate) + # logger.info("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) + if stream_mode == "normal": + audio_bytes, audio_chunk = read_clean_buffer(audio_bytes) + yield audio_chunk + + if not stream_mode == "normal": + if media_type == "wav": + audio_bytes = pack_wav(audio_bytes,hps.data.sampling_rate) + yield audio_bytes.getvalue() + def handle_control(command): @@ -449,15 +500,16 @@ def handle_change(path, text, language): if language != "" or language is not None: default_refer.language = language - print(f"[INFO] 当前默认参考音频路径: {default_refer.path}") - print(f"[INFO] 当前默认参考音频文本: {default_refer.text}") - print(f"[INFO] 当前默认参考音频语种: {default_refer.language}") - print(f"[INFO] is_ready: {default_refer.is_ready()}") + logger.info(f"当前默认参考音频路径: {default_refer.path}") + logger.info(f"当前默认参考音频文本: {default_refer.text}") + logger.info(f"当前默认参考音频语种: {default_refer.language}") + logger.info(f"is_ready: {default_refer.is_ready()}") + return JSONResponse({"code": 0, "message": "Success"}, status_code=200) -def handle(refer_wav_path, prompt_text, prompt_language, text, text_language): +def handle(refer_wav_path, prompt_text, prompt_language, text, text_language, cut_punc): if ( refer_wav_path == "" or refer_wav_path is None or prompt_text == "" or prompt_text is None @@ -471,24 +523,145 @@ def handle(refer_wav_path, prompt_text, prompt_language, text, text_language): if not default_refer.is_ready(): return JSONResponse({"code": 400, "message": "未指定参考音频且接口无预设"}, status_code=400) - with torch.no_grad(): - gen = get_tts_wav( - refer_wav_path, prompt_text, prompt_language, text, text_language - ) - sampling_rate, audio_data = next(gen) + if cut_punc == None: + text = cut_text(text,default_cut_punc) + else: + text = cut_text(text,cut_punc) - wav = BytesIO() - sf.write(wav, audio_data, sampling_rate, format="wav") - wav.seek(0) - - torch.cuda.empty_cache() - return StreamingResponse(wav, media_type="audio/wav") + return StreamingResponse(get_tts_wav(refer_wav_path, prompt_text, prompt_language, text, text_language), media_type="audio/"+media_type) + + +# -------------------------------- +# 初始化部分 +# -------------------------------- +now_dir = os.getcwd() +sys.path.append(now_dir) +sys.path.append("%s/GPT_SoVITS" % (now_dir)) + +dict_language = { + "中文": "all_zh", + "英文": "en", + "日文": "all_ja", + "中英混合": "zh", + "日英混合": "ja", + "多语种混合": "auto", #多语种启动切分识别语种 + "all_zh": "all_zh", + "en": "en", + "all_ja": "all_ja", + "zh": "zh", + "ja": "ja", + "auto": "auto", +} + +# logger +logging.config.dictConfig(uvicorn.config.LOGGING_CONFIG) +logger = logging.getLogger('uvicorn') + +# 获取配置 +g_config = global_config.Config() + +# 获取参数 +parser = 
argparse.ArgumentParser(description="GPT-SoVITS api") + +parser.add_argument("-s", "--sovits_path", type=str, default=g_config.sovits_path, help="SoVITS模型路径") +parser.add_argument("-g", "--gpt_path", type=str, default=g_config.gpt_path, help="GPT模型路径") +parser.add_argument("-dr", "--default_refer_path", type=str, default="", help="默认参考音频路径") +parser.add_argument("-dt", "--default_refer_text", type=str, default="", help="默认参考音频文本") +parser.add_argument("-dl", "--default_refer_language", type=str, default="", help="默认参考音频语种") +parser.add_argument("-d", "--device", type=str, default=g_config.infer_device, help="cuda / cpu") +parser.add_argument("-a", "--bind_addr", type=str, default="0.0.0.0", help="default: 0.0.0.0") +parser.add_argument("-p", "--port", type=int, default=g_config.api_port, help="default: 9880") +parser.add_argument("-fp", "--full_precision", action="store_true", default=False, help="覆盖config.is_half为False, 使用全精度") +parser.add_argument("-hp", "--half_precision", action="store_true", default=False, help="覆盖config.is_half为True, 使用半精度") +# bool值的用法为 `python ./api.py -fp ...` +# 此时 full_precision==True, half_precision==False +parser.add_argument("-sm", "--stream_mode", type=str, default="close", help="流式返回模式, close / normal / keepalive") +parser.add_argument("-mt", "--media_type", type=str, default="wav", help="音频编码格式, wav / ogg / aac") +parser.add_argument("-cp", "--cut_punc", type=str, default="", help="文本切分符号设定, 符号范围,.;?!、,。?!;:…") +# 切割常用分句符为 `python ./api.py -cp ".?!。?!"` +parser.add_argument("-hb", "--hubert_path", type=str, default=g_config.cnhubert_path, help="覆盖config.cnhubert_path") +parser.add_argument("-b", "--bert_path", type=str, default=g_config.bert_path, help="覆盖config.bert_path") + +args = parser.parse_args() +sovits_path = args.sovits_path +gpt_path = args.gpt_path +device = args.device +port = args.port +host = args.bind_addr +cnhubert_base_path = args.hubert_path +bert_path = args.bert_path +default_cut_punc = args.cut_punc + +# 应用参数配置 +default_refer = DefaultRefer(args.default_refer_path, args.default_refer_text, args.default_refer_language) + +# 模型路径检查 +if sovits_path == "": + sovits_path = g_config.pretrained_sovits_path + logger.warn(f"未指定SoVITS模型路径, fallback后当前值: {sovits_path}") +if gpt_path == "": + gpt_path = g_config.pretrained_gpt_path + logger.warn(f"未指定GPT模型路径, fallback后当前值: {gpt_path}") + +# 指定默认参考音频, 调用方 未提供/未给全 参考音频参数时使用 +if default_refer.path == "" or default_refer.text == "" or default_refer.language == "": + default_refer.path, default_refer.text, default_refer.language = "", "", "" + logger.info("未指定默认参考音频") +else: + logger.info(f"默认参考音频路径: {default_refer.path}") + logger.info(f"默认参考音频文本: {default_refer.text}") + logger.info(f"默认参考音频语种: {default_refer.language}") + +# 获取半精度 +is_half = g_config.is_half +if args.full_precision: + is_half = False +if args.half_precision: + is_half = True +if args.full_precision and args.half_precision: + is_half = g_config.is_half # 炒饭fallback +logger.info(f"半精: {is_half}") + +# 流式返回模式 +if args.stream_mode.lower() in ["normal","n"]: + stream_mode = "normal" + logger.info("流式返回已开启") +else: + stream_mode = "close" + +# 音频编码格式 +if args.media_type.lower() in ["aac","ogg"]: + media_type = args.media_type.lower() +elif stream_mode == "close": + media_type = "wav" +else: + media_type = "ogg" +logger.info(f"编码格式: {media_type}") + +# 初始化模型 +cnhubert.cnhubert_base_path = cnhubert_base_path +tokenizer = AutoTokenizer.from_pretrained(bert_path) +bert_model = AutoModelForMaskedLM.from_pretrained(bert_path) +ssl_model = 
cnhubert.get_model() +if is_half: + bert_model = bert_model.half().to(device) + ssl_model = ssl_model.half().to(device) +else: + bert_model = bert_model.to(device) + ssl_model = ssl_model.to(device) +change_sovits_weights(sovits_path) +change_gpt_weights(gpt_path) + + + + +# -------------------------------- +# 接口部分 +# -------------------------------- app = FastAPI() -#clark新增-----2024-02-21 -#可在启动后动态修改模型,以此满足同一个api不同的朗读者请求 @app.post("/set_model") async def set_model(request: Request): json_post_raw = await request.json() @@ -496,11 +669,11 @@ async def set_model(request: Request): gpt_path=json_post_raw.get("gpt_model_path") global sovits_path sovits_path=json_post_raw.get("sovits_model_path") - print("gptpath"+gpt_path+";vitspath"+sovits_path) + logger.info("gptpath"+gpt_path+";vitspath"+sovits_path) change_sovits_weights(sovits_path) change_gpt_weights(gpt_path) return "ok" -# 新增-----end------ + @app.post("/control") async def control(request: Request): @@ -541,6 +714,7 @@ async def tts_endpoint(request: Request): json_post_raw.get("prompt_language"), json_post_raw.get("text"), json_post_raw.get("text_language"), + json_post_raw.get("cut_punc"), ) @@ -551,8 +725,9 @@ async def tts_endpoint( prompt_language: str = None, text: str = None, text_language: str = None, + cut_punc: str = None, ): - return handle(refer_wav_path, prompt_text, prompt_language, text, text_language) + return handle(refer_wav_path, prompt_text, prompt_language, text, text_language, cut_punc) if __name__ == "__main__": diff --git a/docs/cn/README.md b/docs/cn/README.md index 6513e122..5ff8a763 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -141,7 +141,15 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker - [UVR5 Weights](https://www.icloud.com.cn/iclouddrive/0bekRKDiJXboFhbfm3lM2fVbA#UVR5_Weights) -对于中文自动语音识别(附加),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/damo_asr/models` 中。 +对于中文自动语音识别(附加),从 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files), 和 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) 下载模型,并将它们放置在 `tools/asr/models` 中。 + +对于英语与日语自动语音识别(附加),从 [Faster Whisper Large V3](https://huggingface.co/Systran/faster-whisper-large-v3) 下载模型,并将它们放置在 `tools/asr/models` 中。 此外,[其他模型](https://huggingface.co/Systran)可能具有类似效果,但占用更小的磁盘空间。 + +中国地区用户可以通过以下链接下载: +- [Faster Whisper Large V3](https://www.icloud.com/iclouddrive/0c4pQxFs7oWyVU1iMTq2DbmLA#faster-whisper-large-v3)(点击“下载副本”) + +- [Faster Whisper Large V3](https://hf-mirror.com/Systran/faster-whisper-large-v3)(Hugging Face镜像站) + ## 数据集格式 @@ -204,13 +212,13 @@ python audio_slicer.py \ ```` 这是使用命令行完成数据集ASR处理的方式(仅限中文) ```` -python tools/damo_asr/cmd-asr.py "" +python tools/asr/funasr_asr.py -i -o ```` 通过Faster_Whisper进行ASR处理(除中文之外的ASR标记) (没有进度条,GPU性能可能会导致时间延迟) ```` -python ./tools/damo_asr/WhisperASR.py -i -o -f -l +python ./tools/asr/fasterwhisper_asr.py -i -o -l ```` 启用自定义列表保存路径 ## 致谢 diff --git a/docs/ja/README.md b/docs/ja/README.md index 5d9b4dec..ca2b067a 100644 --- 
a/docs/ja/README.md +++ b/docs/ja/README.md @@ -127,7 +127,7 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS) から事前訓練済みモデルをダウンロードし、`GPT_SoVITSpretrained_models` に置きます。 -中国語 ASR(追加)については、[Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files)、[Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files)、[Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) からモデルをダウンロードし、`tools/damo_asr/models` に置いてください。 +中国語 ASR(追加)については、[Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files)、[Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files)、[Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files) からモデルをダウンロードし、`tools/asr/models` に置いてください。 UVR5 (Vocals/Accompaniment Separation & Reverberation Removal, additionally) の場合は、[UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights) からモデルをダウンロードして `tools/uvr5/uvr5_weights` に置きます。 @@ -156,7 +156,7 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin. - [ ] **優先度 高:** - [x] 日本語と英語でのローカライズ。 - - [ ] ユーザーガイド。 + - [] ユーザーガイド。 - [x] 日本語データセットと英語データセットのファインチューニングトレーニング。 - [ ] **機能:** @@ -192,13 +192,13 @@ python audio_slicer.py \ ``` コマンドラインを使用してデータセット ASR 処理を行う方法です (中国語のみ) ``` -python tools/damo_asr/cmd-asr.py "" +python tools/asr/funasr_asr.py -i -o ``` ASR処理はFaster_Whisperを通じて実行されます(中国語を除くASRマーキング) (進行状況バーは表示されません。GPU のパフォーマンスにより時間遅延が発生する可能性があります) ``` -python ./tools/damo_asr/WhisperASR.py -i -o -f -l +python ./tools/asr/fasterwhisper_asr.py -i -o -l ``` カスタムリストの保存パスが有効になっています ## クレジット diff --git a/docs/ko/README.md b/docs/ko/README.md index bb01d5d5..bf94d9be 100644 --- a/docs/ko/README.md +++ b/docs/ko/README.md @@ -130,7 +130,7 @@ docker run --rm -it --gpus=all --env=is_half=False --volume=G:\GPT-SoVITS-Docker [GPT-SoVITS Models](https://huggingface.co/lj1995/GPT-SoVITS)에서 사전 훈련된 모델을 다운로드하고 `GPT_SoVITS\pretrained_models`에 넣습니다. -중국어 자동 음성 인식(ASR), 음성 반주 분리 및 음성 제거를 위해 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files) 및 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files)을 다운로드하고 `tools/damo_asr/models`에 넣습니다. +중국어 자동 음성 인식(ASR), 음성 반주 분리 및 음성 제거를 위해 [Damo ASR Model](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files), [Damo VAD Model](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/files) 및 [Damo Punc Model](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/files)을 다운로드하고 `tools/asr/models`에 넣습니다. UVR5(음성/반주 분리 및 잔향 제거)를 위해 [UVR5 Weights](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/uvr5_weights)에서 모델을 다운로드하고 `tools/uvr5/uvr5_weights`에 넣습니다. @@ -196,13 +196,13 @@ python audio_slicer.py \ ``` 명령줄을 사용하여 데이터 세트 ASR 처리를 수행하는 방법입니다(중국어만 해당). ``` -python tools/damo_asr/cmd-asr.py "" +python tools/asr/funasr_asr.py -i -o ``` ASR 처리는 Faster_Whisper(중국어를 제외한 ASR 마킹)를 통해 수행됩니다. 
(진행률 표시줄 없음, GPU 성능으로 인해 시간 지연이 발생할 수 있음) ``` -python ./tools/damo_asr/WhisperASR.py -i -o -f -l +python ./tools/asr/fasterwhisper_asr.py -i -o -l ``` 사용자 정의 목록 저장 경로가 활성화되었습니다. ## 감사의 말 diff --git a/i18n/locale/zh_CN.json b/i18n/locale/zh_CN.json index c2df6c61..e6639c51 100644 --- a/i18n/locale/zh_CN.json +++ b/i18n/locale/zh_CN.json @@ -2,6 +2,18 @@ "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", "UVR5已开启": "UVR5已开启", "UVR5已关闭": "UVR5已关闭", + "输入文件夹路径": "输入文件夹路径", + "输出文件夹路径": "输出文件夹路径", + "ASR 模型": "ASR 模型", + "ASR 模型尺寸": "ASR 模型尺寸", + "ASR 语言设置": "ASR 语言设置", + "模型切换": "模型切换", + "是否开启dpo训练选项(实验性)": "是否开启dpo训练选项(实验性)", + "开启无参考文本模式。不填参考文本亦相当于开启。": "开启无参考文本模式。不填参考文本亦相当于开启。", + "使用无参考文本模式时建议使用微调的GPT": "使用无参考文本模式时建议使用微调的GPT", + "后续将支持转音素、手工修改音素、语音合成分步执行。": "后续将支持转音素、手工修改音素、语音合成分步执行。", + "gpt采样参数(无参考文本时不要太低):": "gpt采样参数(无参考文本时不要太低):", + "按标点符号切": "按标点符号切", "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.", "0-前置数据集获取工具": "0-前置数据集获取工具", "0a-UVR5人声伴奏分离&去混响去延迟工具": "0a-UVR5人声伴奏分离&去混响去延迟工具",