diff --git a/.gitignore b/.gitignore
index 00f6bb9..96e754a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,5 +7,8 @@ runtime
 output
 logs
 reference
+GPT_weights
 SoVITS_weights
-GPT_weights
\ No newline at end of file
+TEMP
+
+
diff --git a/GPT_SoVITS/AR/models/t2s_model.py b/GPT_SoVITS/AR/models/t2s_model.py
index aaeace9..815ecec 100644
--- a/GPT_SoVITS/AR/models/t2s_model.py
+++ b/GPT_SoVITS/AR/models/t2s_model.py
@@ -337,7 +337,7 @@ class Text2SemanticDecoder(nn.Module):
         # AR Decoder
         y = prompts
 
-        prefix_len = y.shape[1]
+        x_len = x.shape[1]
         x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
         stop = False
 
@@ -353,47 +353,41 @@ class Text2SemanticDecoder(nn.Module):
             "first_infer": 1,
             "stage": 0,
         }
-        for idx in tqdm(range(1500)):
-            if cache["first_infer"] == 1:
-                y_emb = self.ar_audio_embedding(y)
-            else:
-                y_emb = torch.cat(
-                    [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
-                )
-            cache["y_emb"] = y_emb
+        ################### first step ##########################
+        if y is not None:
+            y_emb = self.ar_audio_embedding(y)
+            y_len = y_emb.shape[1]
+            prefix_len = y.shape[1]
             y_pos = self.ar_audio_position(y_emb)
-            # x and the gradually growing y are fed to the model together
-            if cache["first_infer"] == 1:
-                xy_pos = torch.concat([x, y_pos], dim=1)
-            else:
-                xy_pos = y_pos[:, -1:]
-            y_len = y_pos.shape[1]
-            ###the following 3 are not cached
-            if cache["first_infer"] == 1:
-                x_attn_mask_pad = F.pad(
+            xy_pos = torch.concat([x, y_pos], dim=1)
+            cache["y_emb"] = y_emb
+            ref_free = False
+        else:
+            y_emb = None
+            y_len = 0
+            prefix_len = 0
+            y_pos = None
+            xy_pos = x
+            y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
+            ref_free = True
+
+        x_attn_mask_pad = F.pad(
                     x_attn_mask,
                     (0, y_len),  ###the all-0 xx block is extended to all-0 xx plus all-1 xy, (x, x+y)
                     value=True,
                 )
-                y_attn_mask = F.pad(  ###the upper-right 1s of yy are extended with 0s for xy on the left, (y, x+y)
-                    torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
-                    (x_len, 0),
-                    value=False,
-                )
-                xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
-                    y.device
-                )
-            else:
-                ###rightmost column only (this is wrong)
-                # xy_attn_mask=torch.ones((1, x_len+y_len), dtype=torch.bool,device=xy_pos.device)
-                # xy_attn_mask[:,-1]=False
-                ###bottom row only (this is correct)
-                xy_attn_mask = torch.zeros(
-                    (1, x_len + y_len), dtype=torch.bool, device=xy_pos.device
-                )
-            # pdb.set_trace()
-            ###caching is the centerpiece
-            # print(1111,xy_pos.shape,xy_attn_mask.shape,x_len,y_len)
+        y_attn_mask = F.pad(  ###the upper-right 1s of yy are extended with 0s for xy on the left, (y, x+y)
+            torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
+            (x_len, 0),
+            value=False,
+        )
+        xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
+            x.device
+        )
+
+
+        for idx in tqdm(range(1500)):
+
             xy_dec, _ = self.h((xy_pos, None), mask=xy_attn_mask, cache=cache)
             logits = self.ar_predict_layer(
                 xy_dec[:, -1]
@@ -404,6 +398,10 @@ class Text2SemanticDecoder(nn.Module):
             samples = sample(
                 logits[0], y, top_k=top_k, top_p=top_p, repetition_penalty=1.35, temperature=temperature
             )[0].unsqueeze(0)
+            # the semantic_ids generated this step and the previous y form the new y
+            # print(samples.shape)#[1,1]# the first 1 is bs
+            y = torch.concat([y, samples], dim=1)
+
             if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                 print("use early stop num:", early_stop_num)
                 stop = True
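The hunks above split decoding into a one-time prefill pass over text plus prompt, then cached single-token steps. A minimal, self-contained sketch (toy sizes, not part of the patch) of the two attention-mask shapes this implies: the first forward pass builds one full square mask in which text positions may not attend to audio positions and audio positions attend causally, while each later step, with the prefix held in the cache, only needs a single all-False row, as the next hunk shows.

```python
import torch
import torch.nn.functional as F

x_len, y_len = 4, 3  # toy sizes: 4 text tokens, 3 audio prompt tokens

# first step: the all-False text block is padded with True on the right
# (text may not attend to audio); the audio block is causal and shifted
# past the text columns (True = masked)
x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)
y_attn_mask = F.pad(
    torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
    (x_len, 0),
    value=False,
)
xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
print(xy_attn_mask.shape)  # torch.Size([7, 7])

# later steps: only the newest token queries the cached sequence and may
# attend everywhere, so one all-False row suffices
step_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool)
print(step_mask.shape)  # torch.Size([1, 7])
```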
prediction") print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]") break - # 本次生成的 semantic_ids 和之前的 y 构成新的 y - # print(samples.shape)#[1,1]#第一个1是bs - y = torch.concat([y, samples], dim=1) + + ####################### update next step ################################### cache["first_infer"] = 0 - return y, idx + if cache["y_emb"] is not None: + y_emb = torch.cat( + [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], dim = 1 + ) + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + xy_pos = y_pos[:, -1:] + else: + y_emb = self.ar_audio_embedding(y[:, -1:]) + cache["y_emb"] = y_emb + y_pos = self.ar_audio_position(y_emb) + xy_pos = y_pos + y_len = y_pos.shape[1] + + ###最右边一列(是错的) + # xy_attn_mask=torch.ones((1, x_len+y_len), dtype=torch.bool,device=xy_pos.device) + # xy_attn_mask[:,-1]=False + ###最下面一行(是对的) + xy_attn_mask = torch.zeros( + (1, x_len + y_len), dtype=torch.bool, device=xy_pos.device + ) + if ref_free: + return y[:, :-1], 0 + return y[:, :-1], idx-1 diff --git a/GPT_SoVITS/AR/models/utils.py b/GPT_SoVITS/AR/models/utils.py index bc5f2d0..84063f8 100644 --- a/GPT_SoVITS/AR/models/utils.py +++ b/GPT_SoVITS/AR/models/utils.py @@ -114,7 +114,8 @@ def logits_to_probs( top_p: Optional[int] = None, repetition_penalty: float = 1.0, ): - previous_tokens = previous_tokens.squeeze() + if previous_tokens is not None: + previous_tokens = previous_tokens.squeeze() # print(logits.shape,previous_tokens.shape) # pdb.set_trace() if previous_tokens is not None and repetition_penalty != 1.0: diff --git a/GPT_SoVITS/AR/modules/patched_mha_with_cache.py b/GPT_SoVITS/AR/modules/patched_mha_with_cache.py index 5720670..7be241d 100644 --- a/GPT_SoVITS/AR/modules/patched_mha_with_cache.py +++ b/GPT_SoVITS/AR/modules/patched_mha_with_cache.py @@ -5,8 +5,8 @@ from torch.nn.functional import ( _none_or_dtype, _in_projection_packed, ) - -# import torch +from torch.nn import functional as F +import torch # Tensor = torch.Tensor # from typing import Callable, List, Optional, Tuple, Union @@ -448,9 +448,11 @@ def multi_head_attention_forward_patched( k = k.view(bsz, num_heads, src_len, head_dim) v = v.view(bsz, num_heads, src_len, head_dim) + # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): attn_output = scaled_dot_product_attention( q, k, v, attn_mask, dropout_p, is_causal ) + attn_output = ( attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim) ) diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py index 9c5197a..7ae9259 100644 --- a/GPT_SoVITS/inference_webui.py +++ b/GPT_SoVITS/inference_webui.py @@ -365,15 +365,19 @@ def merge_short_text_in_array(texts, threshold): result[len(result) - 1] += text return result -def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6): +def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False): + if prompt_text is None or len(prompt_text) == 0: + ref_free = True t0 = ttime() prompt_language = dict_language[prompt_language] text_language = dict_language[text_language] - prompt_text = prompt_text.strip("\n") - if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "." + if not ref_free: + prompt_text = prompt_text.strip("\n") + if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "." 
diff --git a/GPT_SoVITS/AR/modules/patched_mha_with_cache.py b/GPT_SoVITS/AR/modules/patched_mha_with_cache.py
index 5720670..7be241d 100644
--- a/GPT_SoVITS/AR/modules/patched_mha_with_cache.py
+++ b/GPT_SoVITS/AR/modules/patched_mha_with_cache.py
@@ -5,8 +5,8 @@ from torch.nn.functional import (
     _none_or_dtype,
     _in_projection_packed,
 )
-
-# import torch
+from torch.nn import functional as F
+import torch
 # Tensor = torch.Tensor
 # from typing import Callable, List, Optional, Tuple, Union
 
@@ -448,9 +448,11 @@ def multi_head_attention_forward_patched(
         k = k.view(bsz, num_heads, src_len, head_dim)
         v = v.view(bsz, num_heads, src_len, head_dim)
 
+        # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
         attn_output = scaled_dot_product_attention(
             q, k, v, attn_mask, dropout_p, is_causal
         )
+
         attn_output = (
             attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
         )
diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py
index 9c5197a..7ae9259 100644
--- a/GPT_SoVITS/inference_webui.py
+++ b/GPT_SoVITS/inference_webui.py
@@ -365,15 +365,19 @@ def merge_short_text_in_array(texts, threshold):
             result[len(result) - 1] += text
     return result
 
-def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6):
+def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False):
+    if prompt_text is None or len(prompt_text) == 0:
+        ref_free = True
     t0 = ttime()
     prompt_language = dict_language[prompt_language]
     text_language = dict_language[text_language]
-    prompt_text = prompt_text.strip("\n")
-    if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+    if not ref_free:
+        prompt_text = prompt_text.strip("\n")
+        if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
+        print(i18n("实际输入的参考文本:"), prompt_text)
     text = text.strip("\n")
     if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
-    print(i18n("实际输入的参考文本:"), prompt_text)
+
     print(i18n("实际输入的目标文本:"), text)
     zero_wav = np.zeros(
         int(hps.data.sampling_rate * 0.3),
@@ -398,11 +402,10 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
             1, 2
         )  # .float()
         codes = vq_model.extract_latent(ssl_content)
+
        prompt_semantic = codes[0, 0]
    t1 = ttime()
-    phones1, word2ph1, norm_text1=get_cleaned_text_final(prompt_text, prompt_language)
-
    if (how_to_cut == i18n("凑四句一切")):
        text = cut1(text)
    elif (how_to_cut == i18n("凑50字一切")):
@@ -419,7 +422,9 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
     texts = text.split("\n")
     texts = merge_short_text_in_array(texts, 5)
     audio_opt = []
-    bert1=get_bert_final(phones1, word2ph1, norm_text1,prompt_language,device).to(dtype)
+    if not ref_free:
+        phones1, word2ph1, norm_text1=get_cleaned_text_final(prompt_text, prompt_language)
+        bert1=get_bert_final(phones1, word2ph1, norm_text1,prompt_language,device).to(dtype)
 
     for text in texts:
         # skip blank lines in the target text, which would otherwise raise an error
@@ -429,9 +434,13 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
         print(i18n("实际输入的目标文本(每句):"), text)
         phones2, word2ph2, norm_text2 = get_cleaned_text_final(text, text_language)
         bert2 = get_bert_final(phones2, word2ph2, norm_text2, text_language, device).to(dtype)
-        bert = torch.cat([bert1, bert2], 1)
+        if not ref_free:
+            bert = torch.cat([bert1, bert2], 1)
+            all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
+        else:
+            bert = bert2
+            all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
 
-        all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
         bert = bert.to(device).unsqueeze(0)
         all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
         prompt = prompt_semantic.unsqueeze(0).to(device)
@@ -441,7 +450,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
             pred_semantic, idx = t2s_model.model.infer_panel(
                 all_phoneme_ids,
                 all_phoneme_len,
-                prompt,
+                None if ref_free else prompt,
                 bert,
                 # prompt_phone_len=ph_offset,
                 top_k=top_k,
@@ -607,7 +616,10 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
         gr.Markdown(value=i18n("*请上传并填写参考信息"))
         with gr.Row():
             inp_ref = gr.Audio(label=i18n("请上传3~10秒内参考音频,超过会报错!"), type="filepath")
-            prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="")
+            with gr.Column():
+                ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式 无参考文本时该选项无效"), value=False, interactive=True, show_label=True)
+                gr.Markdown("使用无参考文本模式时建议使用微调GPT")
+                prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="")
             prompt_language = gr.Dropdown(
                 label=i18n("参考音频的语种"), choices=[i18n("中文"), i18n("英文"), i18n("日文"), i18n("中英混合"), i18n("日英混合"), i18n("多语种混合")], value=i18n("中文")
             )
@@ -624,6 +636,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
                 interactive=True,
             )
         with gr.Row():
+            gr.Markdown("gpt采样参数(无参考文本时不要太低):")
             top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k"),value=5,interactive=True)
             top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p"),value=1,interactive=True)
             temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature"),value=1,interactive=True)
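For context on the wiring in the next hunk: Gradio maps each element of the `inputs` list positionally onto the callback's parameters, so appending the new checkbox is all it takes for its boolean to arrive as `ref_free`. A stripped-down sketch of that pattern with placeholder components (not the WebUI code itself):

```python
import gradio as gr

def tts_stub(text, ref_free=False):
    # placeholder callback standing in for get_tts_wav
    return f"ref_free={ref_free}: {text}"

with gr.Blocks() as demo:
    text = gr.Textbox(label="text")
    ref_text_free = gr.Checkbox(label="no-reference-text mode", value=False)
    output = gr.Textbox(label="output")
    button = gr.Button("synthesize")
    # the checkbox appended to inputs becomes the trailing ref_free argument
    button.click(tts_stub, [text, ref_text_free], [output])
```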
@@ -632,7 +645,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
 
     inference_button.click(
         get_tts_wav,
-        [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut,top_k,top_p,temperature],
+        [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free],
         [output],
     )
 
diff --git a/GPT_SoVITS/module/models.py b/GPT_SoVITS/module/models.py
index c99485c..a4d2235 100644
--- a/GPT_SoVITS/module/models.py
+++ b/GPT_SoVITS/module/models.py
@@ -228,6 +228,7 @@ class TextEncoder(nn.Module):
         )
 
         y = self.ssl_proj(y * y_mask) * y_mask
+
         y = self.encoder_ssl(y * y_mask, y_mask)
 
         text_mask = torch.unsqueeze(
@@ -958,11 +959,13 @@ class SynthesizerTrn(nn.Module):
 
     @torch.no_grad()
     def decode(self, codes, text, refer, noise_scale=0.5):
-        refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)
-        refer_mask = torch.unsqueeze(
-            commons.sequence_mask(refer_lengths, refer.size(2)), 1
-        ).to(refer.dtype)
-        ge = self.ref_enc(refer * refer_mask, refer_mask)
+        ge = None
+        if refer is not None:
+            refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)
+            refer_mask = torch.unsqueeze(
+                commons.sequence_mask(refer_lengths, refer.size(2)), 1
+            ).to(refer.dtype)
+            ge = self.ref_enc(refer * refer_mask, refer_mask)
 
         y_lengths = torch.LongTensor([codes.size(2) * 2]).to(codes.device)
         text_lengths = torch.LongTensor([text.size(-1)]).to(text.device)
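Taken together, the patch lets the whole pipeline run without a reference transcript: an empty `prompt_text` forces `ref_free=True`, `infer_panel` receives `prompts=None`, and `SynthesizerTrn.decode` accepts `refer=None`. A usage sketch, assuming `inference_webui.py` is imported with its models loaded; the file path and texts are placeholders, and `get_tts_wav` is assumed to yield `(sampling_rate, waveform)` pairs, matching its use as a streaming Gradio callback:

```python
# placeholders throughout: ref.wav and the texts are example inputs
for sampling_rate, audio in get_tts_wav(
    ref_wav_path="ref.wav",
    prompt_text="",                    # empty reference text forces ref_free=True
    prompt_language=i18n("中文"),
    text="需要合成的目标文本",
    text_language=i18n("中文"),
    top_k=20, top_p=1, temperature=1,  # per the UI hint: not too low without reference text
):
    print(sampling_rate, audio.shape)
```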