Mirror of https://github.com/RVC-Boss/GPT-SoVITS.git
Synced 2025-11-04 10:52:13 +08:00

Commit: update_infer
parent 41041715a4
commit 1803729360
Changed files:

  .gitignore: 4 changes (vendored)
@@ -7,4 +7,6 @@ runtime
 output
 logs
 reference
+GPT_weights
 SoVITS_weights
+TEMP

GPT_SoVITS/AR/models/t2s_model.py

@@ -240,7 +240,7 @@ class Text2SemanticDecoder(nn.Module):
 
         # AR Decoder
         y = prompts
-        prefix_len = y.shape[1]
+
         x_len = x.shape[1]
         x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
         stop = False

@@ -256,47 +256,41 @@ class Text2SemanticDecoder(nn.Module):
             "first_infer": 1,
             "stage": 0,
         }
-        for idx in tqdm(range(1500)):
-            if cache["first_infer"] == 1:
-                y_emb = self.ar_audio_embedding(y)
-            else:
-                y_emb = torch.cat(
-                    [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
-                )
-            cache["y_emb"] = y_emb
-            y_pos = self.ar_audio_position(y_emb)
-            # feed x together with the steadily growing y into the model
-            if cache["first_infer"] == 1:
-                xy_pos = torch.concat([x, y_pos], dim=1)
-            else:
-                xy_pos = y_pos[:, -1:]
-            y_len = y_pos.shape[1]
-            ### the following 3 are not cached
-            if cache["first_infer"] == 1:
-                x_attn_mask_pad = F.pad(
+        ###################  first step ##########################
+        if y is not None:
+            y_emb = self.ar_audio_embedding(y)
+            y_len = y_emb.shape[1]
+            prefix_len = y.shape[1]
+            y_pos = self.ar_audio_position(y_emb)
+            xy_pos = torch.concat([x, y_pos], dim=1)
+            cache["y_emb"] = y_emb
+            ref_free = False
+        else:
+            y_emb = None
+            y_len = 0
+            prefix_len = 0
+            y_pos = None
+            xy_pos = x
+            y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
+            ref_free = True
+
+        x_attn_mask_pad = F.pad(
                     x_attn_mask,
                     (0, y_len),  ### the all-0 (x, x) block extends to all-0 over x plus all-1 over y, (x, x+y)
                     value=True,
                 )
-                y_attn_mask = F.pad(  ### yy's upper-right 1s extend with 0s on the left for x, (y, x+y)
-                    torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
-                    (x_len, 0),
-                    value=False,
-                )
-                xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
-                    y.device
-                )
-            else:
-                ### rightmost column (wrong)
-                # xy_attn_mask=torch.ones((1, x_len+y_len), dtype=torch.bool,device=xy_pos.device)
-                # xy_attn_mask[:,-1]=False
-                ### bottom row (correct)
-                xy_attn_mask = torch.zeros(
-                    (1, x_len + y_len), dtype=torch.bool, device=xy_pos.device
-                )
-            # pdb.set_trace()
-            ### the caching centerpiece
-            # print(1111,xy_pos.shape,xy_attn_mask.shape,x_len,y_len)
+        y_attn_mask = F.pad(  ### yy's upper-right 1s extend with 0s on the left for x, (y, x+y)
+            torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
+            (x_len, 0),
+            value=False,
+        )
+        xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
+            x.device
+        )
+
+
+        for idx in tqdm(range(1500)):
+
             xy_dec, _ = self.h((xy_pos, None), mask=xy_attn_mask, cache=cache)
             logits = self.ar_predict_layer(
                 xy_dec[:, -1]

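Note: the hunk above hoists the prefix-mask construction out of the decoding loop, so it now runs exactly once before `for idx in tqdm(range(1500))`, and the `first_infer` branching inside the loop disappears. The layout is the usual prefix-LM mask: text positions x attend to all of x and none of y, while audio positions y attend to all of x and causally within y. A minimal, self-contained sketch of that construction (toy lengths; the names mirror the diff, but this is an illustration, not the repo's code):

    import torch
    import torch.nn.functional as F

    x_len, y_len = 3, 2  # text length, audio-prompt length

    # x rows: all-False over x (attend), padded True (masked) over y
    x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
    x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)

    # y rows: causal triangle over y, padded False (attend) over x on the left
    y_attn_mask = F.pad(
        torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
        (x_len, 0),
        value=False,
    )

    xy_attn_mask = torch.cat([x_attn_mask_pad, y_attn_mask], dim=0)  # (x+y, x+y)
    print(xy_attn_mask.int())  # 1 marks positions that may NOT be attended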
@@ -307,6 +301,10 @@ class Text2SemanticDecoder(nn.Module):
             samples = sample(
                 logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35
             )[0].unsqueeze(0)
+            # the semantic_ids generated this step and the previous y form the new y
+            # print(samples.shape)#[1,1]# the first 1 is the batch size
+            y = torch.concat([y, samples], dim=1)
+
             if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                 print("use early stop num:", early_stop_num)
                 stop = True

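Note: `y = torch.concat([y, samples], dim=1)` now runs before the stop checks instead of at the bottom of the loop, so `y.shape[1]` already counts the token produced in the current step when the early-stop test fires. The budget itself is unchanged; a quick worked example, assuming the webui's `hz = 50` semantic tokens per second:

    hz, max_sec = 50, 10           # 50 semantic tokens per second, a 10 s budget
    early_stop_num = hz * max_sec  # 500: stop once y holds 500 tokens beyond the prompt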
@@ -315,13 +313,38 @@ class Text2SemanticDecoder(nn.Module):
                 # print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
                 stop = True
             if stop:
-                if prompts.shape[1] == y.shape[1]:
+                # if prompts.shape[1] == y.shape[1]:
+                #     y = torch.concat([y, torch.zeros_like(samples)], dim=1)
+                #     print("bad zero prediction")
+                if y.shape[1]==0:
                     y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                     print("bad zero prediction")
                 print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
                 break
-            # the semantic_ids generated this step and the previous y form the new y
-            # print(samples.shape)#[1,1]# the first 1 is the batch size
-            y = torch.concat([y, samples], dim=1)
+
+            ####################### update next step ###################################
             cache["first_infer"] = 0
-        return y, idx
+            if cache["y_emb"] is not None:
+                y_emb = torch.cat(
+                    [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], dim = 1
+                )
+                cache["y_emb"] = y_emb
+                y_pos = self.ar_audio_position(y_emb)
+                xy_pos = y_pos[:, -1:]
+            else:
+                y_emb = self.ar_audio_embedding(y[:, -1:])
+                cache["y_emb"] = y_emb
+                y_pos = self.ar_audio_position(y_emb)
+                xy_pos = y_pos
+            y_len = y_pos.shape[1]
+
+            ### rightmost column (wrong)
+            # xy_attn_mask=torch.ones((1, x_len+y_len), dtype=torch.bool,device=xy_pos.device)
+            # xy_attn_mask[:,-1]=False
+            ### bottom row (correct)
+            xy_attn_mask = torch.zeros(
+                (1, x_len + y_len), dtype=torch.bool, device=xy_pos.device
+            )
+        if ref_free:
+            return y[:, :-1], 0
+        return y[:, :-1], idx-1

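Note: with the first step hoisted out, each loop iteration only handles the newest token: it embeds `y[:, -1:]`, appends that to `cache["y_emb"]`, and feeds the single position `xy_pos = y_pos[:, -1:]` through the transformer with a 1 x (x_len + y_len) all-False mask; the function finally returns `y[:, :-1]`, dropping the trailing EOS token (with index 0 in the ref-free case). A toy sketch of the grow-by-one embedding cache (standalone, illustrative sizes):

    import torch
    import torch.nn as nn

    emb = nn.Embedding(1025, 512)  # toy stand-in for ar_audio_embedding
    cache = {"y_emb": None}

    def step(y):
        """Embed only the newest token and append it to the cached embeddings."""
        if cache["y_emb"] is None:
            cache["y_emb"] = emb(y)  # first step: embed the whole sequence
        else:
            cache["y_emb"] = torch.cat([cache["y_emb"], emb(y[:, -1:])], dim=1)
        return cache["y_emb"]

    y = torch.randint(0, 1024, (1, 4))           # 4 prompt tokens
    print(step(y).shape)                         # torch.Size([1, 4, 512])
    y = torch.cat([y, torch.randint(0, 1024, (1, 1))], dim=1)
    print(step(y).shape)                         # torch.Size([1, 5, 512])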
GPT_SoVITS/AR/models/utils.py

@@ -114,7 +114,8 @@ def logits_to_probs(
     top_p: Optional[int] = None,
     repetition_penalty: float = 1.0,
 ):
-    previous_tokens = previous_tokens.squeeze()
+    if previous_tokens is not None:
+        previous_tokens = previous_tokens.squeeze()
     # print(logits.shape,previous_tokens.shape)
     # pdb.set_trace()
     if previous_tokens is not None and repetition_penalty != 1.0:

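Note: the added `None` guard lets `logits_to_probs` run with no decode history, which is exactly what the ref-free first step produces. For context, the repetition penalty it protects follows the common gather/scale/scatter pattern; a self-contained sketch under that assumption (not a verbatim copy of the repo's function):

    import torch

    def apply_repetition_penalty(logits, previous_tokens, penalty=1.35):
        """Penalize already-emitted tokens; a no-op without history."""
        if previous_tokens is None or penalty == 1.0:
            return logits
        prev = previous_tokens.squeeze().long()
        score = torch.gather(logits, dim=0, index=prev)
        # dividing positive scores and multiplying negative ones both lower them
        score = torch.where(score < 0, score * penalty, score / penalty)
        return logits.scatter(dim=0, index=prev, src=score)

    logits = torch.randn(1025)
    print(apply_repetition_penalty(logits, torch.tensor([[3, 7, 7]])).shape)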
GPT_SoVITS/AR/modules/patched_mha_with_cache.py

@@ -5,8 +5,8 @@ from torch.nn.functional import (
     _none_or_dtype,
     _in_projection_packed,
 )
-
-# import torch
+from torch.nn import functional as F
+import torch
 # Tensor = torch.Tensor
 # from typing import Callable, List, Optional, Tuple, Union
 

@@ -448,9 +448,11 @@ def multi_head_attention_forward_patched(
         k = k.view(bsz, num_heads, src_len, head_dim)
         v = v.view(bsz, num_heads, src_len, head_dim)
 
+        # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
         attn_output = scaled_dot_product_attention(
             q, k, v, attn_mask, dropout_p, is_causal
         )
+
         attn_output = (
             attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
         )

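Note: the patch keeps routing attention through PyTorch 2.x `scaled_dot_product_attention`; the new commented-out `sdp_kernel` line just records the option of pinning a backend (flash / math / mem-efficient). A minimal usage sketch with the same (bsz, num_heads, len, head_dim) layout and the same output permute/reshape as the patched code:

    import torch
    from torch.nn.functional import scaled_dot_product_attention

    bsz, num_heads, tgt_len, head_dim = 1, 8, 10, 64
    q = torch.randn(bsz, num_heads, tgt_len, head_dim)
    k = torch.randn(bsz, num_heads, tgt_len, head_dim)
    v = torch.randn(bsz, num_heads, tgt_len, head_dim)

    # is_causal=True applies the lower-triangular mask internally,
    # so plain AR decoding needs no explicit attn_mask tensor here.
    out = scaled_dot_product_attention(q, k, v, attn_mask=None,
                                       dropout_p=0.0, is_causal=True)

    # the same reshape the patch applies before the output projection
    out = out.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, num_heads * head_dim)
    print(out.shape)  # torch.Size([10, 512])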
GPT_SoVITS/inference_webui.py

@@ -392,6 +392,7 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
             1, 2
         )  # .float()
         codes = vq_model.extract_latent(ssl_content)
+
         prompt_semantic = codes[0, 0]
     t1 = ttime()
 
@@ -423,9 +424,9 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
         print(i18n("实际输入的目标文本(每句):"), text)
         phones2, word2ph2, norm_text2 = get_cleaned_text_final(text, text_language)
         bert2 = get_bert_final(phones2, word2ph2, norm_text2, text_language, device).to(dtype)
-        bert = torch.cat([bert1, bert2], 1)
+        bert = torch.cat([bert2], 1)
 
-        all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
+        all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
         bert = bert.to(device).unsqueeze(0)
         all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
         prompt = prompt_semantic.unsqueeze(0).to(device)

@@ -435,14 +436,14 @@ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language,
             pred_semantic, idx = t2s_model.model.infer_panel(
                 all_phoneme_ids,
                 all_phoneme_len,
-                prompt,
+                None,
                 bert,
                 # prompt_phone_len=ph_offset,
                 top_k=config["inference"]["top_k"],
                 early_stop_num=hz * max_sec,
             )
         t3 = ttime()
-        # print(pred_semantic.shape,idx)
+        print(pred_semantic,idx)
         pred_semantic = pred_semantic[:, -idx:].unsqueeze(
             0
         )  # .unsqueeze(0)  # mq needs one extra unsqueeze

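Note: passing `None` where `prompt` used to go drives `infer_panel` into the new `ref_free` branch, which returns `idx == 0`; since `-0 == 0` in Python, the caller's `pred_semantic[:, -idx:]` then keeps the entire generated sequence. The call as this hunk leaves it (webui names, shown for reading rather than as a standalone script):

    pred_semantic, idx = t2s_model.model.infer_panel(
        all_phoneme_ids,   # target-text phoneme ids, shape (1, T_text)
        all_phoneme_len,
        None,              # no prompt semantics -> ref_free branch
        bert,
        top_k=config["inference"]["top_k"],
        early_stop_num=hz * max_sec,
    )
    pred_semantic = pred_semantic[:, -idx:]  # idx == 0 in ref-free mode: keeps everything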
@@ -620,7 +621,7 @@ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
 
         inference_button.click(
             get_tts_wav,
-            [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut],
+            [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature],
             [output],
         )
 
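Note: the click wiring now forwards three extra sampling controls, so `get_tts_wav` has to accept `top_k`, `top_p`, and `temperature` after `how_to_cut`; Gradio hands the inputs to the callback positionally, in list order. A standalone sketch of the pattern (toy callback; slider ranges and defaults are assumptions, not the webui's values):

    import gradio as gr

    def tts(text, top_k=5, top_p=1.0, temperature=1.0):
        return f"{text} (top_k={top_k}, top_p={top_p}, temperature={temperature})"

    with gr.Blocks() as demo:
        text = gr.Textbox(label="text")
        top_k = gr.Slider(minimum=1, maximum=100, value=5, step=1, label="top_k")
        top_p = gr.Slider(minimum=0, maximum=1, value=1.0, label="top_p")
        temperature = gr.Slider(minimum=0, maximum=2, value=1.0, label="temperature")
        output = gr.Textbox(label="output")
        inference_button = gr.Button("run")
        # inputs are passed positionally, in list order
        inference_button.click(tts, [text, top_k, top_p, temperature], [output])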
GPT_SoVITS/module/models.py

@@ -228,6 +228,7 @@ class TextEncoder(nn.Module):
         )
 
         y = self.ssl_proj(y * y_mask) * y_mask
+
         y = self.encoder_ssl(y * y_mask, y_mask)
 
         text_mask = torch.unsqueeze(

@@ -958,11 +959,13 @@ class SynthesizerTrn(nn.Module):
 
     @torch.no_grad()
     def decode(self, codes, text, refer, noise_scale=0.5):
-        refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)
-        refer_mask = torch.unsqueeze(
-            commons.sequence_mask(refer_lengths, refer.size(2)), 1
-        ).to(refer.dtype)
-        ge = self.ref_enc(refer * refer_mask, refer_mask)
+        ge = None
+        if refer is not None:
+            refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)
+            refer_mask = torch.unsqueeze(
+                commons.sequence_mask(refer_lengths, refer.size(2)), 1
+            ).to(refer.dtype)
+            ge = self.ref_enc(refer * refer_mask, refer_mask)
 
         y_lengths = torch.LongTensor([codes.size(2) * 2]).to(codes.device)
         text_lengths = torch.LongTensor([text.size(-1)]).to(text.device)

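Note: `decode()` now leaves the global reference embedding `ge` as `None` when no reference spectrogram is supplied, so everything downstream must tolerate `ge=None`. The guard in isolation, as a runnable sketch (toy pooling encoder; `sequence_mask` re-implements what `commons.sequence_mask` does in the repo):

    import torch

    def sequence_mask(lengths, max_len):
        # (B, max_len) mask: True where position < length
        positions = torch.arange(max_len, device=lengths.device)
        return positions.unsqueeze(0) < lengths.unsqueeze(1)

    def ref_embedding(refer, ref_enc):
        """Global reference embedding, or None when no reference audio is given."""
        ge = None
        if refer is not None:
            refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)
            refer_mask = torch.unsqueeze(
                sequence_mask(refer_lengths, refer.size(2)), 1
            ).to(refer.dtype)
            ge = ref_enc(refer * refer_mask, refer_mask)
        return ge

    # toy encoder: masked mean-pool to a (B, C, 1) vector
    toy_enc = lambda x, m: x.sum(-1, keepdim=True) / m.sum(-1, keepdim=True)
    print(ref_embedding(torch.randn(1, 80, 120), toy_enc).shape)  # torch.Size([1, 80, 1])
    print(ref_embedding(None, toy_enc))                           # None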