diff --git a/GPT_SoVITS/TTS_infer_pack/TTS.py b/GPT_SoVITS/TTS_infer_pack/TTS.py
index 1c5897a2..1d25e30a 100644
--- a/GPT_SoVITS/TTS_infer_pack/TTS.py
+++ b/GPT_SoVITS/TTS_infer_pack/TTS.py
@@ -1426,7 +1426,7 @@ class TTS:
                 # else:
                 #     token_padding_length = 0
-                audio_chunk, latent, latent_mask = self.vits_model.decode_steaming(
+                audio_chunk, latent, latent_mask = self.vits_model.decode_streaming(
                     _semantic_tokens.unsqueeze(0),
                     phones,
                     refer_audio_spec,
                     speed=speed_factor,
diff --git a/GPT_SoVITS/module/models.py b/GPT_SoVITS/module/models.py
index 5049017f..348ddb3f 100644
--- a/GPT_SoVITS/module/models.py
+++ b/GPT_SoVITS/module/models.py
@@ -1040,7 +1040,7 @@ class SynthesizerTrn(nn.Module):

     @torch.no_grad()
-    def decode_steaming(self, codes, text, refer, noise_scale=0.5, speed=1, sv_emb=None, result_length:int=None, overlap_frames:torch.Tensor=None, padding_length:int=None):
+    def decode_streaming(self, codes, text, refer, noise_scale=0.5, speed=1, sv_emb=None, result_length:int=None, overlap_frames:torch.Tensor=None, padding_length:int=None):
         def get_ge(refer, sv_emb):
             ge = None
             if refer is not None:
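
The change is a mechanical rename of the misspelled decode_steaming to decode_streaming, applied both to the definition in SynthesizerTrn (GPT_SoVITS/module/models.py) and to its call site in TTS.py. A minimal, hypothetical backward-compatibility shim is sketched below; it is not part of this diff, and the import path for SynthesizerTrn is only assumed from the file touched above. It would let out-of-tree callers that still use the old name keep working while nudging them toward the new one.

# Hypothetical compatibility shim -- NOT part of this diff. It forwards the
# old, misspelled method name to the renamed decode_streaming and emits a
# DeprecationWarning so callers know to update.
import warnings


def _decode_steaming(self, *args, **kwargs):
    """Deprecated alias kept only so external callers do not break."""
    warnings.warn(
        "decode_steaming() was renamed to decode_streaming(); "
        "please update your call sites.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.decode_streaming(*args, **kwargs)


# Attaching the alias would look like this (left commented out so the sketch
# stays self-contained without importing the GPT-SoVITS package):
# from GPT_SoVITS.module.models import SynthesizerTrn
# SynthesizerTrn.decode_steaming = _decode_steaming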