From e6a32e15b0025f7ff625c57c2c67645bab46b696 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sun, 23 Feb 2025 00:38:47 +0800
Subject: [PATCH] support sovits v3 lora training, 8G GPU memory is enough

support sovits v3 lora training, 8G GPU memory is enough
---
 GPT_SoVITS/module/data_utils.py     | 225 ++++++++++++++++++++++++++++
 GPT_SoVITS/module/mel_processing.py |   2 +-
 GPT_SoVITS/module/models.py         | 175 ++++++++++++++++++++--
 3 files changed, 387 insertions(+), 15 deletions(-)

diff --git a/GPT_SoVITS/module/data_utils.py b/GPT_SoVITS/module/data_utils.py
index 323bf1b..6ceca20 100644
--- a/GPT_SoVITS/module/data_utils.py
+++ b/GPT_SoVITS/module/data_utils.py
@@ -456,6 +456,231 @@ class TextAudioSpeakerCollateV3():
         # return ssl_padded, spec_padded,mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths, wav_padded, wav_lengths,mel_lengths
         return ssl_padded, spec_padded,mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths,mel_lengths
 
+class TextAudioSpeakerLoaderV3b(torch.utils.data.Dataset):
+    """
+    1) loads audio, speaker_id, text pairs
+    2) normalizes text and converts them to sequences of integers
+    3) computes spectrograms from audio files.
+    """
+
+    def __init__(self, hparams, val=False):
+        exp_dir = hparams.exp_dir
+        self.path2 = "%s/2-name2text.txt" % exp_dir
+        self.path4 = "%s/4-cnhubert" % exp_dir
+        self.path5 = "%s/5-wav32k" % exp_dir
+        assert os.path.exists(self.path2)
+        assert os.path.exists(self.path4)
+        assert os.path.exists(self.path5)
+        names4 = set([name[:-3] for name in list(os.listdir(self.path4))])  # strip the .pt suffix
+        names5 = set(os.listdir(self.path5))
+        self.phoneme_data = {}
+        with open(self.path2, "r", encoding="utf8") as f:
+            lines = f.read().strip("\n").split("\n")
+
+        for line in lines:
+            tmp = line.split("\t")
+            if (len(tmp) != 4):
+                continue
+            self.phoneme_data[tmp[0]] = [tmp[1]]
+
+        self.audiopaths_sid_text = list(set(self.phoneme_data) & names4 & names5)
+        tmp = self.audiopaths_sid_text
+        leng = len(tmp)
+        min_num = 100
+        if (leng < min_num):
+            self.audiopaths_sid_text = []
+            for _ in range(max(2, int(min_num / leng))):
+                self.audiopaths_sid_text += tmp
+        self.max_wav_value = hparams.max_wav_value
+        self.sampling_rate = hparams.sampling_rate
+        self.filter_length = hparams.filter_length
+        self.hop_length = hparams.hop_length
+        self.win_length = hparams.win_length
+        self.sampling_rate = hparams.sampling_rate
+        self.val = val
+
+        random.seed(1234)
+        random.shuffle(self.audiopaths_sid_text)
+
+        print("phoneme_data_len:", len(self.phoneme_data.keys()))
+        print("wav_data_len:", len(self.audiopaths_sid_text))
+
+        audiopaths_sid_text_new = []
+        lengths = []
+        skipped_phone = 0
+        skipped_dur = 0
+        for audiopath in tqdm(self.audiopaths_sid_text):
+            try:
+                phoneme = self.phoneme_data[audiopath][0]
+                phoneme = phoneme.split(' ')
+                phoneme_ids = cleaned_text_to_sequence(phoneme, version)
+            except Exception:
+                print(f"{audiopath} not in self.phoneme_data !")
+                skipped_phone += 1
+                continue
+
+            size = os.path.getsize("%s/%s" % (self.path5, audiopath))
+            duration = size / self.sampling_rate / 2
+
+            if duration == 0:
+                print(f"Zero duration for {audiopath}, skipping...")
+                skipped_dur += 1
+                continue
+
+            if 54 > duration > 0.6 or self.val:
+                audiopaths_sid_text_new.append([audiopath, phoneme_ids])
+                lengths.append(size // (2 * self.hop_length))
+            else:
+                skipped_dur += 1
+                continue
+
+        print("skipped_phone: ", skipped_phone, ", skipped_dur: ", skipped_dur)
+        print("total left: ", len(audiopaths_sid_text_new))
+        assert len(audiopaths_sid_text_new) > 1  # at least enough to fill one batch; TODO
+        self.audiopaths_sid_text = audiopaths_sid_text_new
+        self.lengths = lengths
+        self.spec_min = -12
+        self.spec_max = 2
+
+        self.filter_length_mel = self.win_length_mel = 1024
+        self.hop_length_mel = 256
+        self.n_mel_channels = 100
+        self.sampling_rate_mel = 24000
+        self.mel_fmin = 0
+        self.mel_fmax = None
+
+    def norm_spec(self, x):
+        return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
+
+    def get_audio_text_speaker_pair(self, audiopath_sid_text):
+        audiopath, phoneme_ids = audiopath_sid_text
+        text = torch.FloatTensor(phoneme_ids)
+        try:
+            spec, mel, wav = self.get_audio("%s/%s" % (self.path5, audiopath))
+            with torch.no_grad():
+                ssl = torch.load("%s/%s.pt" % (self.path4, audiopath), map_location="cpu")
+                if (ssl.shape[-1] != spec.shape[-1]):
+                    typee = ssl.dtype
+                    ssl = F.pad(ssl.float(), (0, 1), mode="replicate").to(typee)
+                ssl.requires_grad = False
+        except:
+            traceback.print_exc()
+            mel = torch.zeros(100, 180)
+            wav = torch.zeros(1, 96 * self.hop_length)
+            spec = torch.zeros(1025, 96)
+            ssl = torch.zeros(1, 768, 96)
+            text = text[-1:]
+            print("load audio or ssl error!!!!!!", audiopath)
+        return (ssl, spec, wav, mel, text)
+
+    def get_audio(self, filename):
+        audio_array = load_audio(filename, self.sampling_rate)  # load_audio already normalizes to -1~1, no need to divide by 32768 again
+        audio = torch.FloatTensor(audio_array)  # /32768
+        audio_norm = audio
+        audio_norm = audio_norm.unsqueeze(0)
+        audio_array24 = load_audio(filename, 24000)  # load_audio already normalizes to -1~1, no need to divide by 32768 again ###### resampling here could be sped up on the GPU
+        audio24 = torch.FloatTensor(audio_array24)  # /32768
+        audio_norm24 = audio24
+        audio_norm24 = audio_norm24.unsqueeze(0)
+
+        spec = spectrogram_torch(audio_norm, self.filter_length,
+                                 self.sampling_rate, self.hop_length, self.win_length,
+                                 center=False)
+        spec = torch.squeeze(spec, 0)
+
+        spec1 = spectrogram_torch(audio_norm24, self.filter_length_mel, self.sampling_rate_mel, self.hop_length_mel, self.win_length_mel, center=False)
+        mel = spec_to_mel_torch(spec1, self.filter_length_mel, self.n_mel_channels, self.sampling_rate_mel, self.mel_fmin, self.mel_fmax)
+        mel = torch.squeeze(mel, 0)
+        mel = self.norm_spec(mel)
+        # print(1111111, spec.shape, mel.shape)
+        return spec, mel, audio_norm
+
+    def get_sid(self, sid):
+        sid = torch.LongTensor([int(sid)])
+        return sid
+
+    def __getitem__(self, index):
+        # with torch.no_grad():
+        return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
+
+    def __len__(self):
+        return len(self.audiopaths_sid_text)
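
Note: the loader above produces two time scales from the same clip: a linear spectrogram on the 32 kHz waveform (hop hparams.hop_length, 640 in the usual v3 setup, i.e. 50 frames/s, matching the cnhubert features per the 32000/640=16000/320 comment) and a normalized 100-bin mel on a 24 kHz resample (hop 256, 93.75 frames/s). Their 1.875 ratio is what the collate below budgets for via max_ssl_len1 * 1.25 * 1.5, and what models.py later applies as scale_factor=1.875. A minimal arithmetic check, assuming the 640-sample hop:

ssl_fps = 32000 / 640        # 50.0 frames/s for the SSL / 32 kHz spectrogram stream
mel_fps = 24000 / 256        # 93.75 frames/s for the 24 kHz mel stream
print(mel_fps / ssl_fps)     # 1.875 -> the scale_factor applied to fea in models.py
print(1.25 * 1.5)            # 1.875 -> the padded budget behind max_mel_len in the collate below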
+
+class TextAudioSpeakerCollateV3b():
+    """ Zero-pads model inputs and targets
+    """
+
+    def __init__(self, return_ids=False):
+        self.return_ids = return_ids
+
+    def __call__(self, batch):
+        """Collates a training batch from normalized text, audio and speaker identities
+        PARAMS
+        ------
+        batch: [text_normalized, spec_normalized, wav_normalized, sid]
+        """
+        # ssl, spec, wav, mel, text
+        # Right zero-pad all one-hot text sequences to max input length
+        _, ids_sorted_decreasing = torch.sort(
+            torch.LongTensor([x[1].size(1) for x in batch]),
+            dim=0, descending=True)
+        # (ssl, spec, mel, text)
+        max_ssl_len = max([x[0].size(2) for x in batch])
+
+        max_ssl_len1 = int(8 * ((max_ssl_len // 8) + 1))
+        max_ssl_len = int(2 * ((max_ssl_len // 2) + 1))
+
+        # max_ssl_len = int(8 * ((max_ssl_len // 8) + 1))
+        # max_ssl_len1 = max_ssl_len
+
+        max_spec_len = max([x[1].size(1) for x in batch])
+        max_spec_len = int(2 * ((max_spec_len // 2) + 1))
+        max_wav_len = max([x[2].size(1) for x in batch])
+        max_text_len = max([x[4].size(0) for x in batch])
+        max_mel_len = int(max_ssl_len1 * 1.25 * 1.5)  ### 24000/256, 32000/640=16000/320
+
+        ssl_lengths = torch.LongTensor(len(batch))
+        spec_lengths = torch.LongTensor(len(batch))
+        text_lengths = torch.LongTensor(len(batch))
+        wav_lengths = torch.LongTensor(len(batch))
+        mel_lengths = torch.LongTensor(len(batch))
+
+        spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
+        mel_padded = torch.FloatTensor(len(batch), batch[0][3].size(0), max_mel_len)
+        ssl_padded = torch.FloatTensor(len(batch), batch[0][0].size(1), max_ssl_len)
+        text_padded = torch.LongTensor(len(batch), max_text_len)
+        wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+
+        spec_padded.zero_()
+        mel_padded.zero_()
+        ssl_padded.zero_()
+        text_padded.zero_()
+        wav_padded.zero_()
+
+        for i in range(len(ids_sorted_decreasing)):
+            row = batch[ids_sorted_decreasing[i]]
+            # ssl, spec, wav, mel, text
+            ssl = row[0]
+            ssl_padded[i, :, :ssl.size(2)] = ssl[0, :, :]
+            ssl_lengths[i] = ssl.size(2)
+
+            spec = row[1]
+            spec_padded[i, :, :spec.size(1)] = spec
+            spec_lengths[i] = spec.size(1)
+
+            wav = row[2]
+            wav_padded[i, :, :wav.size(1)] = wav
+            wav_lengths[i] = wav.size(1)
+
+            mel = row[3]
+            mel_padded[i, :, :mel.size(1)] = mel
+            mel_lengths[i] = mel.size(1)
+
+            text = row[4]
+            text_padded[i, :text.size(0)] = text
+            text_lengths[i] = text.size(0)
+
+        return ssl_padded, spec_padded, mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths, wav_padded, wav_lengths, mel_lengths
+        # return ssl_padded, spec_padded,mel_padded, ssl_lengths, spec_lengths, text_padded, text_lengths,mel_lengths
+
 class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
     """
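
That completes the data_utils.py additions. A minimal sketch of how the two new V3b classes could be wired together with the DistributedBucketSampler already defined in this file; the hps config object, batch size, and bucket boundaries are placeholders rather than values taken from this patch:

from torch.utils.data import DataLoader

train_dataset = TextAudioSpeakerLoaderV3b(hps.data)   # hps.data must expose exp_dir, sampling_rate, hop_length, ...
collate_fn = TextAudioSpeakerCollateV3b()
train_sampler = DistributedBucketSampler(
    train_dataset,
    batch_size=4,                                      # placeholder; size to fit GPU memory
    boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],
    num_replicas=1, rank=0, shuffle=True)
train_loader = DataLoader(train_dataset, num_workers=4, pin_memory=True,
                          collate_fn=collate_fn, batch_sampler=train_sampler)
# each batch then unpacks as:
# ssl, spec, mel, ssl_lengths, spec_lengths, text, text_lengths, wav, wav_lengths, mel_lengths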
diff --git a/GPT_SoVITS/module/mel_processing.py b/GPT_SoVITS/module/mel_processing.py
index 503825e..d94b045 100644
--- a/GPT_SoVITS/module/mel_processing.py
+++ b/GPT_SoVITS/module/mel_processing.py
@@ -145,7 +145,7 @@ def mel_spectrogram_torch(
         return_complex=False,
     )
 
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
 
     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
     spec = spectral_normalize_torch(spec)
diff --git a/GPT_SoVITS/module/models.py b/GPT_SoVITS/module/models.py
index d546fcd..338e88d 100644
--- a/GPT_SoVITS/module/models.py
+++ b/GPT_SoVITS/module/models.py
@@ -1099,17 +1099,15 @@ class CFM(torch.nn.Module):
         return x
 
     def forward(self, x1, x_lens, prompt_lens, mu, use_grad_ckpt):
         b, _, t = x1.shape
-
-        # random timestep
         t = torch.rand([b], device=mu.device, dtype=x1.dtype)
         x0 = torch.randn_like(x1, device=mu.device)
         vt = x1 - x0
         xt = x0 + t[:, None, None] * vt
         dt = torch.zeros_like(t, device=mu.device)
         prompt = torch.zeros_like(x1)
-        for bib in range(b):
-            prompt[bib, :, :prompt_lens[bib]] = x1[bib, :, :prompt_lens[bib]]
-            xt[bib, :, :prompt_lens[bib]] = 0
+        for i in range(b):
+            prompt[i, :, :prompt_lens[i]] = x1[i, :, :prompt_lens[i]]
+            xt[i, :, :prompt_lens[i]] = 0
         gailv = 0.3  # if ttime() > 1736250488 else 0.1
         if random.random() < gailv:
             base = torch.randint(2, 8, (t.shape[0],), device=mu.device)
@@ -1128,14 +1126,15 @@ class CFM(torch.nn.Module):
         vt_pred = self.estimator(xt, prompt, x_lens, t, dt, mu, use_grad_ckpt).transpose(2, 1)
         loss = 0
-
-        # print(45555555,estimator_out.shape,u.shape,x_lens,prompt_lens)#45555555 torch.Size([7, 465, 100]) torch.Size([7, 100, 465]) tensor([461, 461, 451, 451, 442, 442, 442], device='cuda:0') tensor([ 96,  93, 185,  59, 244, 262, 294], device='cuda:0')
-        for bib in range(b):
-            loss += self.criterion(vt_pred[bib, :, prompt_lens[bib]:x_lens[bib]], vt[bib, :, prompt_lens[bib]:x_lens[bib]])
+        for i in range(b):
+            loss += self.criterion(vt_pred[i, :, prompt_lens[i]:x_lens[i]], vt[i, :, prompt_lens[i]:x_lens[i]])
         loss /= b
 
-        return loss#, estimator_out + (1 - self.sigma_min) * z
+        return loss
+
+def set_no_grad(net_g):
+    for name, param in net_g.named_parameters():
+        param.requires_grad = False
 
 class SynthesizerTrnV3(nn.Module):
     """
@@ -1210,7 +1209,6 @@ class SynthesizerTrnV3(nn.Module):
             bins=1024
         )
         self.freeze_quantizer = freeze_quantizer
-
         inter_channels2 = 512
         self.bridge = nn.Sequential(
             nn.Conv1d(inter_channels, inter_channels2, 1, stride=1),
@@ -1219,6 +1217,10 @@ class SynthesizerTrnV3(nn.Module):
         self.wns1 = Encoder(inter_channels2, inter_channels2, inter_channels2, 5, 1, 8, gin_channels=gin_channels)
         self.linear_mel = nn.Conv1d(inter_channels2, 100, 1, stride=1)
         self.cfm = CFM(100, DiT(**dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=inter_channels2, conv_layers=4)),)  # text_dim is condition feature dim
+        if self.freeze_quantizer == True:
+            set_no_grad(self.ssl_proj)
+            set_no_grad(self.quantizer)
+            set_no_grad(self.enc_p)
 
     def forward(self, ssl, y, mel, ssl_lengths, y_lengths, text, text_lengths, mel_lengths, use_grad_ckpt):  # ssl_lengths no need now
         with autocast(enabled=False):
@@ -1229,13 +1231,13 @@ class SynthesizerTrnV3(nn.Module):
             if self.freeze_quantizer:
                 self.ssl_proj.eval()  #
                 self.quantizer.eval()
+                self.enc_p.eval()
             ssl = self.ssl_proj(ssl)
             quantized, codes, commit_loss, quantized_list = self.quantizer(
                 ssl, layers=[0]
             )
-        with maybe_no_grad:
-            quantized = F.interpolate(quantized, scale_factor=2, mode="nearest")  ##BCT
-            x, m_p, logs_p, y_mask = self.enc_p(quantized, y_lengths, text, text_lengths, ge)
+        quantized = F.interpolate(quantized, scale_factor=2, mode="nearest")  ##BCT
+        x, m_p, logs_p, y_mask = self.enc_p(quantized, y_lengths, text, text_lengths, ge)
         fea = self.bridge(x)
         fea = F.interpolate(fea, scale_factor=1.875, mode="nearest")  ##BCT
         fea, y_mask_ = self.wns1(fea, mel_lengths, ge)  ## If the 1-minute fine-tuning works fine, no need to manually adjust the learning rate.
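
The two hunks above carry the v3 training objective: CFM.forward samples one flow time per item, noises the target mel along a straight path between Gaussian noise and the target, blanks the prompt frames out of the noisy input while passing them separately as a clean condition, and regresses the predicted velocity only over the non-prompt, non-padded frames. A minimal self-contained restatement of that step follows; the estimator signature is simplified (the real DiT call also takes dt and a grad-checkpoint flag) and the random-stride gailv branch is omitted:

import torch
import torch.nn.functional as F

def cfm_step(estimator, x1, x_lens, prompt_lens, mu):
    # x1: (B, C, T) target mel; mu: condition features; lengths are per-sample frame counts
    b = x1.shape[0]
    t = torch.rand([b], device=x1.device, dtype=x1.dtype)          # one flow time per sample
    x0 = torch.randn_like(x1)                                      # noise endpoint of the path
    vt = x1 - x0                                                   # target velocity (constant along the path)
    xt = x0 + t[:, None, None] * vt                                # point on the straight path at time t
    prompt = torch.zeros_like(x1)
    for i in range(b):
        prompt[i, :, :prompt_lens[i]] = x1[i, :, :prompt_lens[i]]  # clean prompt kept as condition
        xt[i, :, :prompt_lens[i]] = 0                              # and removed from the noisy input
    vt_pred = estimator(xt, prompt, x_lens, t, mu)                 # predicted velocity, (B, C, T)
    loss = x1.new_zeros(())
    for i in range(b):                                             # score only non-prompt, non-padded frames
        loss = loss + F.mse_loss(vt_pred[i, :, prompt_lens[i]:x_lens[i]],
                                 vt[i, :, prompt_lens[i]:x_lens[i]])
    return loss / b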
@@ -1274,3 +1276,148 @@ class SynthesizerTrnV3(nn.Module):
         ssl = self.ssl_proj(x)
         quantized, codes, commit_loss, quantized_list = self.quantizer(ssl)
         return codes.transpose(0, 1)
+
+class SynthesizerTrnV3b(nn.Module):
+    """
+    Synthesizer for Training
+    """
+
+    def __init__(self,
+                 spec_channels,
+                 segment_size,
+                 inter_channels,
+                 hidden_channels,
+                 filter_channels,
+                 n_heads,
+                 n_layers,
+                 kernel_size,
+                 p_dropout,
+                 resblock,
+                 resblock_kernel_sizes,
+                 resblock_dilation_sizes,
+                 upsample_rates,
+                 upsample_initial_channel,
+                 upsample_kernel_sizes,
+                 n_speakers=0,
+                 gin_channels=0,
+                 use_sdp=True,
+                 semantic_frame_rate=None,
+                 freeze_quantizer=None,
+                 **kwargs):
+
+        super().__init__()
+        self.spec_channels = spec_channels
+        self.inter_channels = inter_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.resblock = resblock
+        self.resblock_kernel_sizes = resblock_kernel_sizes
+        self.resblock_dilation_sizes = resblock_dilation_sizes
+        self.upsample_rates = upsample_rates
+        self.upsample_initial_channel = upsample_initial_channel
+        self.upsample_kernel_sizes = upsample_kernel_sizes
+        self.segment_size = segment_size
+        self.n_speakers = n_speakers
+        self.gin_channels = gin_channels
+
+        self.model_dim = 512
+        self.use_sdp = use_sdp
+        self.enc_p = TextEncoder(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
+        # self.ref_enc = modules.MelStyleEncoder(spec_channels, style_vector_dim=gin_channels)  ### Rollback
+        self.ref_enc = modules.MelStyleEncoder(704, style_vector_dim=gin_channels)  ### Rollback
+        self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
+                             upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+        self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
+                                      gin_channels=gin_channels)
+        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+
+        ssl_dim = 768
+        assert semantic_frame_rate in ['25hz', "50hz"]
+        self.semantic_frame_rate = semantic_frame_rate
+        if semantic_frame_rate == '25hz':
+            self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 2, stride=2)
+        else:
+            self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 1, stride=1)
+
+        self.quantizer = ResidualVectorQuantizer(
+            dimension=ssl_dim,
+            n_q=1,
+            bins=1024
+        )
+        self.freeze_quantizer = freeze_quantizer
+
+        inter_channels2 = 512
+        self.bridge = nn.Sequential(
+            nn.Conv1d(inter_channels, inter_channels2, 1, stride=1),
+            nn.LeakyReLU()
+        )
+        self.wns1 = Encoder(inter_channels2, inter_channels2, inter_channels2, 5, 1, 8, gin_channels=gin_channels)
+        self.linear_mel = nn.Conv1d(inter_channels2, 100, 1, stride=1)
+        self.cfm = CFM(100, DiT(**dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=inter_channels2, conv_layers=4)),)  # text_dim is condition feature dim
+
+    def forward(self, ssl, y, mel, ssl_lengths, y_lengths, text, text_lengths, mel_lengths):  # ssl_lengths no need now
+        with autocast(enabled=False):
+            y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, y.size(2)), 1).to(y.dtype)
+            ge = self.ref_enc(y[:, :704] * y_mask, y_mask)
+            # ge = self.ref_enc(y * y_mask, y_mask)  # change back, new spec setting is whole 24k
+            # ge = None
+            maybe_no_grad = torch.no_grad() if self.freeze_quantizer else contextlib.nullcontext()
+            with maybe_no_grad:
+                if self.freeze_quantizer:
+                    self.ssl_proj.eval()
+                    self.quantizer.eval()
+                ssl = self.ssl_proj(ssl)
+                quantized, codes, commit_loss, quantized_list = self.quantizer(
+                    ssl, layers=[0]
+                )
+            quantized = F.interpolate(quantized, scale_factor=2, mode="nearest")  ##BCT
+            x, m_p, logs_p, y_mask = self.enc_p(quantized, y_lengths, text, text_lengths, ge)
+            z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=ge)
+            z_p = self.flow(z, y_mask, g=ge)
+            z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+            o = self.dec(z_slice, g=ge)
+        fea = self.bridge(x)
+        fea = F.interpolate(fea, scale_factor=1.875, mode="nearest")  ##BCT
+        fea, y_mask_ = self.wns1(fea, mel_lengths, ge)
+        learned_mel = self.linear_mel(fea)
+        B = ssl.shape[0]
+        prompt_len_max = mel_lengths * 2 / 3
+        prompt_len = (torch.rand([B], device=fea.device) * prompt_len_max).floor().to(dtype=torch.long)  #
+        minn = min(mel.shape[-1], fea.shape[-1])
+        mel = mel[:, :, :minn]
+        fea = fea[:, :, :minn]
+        cfm_loss = self.cfm(mel, mel_lengths, prompt_len, fea)  # fea==cond, y_lengths==target_mel_lengths; ge not needed
+        return commit_loss, cfm_loss, F.mse_loss(learned_mel, mel), o, ids_slice, y_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), quantized
+
+    @torch.no_grad()
+    def decode_encp(self, codes, text, refer, ge=None):
+        # print(2333333, refer.shape)
+        # ge = None
+        if (ge == None):
+            refer_lengths = torch.LongTensor([refer.size(2)]).to(refer.device)
+            refer_mask = torch.unsqueeze(commons.sequence_mask(refer_lengths, refer.size(2)), 1).to(refer.dtype)
+            ge = self.ref_enc(refer[:, :704] * refer_mask, refer_mask)
+        y_lengths = torch.LongTensor([int(codes.size(2) * 2)]).to(codes.device)
+        y_lengths1 = torch.LongTensor([int(codes.size(2) * 2.5 * 1.5)]).to(codes.device)
+        text_lengths = torch.LongTensor([text.size(-1)]).to(text.device)
+
+        quantized = self.quantizer.decode(codes)
+        if self.semantic_frame_rate == '25hz':
+            quantized = F.interpolate(quantized, scale_factor=2, mode="nearest")  ##BCT
+        x, m_p, logs_p, y_mask = self.enc_p(quantized, y_lengths, text, text_lengths, ge)
+        fea = self.bridge(x)
+        fea = F.interpolate(fea, scale_factor=1.875, mode="nearest")  ##BCT
+        #### more wn parameters to learn mel
+        fea, y_mask_ = self.wns1(fea, y_lengths1, ge)
+        return fea, ge
+
+    def extract_latent(self, x):
+        ssl = self.ssl_proj(x)
+        quantized, codes, commit_loss, quantized_list = self.quantizer(ssl)
+        return codes.transpose(0, 1)
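
This diff only adds the v3b data pipeline and model code; the LoRA side of the commit message ("8G GPU memory is enough") lives in the training script, which is not part of this patch. A minimal sketch of the idea, assuming the peft library and assuming the DiT attention projections are named to_q/to_k/to_v/to_out.0 (neither assumption is taken from this diff): wrap only the flow-matching estimator with low-rank adapters so that just the adapter weights require gradients.

from peft import LoraConfig, get_peft_model

def attach_lora(vq_model, lora_rank=32):
    # wrap only vq_model.cfm; module names and rank are illustrative assumptions
    lora_config = LoraConfig(
        r=lora_rank,
        lora_alpha=lora_rank,
        target_modules=["to_k", "to_q", "to_v", "to_out.0"],
        init_lora_weights=True,
    )
    vq_model.cfm = get_peft_model(vq_model.cfm, lora_config)
    trainable = sum(p.numel() for p in vq_model.cfm.parameters() if p.requires_grad)
    print("trainable CFM parameters:", trainable)
    return vq_model

Together with freeze_quantizer / set_no_grad above, this leaves most of SynthesizerTrnV3 frozen during fine-tuning, which is what makes the small memory footprint plausible.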