From b7e7a0958994c19a76b455082ddd844b40f88975 Mon Sep 17 00:00:00 2001
From: hcwu1993 <15855138469@163.com>
Date: Thu, 2 May 2024 21:26:44 +0800
Subject: [PATCH] modify freeze_quantizer mode, avoid quantizer's codebook
 updating (#953)

---
 GPT_SoVITS/module/models.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/GPT_SoVITS/module/models.py b/GPT_SoVITS/module/models.py
index be0e07c1..26840ccc 100644
--- a/GPT_SoVITS/module/models.py
+++ b/GPT_SoVITS/module/models.py
@@ -16,6 +16,7 @@ from module.mrte_model import MRTE
 from module.quantize import ResidualVectorQuantizer
 from text import symbols
 from torch.cuda.amp import autocast
+import contextlib
 
 
 class StochasticDurationPredictor(nn.Module):
@@ -891,9 +892,10 @@ class SynthesizerTrn(nn.Module):
 
         self.ssl_proj = nn.Conv1d(ssl_dim, ssl_dim, 1, stride=1)
         self.quantizer = ResidualVectorQuantizer(dimension=ssl_dim, n_q=1, bins=1024)
-        if freeze_quantizer:
-            self.ssl_proj.requires_grad_(False)
-            self.quantizer.requires_grad_(False)
+        self.freeze_quantizer = freeze_quantizer
+        # if freeze_quantizer:
+        #     self.ssl_proj.requires_grad_(False)
+        #     self.quantizer.requires_grad_(False)
             #self.quantizer.eval()
             # self.enc_p.text_embedding.requires_grad_(False)
             # self.enc_p.encoder_text.requires_grad_(False)
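
Note on the change: calling requires_grad_(False) in the constructor only blocks gradient updates, while the ResidualVectorQuantizer codebook is typically refreshed through EMA buffer updates during a training-mode forward pass, so it can still drift. Storing self.freeze_quantizer (together with the new import contextlib) lets the freeze be enforced at forward time instead. The corresponding forward-pass hunk is not included in this excerpt; the sketch below shows one way the stored flag could be consumed there. The method name, and the exact call into self.quantizer, are assumptions for illustration, not lines taken from the patch.

# Minimal sketch, not part of the patch. Assumes self.freeze_quantizer,
# self.ssl_proj, and self.quantizer as defined in the constructor above;
# the method name and quantizer call are illustrative placeholders.
import contextlib

import torch


def quantize_ssl(self, ssl):
    # Disable autograd when the quantizer is frozen, otherwise use a no-op
    # context so a single with-block serves both the frozen and trainable cases.
    maybe_no_grad = torch.no_grad() if self.freeze_quantizer else contextlib.nullcontext()
    with maybe_no_grad:
        if self.freeze_quantizer:
            # eval() keeps training-mode statistics (e.g. EMA codebook updates)
            # from being refreshed on this forward pass.
            self.ssl_proj.eval()
            self.quantizer.eval()
        ssl = self.ssl_proj(ssl)
        # Exact signature/return values of the quantizer call are not shown in
        # this excerpt; this line is a placeholder for the real invocation.
        return self.quantizer(ssl)

Using contextlib.nullcontext() as the "else" branch is presumably why the patch adds the import: it avoids duplicating the quantization code path for the frozen and unfrozen cases.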