From 25a2bbd3b9ba1d0bb83fb6d81e9b3a4dbe7be745 Mon Sep 17 00:00:00 2001
From: KamioRinn
Date: Sun, 21 Jul 2024 01:40:29 +0800
Subject: [PATCH] Replace deprecated function

---
 GPT_SoVITS/module/attentions.py  |  3 ++-
 GPT_SoVITS/module/models.py      |  3 ++-
 GPT_SoVITS/module/models_onnx.py |  3 ++-
 GPT_SoVITS/module/modules.py     | 15 ++++++++-------
 GPT_SoVITS/module/mrte_model.py  |  3 ++-
 5 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/GPT_SoVITS/module/attentions.py b/GPT_SoVITS/module/attentions.py
index a2e9e515..31ca0f9d 100644
--- a/GPT_SoVITS/module/attentions.py
+++ b/GPT_SoVITS/module/attentions.py
@@ -433,7 +433,8 @@ class FFN(nn.Module):
 
 
 import torch.nn as nn
-from torch.nn.utils import remove_weight_norm, weight_norm
+from torch.nn.utils.parametrizations import weight_norm
+from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
 
 
 class Depthwise_Separable_Conv1D(nn.Module):
diff --git a/GPT_SoVITS/module/models.py b/GPT_SoVITS/module/models.py
index 58a21eee..7586b8c3 100644
--- a/GPT_SoVITS/module/models.py
+++ b/GPT_SoVITS/module/models.py
@@ -9,7 +9,8 @@ from module import modules
 from module import attentions
 
 from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
+from torch.nn.utils.parametrizations import weight_norm, spectral_norm
 from module.commons import init_weights, get_padding
 from module.mrte_model import MRTE
 from module.quantize import ResidualVectorQuantizer
diff --git a/GPT_SoVITS/module/models_onnx.py b/GPT_SoVITS/module/models_onnx.py
index 232fd74d..48201121 100644
--- a/GPT_SoVITS/module/models_onnx.py
+++ b/GPT_SoVITS/module/models_onnx.py
@@ -9,7 +9,8 @@ from module import modules
 from module import attentions_onnx as attentions
 
 from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
+from torch.nn.utils.parametrizations import weight_norm, spectral_norm
 from module.commons import init_weights, get_padding
 from module.mrte_model import MRTE
 from module.quantize import ResidualVectorQuantizer
diff --git a/GPT_SoVITS/module/modules.py b/GPT_SoVITS/module/modules.py
index f4447455..91b28632 100644
--- a/GPT_SoVITS/module/modules.py
+++ b/GPT_SoVITS/module/modules.py
@@ -5,7 +5,8 @@ from torch import nn
 from torch.nn import functional as F
 
 from torch.nn import Conv1d
-from torch.nn.utils import weight_norm, remove_weight_norm
+from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 
 from module import commons
 from module.commons import init_weights, get_padding
@@ -159,7 +160,7 @@ class WN(torch.nn.Module):
             cond_layer = torch.nn.Conv1d(
                 gin_channels, 2 * hidden_channels * n_layers, 1
             )
-            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
+            self.cond_layer = weight_norm(cond_layer, name="weight")
 
         for i in range(n_layers):
             dilation = dilation_rate**i
@@ -171,7 +172,7 @@ class WN(torch.nn.Module):
                 dilation=dilation,
                 padding=padding,
             )
-            in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
+            in_layer = weight_norm(in_layer, name="weight")
             self.in_layers.append(in_layer)
 
             # last one is not necessary
@@ -181,7 +182,7 @@ class WN(torch.nn.Module):
                 res_skip_channels = hidden_channels
 
             res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
-            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
+            res_skip_layer = weight_norm(res_skip_layer, name="weight")
             self.res_skip_layers.append(res_skip_layer)
 
     def forward(self, x, x_mask, g=None, **kwargs):
@@ -213,11 +214,11 @@ class WN(torch.nn.Module):
 
     def remove_weight_norm(self):
         if self.gin_channels != 0:
-            torch.nn.utils.remove_weight_norm(self.cond_layer)
+            remove_weight_norm(self.cond_layer, "weight")
         for l in self.in_layers:
-            torch.nn.utils.remove_weight_norm(l)
+            remove_weight_norm(l, "weight")
         for l in self.res_skip_layers:
-            torch.nn.utils.remove_weight_norm(l)
+            remove_weight_norm(l, "weight")
 
 
 class ResBlock1(torch.nn.Module):
diff --git a/GPT_SoVITS/module/mrte_model.py b/GPT_SoVITS/module/mrte_model.py
index b0cd242c..14aeb46a 100644
--- a/GPT_SoVITS/module/mrte_model.py
+++ b/GPT_SoVITS/module/mrte_model.py
@@ -2,7 +2,8 @@
 
 import torch
 from torch import nn
-from torch.nn.utils import remove_weight_norm, weight_norm
+from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
+from torch.nn.utils.parametrizations import weight_norm
 from module.attentions import MultiHeadAttention
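
Note (not part of the patch): the migration above replaces the deprecated hook-based torch.nn.utils.weight_norm with the parametrization-based torch.nn.utils.parametrizations.weight_norm available in recent PyTorch releases. Unlike the old torch.nn.utils.remove_weight_norm(module), torch.nn.utils.parametrize.remove_parametrizations takes the parametrized tensor's name as a required second argument, which is why the aliased calls in WN.remove_weight_norm pass "weight" explicitly. A minimal standalone sketch of the two APIs, using a toy Conv1d layer chosen purely for illustration:

    import torch
    from torch.nn import Conv1d
    from torch.nn.utils.parametrizations import weight_norm
    from torch.nn.utils.parametrize import is_parametrized, remove_parametrizations

    # Register weight normalization as a parametrization: the layer's weight
    # is recomputed as g * v / ||v|| and the underlying tensors live under
    # layer.parametrizations.weight instead of being patched by hooks.
    layer = weight_norm(Conv1d(4, 8, 3), name="weight")  # toy layer
    assert is_parametrized(layer, "weight")

    x = torch.randn(1, 4, 16)
    y = layer(x)

    # Replacement for the old remove_weight_norm(module): the tensor name is
    # required, and leave_parametrized=True (the default) bakes the current
    # normalized value back into a plain .weight parameter.
    remove_parametrizations(layer, "weight")
    assert not is_parametrized(layer)
    assert torch.allclose(y, layer(x))  # forward output is unchanged

Keeping the import alias (remove_parametrizations as remove_weight_norm) means the rest of each module reads as before; only the extra "weight" argument at the call sites reveals the new API.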