diff --git a/GPT_SoVITS/TTS_infer_pack/TTS.py b/GPT_SoVITS/TTS_infer_pack/TTS.py
index 53f392b0..04287312 100644
--- a/GPT_SoVITS/TTS_infer_pack/TTS.py
+++ b/GPT_SoVITS/TTS_infer_pack/TTS.py
@@ -583,11 +583,18 @@ class TTS:
                 self.vocoder.cpu()
                 del self.vocoder
                 self.empty_cache()
-
-            self.vocoder = BigVGAN.from_pretrained(
-                "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,),
-                use_cuda_kernel=False,
-            )  # if True, RuntimeError: Ninja is required to load C++ extensions
+            if not vocoder_path:
+                self.vocoder = BigVGAN.from_pretrained(
+                    "%s/GPT_SoVITS/pretrained_models/models--nvidia--bigvgan_v2_24khz_100band_256x" % (now_dir,),
+                    use_cuda_kernel=False,
+                )  # if True, RuntimeError: Ninja is required to load C++ extensions
+            else:
+                self.vocoder = BigVGAN.from_pretrained(
+                    vocoder_path,
+                    use_cuda_kernel=False,
+                    local_files_only=True,
+                    cache_dir=vocoder_path,
+                )
 
             # remove weight norm in the model and set to eval mode
             self.vocoder.remove_weight_norm()
diff --git a/GPT_SoVITS/process_ckpt.py b/GPT_SoVITS/process_ckpt.py
index 338b2309..82fb158d 100644
--- a/GPT_SoVITS/process_ckpt.py
+++ b/GPT_SoVITS/process_ckpt.py
@@ -4,6 +4,14 @@ from time import time as ttime
 import shutil
 import os
 import torch
+import sys
+
+# Older checkpoints pickle HParams as "utils.HParams"; alias the module and
+# allowlist the class so torch.load(..., weights_only=True) can unpickle them.
+import GPT_SoVITS.utils as _gpt_utils
+sys.modules['utils'] = _gpt_utils
+from GPT_SoVITS.utils import HParams
+HParams.__module__ = 'utils'
+torch.serialization.add_safe_globals([HParams])
 from GPT_SoVITS.tools.i18n.i18n import I18nAuto
 
 i18n = I18nAuto()
@@ -120,5 +128,5 @@ def load_sovits_new(sovits_path):
         bio = BytesIO()
         bio.write(data)
         bio.seek(0)
-        return torch.load(bio, map_location="cpu", weights_only=False)
-    return torch.load(sovits_path, map_location="cpu", weights_only=False)
+        return torch.load(bio, map_location="cpu", weights_only=True)
+    return torch.load(sovits_path, map_location="cpu", weights_only=True)