From bce451a2d1641e581e200297d01f219aeaaf7299 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Thu, 1 Aug 2024 21:47:30 +0800
Subject: [PATCH] fix cpu+fp16 inference issue

fix cpu+fp16 inference issue
---
 GPT_SoVITS/prepare_datasets/1-get-text.py          | 4 ++--
 GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py | 5 +++--
 GPT_SoVITS/prepare_datasets/3-get-semantic.py      | 5 +++--
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py
index e01a63b..7be2508 100644
--- a/GPT_SoVITS/prepare_datasets/1-get-text.py
+++ b/GPT_SoVITS/prepare_datasets/1-get-text.py
@@ -10,13 +10,13 @@ all_parts = os.environ.get("all_parts")
 os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
 opt_dir = os.environ.get("opt_dir")
 bert_pretrained_dir = os.environ.get("bert_pretrained_dir")
-is_half = eval(os.environ.get("is_half", "True"))
+import torch
+is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
 import sys, numpy as np, traceback, pdb
 import os.path
 from glob import glob
 from tqdm import tqdm
 from text.cleaner import clean_text
-import torch
 from transformers import AutoModelForMaskedLM, AutoTokenizer
 import numpy as np
 
diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
index 17394ee..00ebab5 100644
--- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
+++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
@@ -10,11 +10,12 @@ os.environ["CUDA_VISIBLE_DEVICES"]= os.environ.get("_CUDA_VISIBLE_DEVICES")
 from feature_extractor import cnhubert
 opt_dir= os.environ.get("opt_dir")
 cnhubert.cnhubert_base_path= os.environ.get("cnhubert_base_dir")
-is_half=eval(os.environ.get("is_half","True"))
+import torch
+is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
 import pdb,traceback,numpy as np,logging
 from scipy.io import wavfile
-import librosa,torch
+import librosa
 now_dir = os.getcwd()
 sys.path.append(now_dir)
 from tools.my_utils import load_audio
diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py
index 3448a58..64e640e 100644
--- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py
+++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py
@@ -8,7 +8,8 @@ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
 opt_dir = os.environ.get("opt_dir")
 pretrained_s2G = os.environ.get("pretrained_s2G")
 s2config_path = os.environ.get("s2config_path")
-is_half = eval(os.environ.get("is_half", "True"))
+import torch
+is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
 import math, traceback
 import multiprocessing
 import sys, pdb
@@ -19,7 +20,7 @@ from random import shuffle
 import torch.multiprocessing as mp
 from glob import glob
 from tqdm import tqdm
-import logging, librosa, utils, torch
+import logging, librosa, utils
 from module.models import SynthesizerTrn
 logging.getLogger("numba").setLevel(logging.WARNING)