From 55badf45c56bed6e0491f080faf4d4fc277968c1 Mon Sep 17 00:00:00 2001
From: Jarod Mica
Date: Mon, 23 Dec 2024 02:03:19 -0800
Subject: [PATCH] fix

---
 GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py      | 2 +-
 GPT_SoVITS/api_v2.py                               | 2 +-
 GPT_SoVITS/inference_cli.py                        | 2 +-
 GPT_SoVITS/inference_gui.py                        | 2 +-
 GPT_SoVITS/inference_webui.py                      | 4 ++--
 GPT_SoVITS/inference_webui_fast.py                 | 2 +-
 GPT_SoVITS/onnx_export.py                          | 2 +-
 GPT_SoVITS/prepare_datasets/1-get-text.py          | 2 +-
 GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py | 2 +-
 GPT_SoVITS/prepare_datasets/3-get-semantic.py      | 2 +-
 GPT_SoVITS/process_ckpt.py                         | 2 +-
 GPT_SoVITS/tools/asr/fasterwhisper_asr.py          | 4 ++--
 GPT_SoVITS/tools/my_utils.py                       | 2 +-
 GPT_SoVITS/tools/slice_audio.py                    | 2 +-
 GPT_SoVITS/tools/uvr5/webui.py                     | 4 ++--
 api.py                                             | 2 +-
 16 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py b/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py
index beb31b93..0f1ab7a8 100644
--- a/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py
+++ b/GPT_SoVITS/TTS_infer_pack/TextPreprocessor.py
@@ -15,7 +15,7 @@ from GPT_SoVITS.text import cleaned_text_to_sequence
 from transformers import AutoModelForMaskedLM, AutoTokenizer
 from TTS_infer_pack.text_segmentation_method import split_big_text, splits, get_method as get_seg_method
 
-from tools.i18n.i18n import I18nAuto, scan_language_list
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto, scan_language_list
 
 language=os.environ.get("language","Auto")
 language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
diff --git a/GPT_SoVITS/api_v2.py b/GPT_SoVITS/api_v2.py
index 84e6ffd7..a22386a7 100644
--- a/GPT_SoVITS/api_v2.py
+++ b/GPT_SoVITS/api_v2.py
@@ -117,7 +117,7 @@ import uvicorn
 from importlib.resources import files
 from io import BytesIO
 
-from tools.i18n.i18n import I18nAuto
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto
 from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config
 from GPT_SoVITS.TTS_infer_pack.text_segmentation_method import get_method_names as get_cut_method_names
 from fastapi.responses import StreamingResponse
diff --git a/GPT_SoVITS/inference_cli.py b/GPT_SoVITS/inference_cli.py
index bd987aaf..251ce791 100644
--- a/GPT_SoVITS/inference_cli.py
+++ b/GPT_SoVITS/inference_cli.py
@@ -2,7 +2,7 @@ import argparse
 import os
 import soundfile as sf
 
-from tools.i18n.i18n import I18nAuto
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto
 from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav
 
 i18n = I18nAuto()
diff --git a/GPT_SoVITS/inference_gui.py b/GPT_SoVITS/inference_gui.py
index 93ea0f5d..c02120d4 100644
--- a/GPT_SoVITS/inference_gui.py
+++ b/GPT_SoVITS/inference_gui.py
@@ -5,7 +5,7 @@ from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushB
 from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox
 import soundfile as sf
 
-from tools.i18n.i18n import I18nAuto
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto
 i18n = I18nAuto()
 
 from inference_webui import gpt_path, sovits_path, change_gpt_weights, change_sovits_weights, get_tts_wav
diff --git a/GPT_SoVITS/inference_webui.py b/GPT_SoVITS/inference_webui.py
index 8ca53c37..946ea59a 100644
--- a/GPT_SoVITS/inference_webui.py
+++ b/GPT_SoVITS/inference_webui.py
@@ -89,8 +89,8 @@ from GPT_SoVITS.text import cleaned_text_to_sequence
 from GPT_SoVITS.text.cleaner import clean_text
 from time import time as ttime
 from GPT_SoVITS.module.mel_processing import spectrogram_torch
-from tools.my_utils import load_audio
-from tools.i18n.i18n import I18nAuto, scan_language_list
+from GPT_SoVITS.tools.my_utils import load_audio
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto, scan_language_list
 
 language=os.environ.get("language","Auto")
 language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
diff --git a/GPT_SoVITS/inference_webui_fast.py b/GPT_SoVITS/inference_webui_fast.py
index dcc2bcf9..9ea80455 100644
--- a/GPT_SoVITS/inference_webui_fast.py
+++ b/GPT_SoVITS/inference_webui_fast.py
@@ -46,7 +46,7 @@ version=os.environ.get("version","v2")
 import gradio as gr
 from TTS_infer_pack.TTS import TTS, TTS_Config
 from TTS_infer_pack.text_segmentation_method import get_method
-from tools.i18n.i18n import I18nAuto, scan_language_list
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto, scan_language_list
 
 language=os.environ.get("language","Auto")
 language=sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
diff --git a/GPT_SoVITS/onnx_export.py b/GPT_SoVITS/onnx_export.py
index 092be008..80954710 100644
--- a/GPT_SoVITS/onnx_export.py
+++ b/GPT_SoVITS/onnx_export.py
@@ -10,7 +10,7 @@ cnhubert.cnhubert_base_path = cnhubert_base_path
 ssl_model = cnhubert.get_model()
 from GPT_SoVITS.text import cleaned_text_to_sequence
 import soundfile
-from tools.my_utils import load_audio
+from GPT_SoVITS.tools.my_utils import load_audio
 import os
 import json
 
diff --git a/GPT_SoVITS/prepare_datasets/1-get-text.py b/GPT_SoVITS/prepare_datasets/1-get-text.py
index ef569488..38cab467 100644
--- a/GPT_SoVITS/prepare_datasets/1-get-text.py
+++ b/GPT_SoVITS/prepare_datasets/1-get-text.py
@@ -21,7 +21,7 @@ from tqdm import tqdm
 from GPT_SoVITS.text.cleaner import clean_text
 from transformers import AutoModelForMaskedLM, AutoTokenizer
 import numpy as np
-from tools.my_utils import clean_path
+from GPT_SoVITS.tools.my_utils import clean_path
 
 # inp_text=sys.argv[1]
 # inp_wav_dir=sys.argv[2]
diff --git a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
index 27b61f27..32fe8045 100644
--- a/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
+++ b/GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
@@ -19,7 +19,7 @@ from scipy.io import wavfile
 import librosa
 now_dir = os.getcwd()
 sys.path.append(now_dir)
-from tools.my_utils import load_audio,clean_path
+from GPT_SoVITS.tools.my_utils import load_audio,clean_path
 
 # from config import cnhubert_base_path
 # cnhubert.cnhubert_base_path=cnhubert_base_path
diff --git a/GPT_SoVITS/prepare_datasets/3-get-semantic.py b/GPT_SoVITS/prepare_datasets/3-get-semantic.py
index 365b3a97..602877cd 100644
--- a/GPT_SoVITS/prepare_datasets/3-get-semantic.py
+++ b/GPT_SoVITS/prepare_datasets/3-get-semantic.py
@@ -24,7 +24,7 @@ from glob import glob
 from tqdm import tqdm
 import logging, librosa, utils
 from GPT_SoVITS.module.models import SynthesizerTrn
-from tools.my_utils import clean_path
+from GPT_SoVITS.tools.my_utils import clean_path
 
 logging.getLogger("numba").setLevel(logging.WARNING)
 # from config import pretrained_s2G
diff --git a/GPT_SoVITS/process_ckpt.py b/GPT_SoVITS/process_ckpt.py
index 3a436f10..0fd4783e 100644
--- a/GPT_SoVITS/process_ckpt.py
+++ b/GPT_SoVITS/process_ckpt.py
@@ -3,7 +3,7 @@ from collections import OrderedDict
 from time import time as ttime
 import shutil,os
 import torch
-from tools.i18n.i18n import I18nAuto
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto
 
 i18n = I18nAuto()
 
diff --git a/GPT_SoVITS/tools/asr/fasterwhisper_asr.py b/GPT_SoVITS/tools/asr/fasterwhisper_asr.py
index d46cbbd7..ba124dc2 100644
--- a/GPT_SoVITS/tools/asr/fasterwhisper_asr.py
+++ b/GPT_SoVITS/tools/asr/fasterwhisper_asr.py
@@ -9,7 +9,7 @@ import torch
 from faster_whisper import WhisperModel
 from tqdm import tqdm
 
-from tools.asr.config import check_fw_local_models
+from GPT_SoVITS.tools.asr.config import check_fw_local_models
 
 language_code_list = [
     "af", "am", "ar", "as", "az",
@@ -69,7 +69,7 @@ def execute_asr(input_folder, output_folder, model_size, language, precision):
             if info.language == "zh":
                 print("检测为中文文本, 转 FunASR 处理")
                 if("only_asr" not in globals()):
-                    from tools.asr.funasr_asr import only_asr #如果用英文就不需要导入下载模型
+                    from GPT_SoVITS.tools.asr.funasr_asr import only_asr #如果用英文就不需要导入下载模型
                 text = only_asr(file_path, language=info.language.lower())
 
             if text == '':
diff --git a/GPT_SoVITS/tools/my_utils.py b/GPT_SoVITS/tools/my_utils.py
index c1469234..60bdd0fb 100644
--- a/GPT_SoVITS/tools/my_utils.py
+++ b/GPT_SoVITS/tools/my_utils.py
@@ -2,7 +2,7 @@ import platform,os,traceback
 import ffmpeg
 import numpy as np
 import gradio as gr
-from tools.i18n.i18n import I18nAuto
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto
 import pandas as pd
 
 i18n = I18nAuto(language=os.environ.get('language','Auto'))
diff --git a/GPT_SoVITS/tools/slice_audio.py b/GPT_SoVITS/tools/slice_audio.py
index b9912ae0..2708a0d0 100644
--- a/GPT_SoVITS/tools/slice_audio.py
+++ b/GPT_SoVITS/tools/slice_audio.py
@@ -3,7 +3,7 @@ import traceback
 from scipy.io import wavfile
 # parent_directory = os.path.dirname(os.path.abspath(__file__))
 # sys.path.append(parent_directory)
-from tools.my_utils import load_audio
+from GPT_SoVITS.tools.my_utils import load_audio
 from slicer2 import Slicer
 
 def slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_max,alpha,i_part,all_part):
diff --git a/GPT_SoVITS/tools/uvr5/webui.py b/GPT_SoVITS/tools/uvr5/webui.py
index dc6dd024..abf46b60 100644
--- a/GPT_SoVITS/tools/uvr5/webui.py
+++ b/GPT_SoVITS/tools/uvr5/webui.py
@@ -1,8 +1,8 @@
 import os
 import traceback,gradio as gr
 import logging
-from tools.i18n.i18n import I18nAuto
-from tools.my_utils import clean_path
+from GPT_SoVITS.tools.i18n.i18n import I18nAuto
+from GPT_SoVITS.tools.my_utils import clean_path
 i18n = I18nAuto()
 
 logger = logging.getLogger(__name__)
diff --git a/api.py b/api.py
index 69c3ecc3..3965ab27 100644
--- a/api.py
+++ b/api.py
@@ -167,7 +167,7 @@ from GPT_SoVITS.AR.models.t2s_lightning_module import Text2SemanticLightningModu
 from GPT_SoVITS.text import cleaned_text_to_sequence
 from GPT_SoVITS.text.cleaner import clean_text
 from GPT_SoVITS.module.mel_processing import spectrogram_torch
-from tools.my_utils import load_audio
+from GPT_SoVITS.tools.my_utils import load_audio
 import config as global_config
 import logging
 import subprocess
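
The rewritten imports above only resolve if GPT_SoVITS is importable as a package, i.e. the repository root (the directory containing GPT_SoVITS/) is on sys.path or the package is installed with pip. The following minimal check is a hypothetical sketch under that assumption and is not part of the patch itself:

    # Hypothetical sanity check (not part of the patch): verify that the
    # package-qualified modules touched by this commit can be imported.
    # Assumes the repo root is on sys.path or GPT_SoVITS is pip-installed.
    import importlib

    for module_name in (
        "GPT_SoVITS.tools.i18n.i18n",
        "GPT_SoVITS.tools.my_utils",
        "GPT_SoVITS.tools.asr.config",
    ):
        try:
            importlib.import_module(module_name)
            print(f"OK: {module_name}")
        except ImportError as exc:
            print(f"FAILED: {module_name}: {exc}")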