diff --git a/GPT_SoVITS/text/g2pw/onnx_api.py b/GPT_SoVITS/text/g2pw/onnx_api.py
index 374c9a4..32fc2c0 100644
--- a/GPT_SoVITS/text/g2pw/onnx_api.py
+++ b/GPT_SoVITS/text/g2pw/onnx_api.py
@@ -86,10 +86,10 @@ class G2PWOnnxConverter:
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
         sess_options.intra_op_num_threads = 2
-        self.session_g2pW = onnxruntime.InferenceSession(
-            os.path.join(uncompress_path, 'g2pW.onnx'),
-            sess_options=sess_options, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
-            # sess_options=sess_options)
+        try:
+            # Prefer CUDA; onnxruntime raises if the CUDA provider is unavailable in this build.
+            self.session_g2pW = onnxruntime.InferenceSession(os.path.join(uncompress_path, 'g2pW.onnx'), sess_options=sess_options, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+        except Exception:
+            # Fall back to CPU-only execution. Narrow except Exception (not bare except:)
+            # so KeyboardInterrupt/SystemExit still propagate.
+            self.session_g2pW = onnxruntime.InferenceSession(os.path.join(uncompress_path, 'g2pW.onnx'), sess_options=sess_options, providers=['CPUExecutionProvider'])
         self.config = load_config(
             config_path=os.path.join(uncompress_path, 'config.py'),
             use_default=True)