diff --git a/GPT_SoVITS/text/g2pw/onnx_api.py b/GPT_SoVITS/text/g2pw/onnx_api.py
index a9b596b5..05681700 100644
--- a/GPT_SoVITS/text/g2pw/onnx_api.py
+++ b/GPT_SoVITS/text/g2pw/onnx_api.py
@@ -90,7 +90,7 @@ class G2PWOnnxConverter:
         sess_options = onnxruntime.SessionOptions()
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
-        sess_options.intra_op_num_threads = 2
+        sess_options.intra_op_num_threads = 2 if torch.cuda.is_available() else 0
         try:
             self.session_g2pW = onnxruntime.InferenceSession(
                 os.path.join(uncompress_path, "g2pW.onnx"),
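
Note: in ONNX Runtime, intra_op_num_threads = 0 is the default and lets the runtime choose the thread count itself, while a positive value caps the intra-op thread pool. The sketch below (not part of the patch; the model path and execution provider are illustrative placeholders) shows how the changed session options would be applied when constructing the g2pW session:

    # Minimal standalone sketch, assuming a local g2pW.onnx file and
    # CPU execution; identifiers other than the ONNX Runtime / torch
    # APIs shown in the diff are placeholders.
    import onnxruntime
    import torch

    sess_options = onnxruntime.SessionOptions()
    sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
    sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
    # 0 lets ONNX Runtime pick the thread count; with CUDA present,
    # cap this CPU-side session at 2 threads as in the patch.
    sess_options.intra_op_num_threads = 2 if torch.cuda.is_available() else 0

    session = onnxruntime.InferenceSession(
        "g2pW.onnx",  # placeholder path
        sess_options=sess_options,
        providers=["CPUExecutionProvider"],
    )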