From e3e147d4f9a2b9d4d3cc434259f12cac138618c3 Mon Sep 17 00:00:00 2001 From: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com> Date: Wed, 14 May 2025 09:36:23 +0100 Subject: [PATCH] Use Multithreading in ONNX Session --- GPT_SoVITS/text/g2pw/onnx_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPT_SoVITS/text/g2pw/onnx_api.py b/GPT_SoVITS/text/g2pw/onnx_api.py index a9b596b5..05681700 100644 --- a/GPT_SoVITS/text/g2pw/onnx_api.py +++ b/GPT_SoVITS/text/g2pw/onnx_api.py @@ -90,7 +90,7 @@ class G2PWOnnxConverter: sess_options = onnxruntime.SessionOptions() sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL - sess_options.intra_op_num_threads = 2 + sess_options.intra_op_num_threads = 2 if torch.cuda.is_available() else 0 try: self.session_g2pW = onnxruntime.InferenceSession( os.path.join(uncompress_path, "g2pW.onnx"),