diff --git a/tests/onnx/benchmarking/accuracy_checker.py b/tests/onnx/benchmarking/ac_wrapper.py
similarity index 100%
rename from tests/onnx/benchmarking/accuracy_checker.py
rename to tests/onnx/benchmarking/ac_wrapper.py
diff --git a/tests/onnx/test_e2e_ptq.py b/tests/onnx/test_e2e_ptq.py
index 84574571795..bfd58ab0878 100644
--- a/tests/onnx/test_e2e_ptq.py
+++ b/tests/onnx/test_e2e_ptq.py
@@ -184,7 +184,7 @@ def is_ov(request):
 def _read_accuracy_checker_result(root_dir: Path, key: str) -> pd.DataFrame:
     dfs = []
     for task in TASKS:
-        csv_fp = str(root_dir / task / f"accuracy_checker-{key}.csv")
+        csv_fp = str(root_dir / task / f"ac_wrapper-{key}.csv")
         dfs += [pd.read_csv(csv_fp)]
     df = pd.concat(dfs, axis=0)
     df = df[["model", "metric_value", "metric_name", "tags"]]
@@ -402,7 +402,7 @@ def test_reference_model_accuracy(
         anno_dir,
         output_dir,
         eval_size,
-        program="accuracy_checker.py",
+        program="ac_wrapper.py",
         is_quantized=False,
         is_ov_ep=False,
         is_cpu_ep=True,
@@ -445,7 +445,7 @@ def test_onnx_rt_quantized_model_accuracy(
         anno_dir,
         output_dir,
         eval_size,
-        program="accuracy_checker.py",
+        program="ac_wrapper.py",
         is_quantized=True,
         is_ov_ep=is_ov_ep,
         is_cpu_ep=is_cpu_ep,
@@ -474,7 +474,7 @@ def test_ov_quantized_model_accuracy(
         anno_dir,
         output_dir,
         eval_size,
-        program="accuracy_checker.py",
+        program="ac_wrapper.py",
         is_quantized=True,
     )
     run_command(command)
diff --git a/tests/post_training/pipelines/base.py b/tests/post_training/pipelines/base.py
index 3f7dde51993..e3f35f35657 100644
--- a/tests/post_training/pipelines/base.py
+++ b/tests/post_training/pipelines/base.py
@@ -336,9 +336,9 @@ def compress(self) -> None:
         print("Quantization...")

         if self.backend in PT_BACKENDS:
-            cpu_threads_num = os.environ.get("CPU_THREADS_NUM")
-            if cpu_threads_num is not None:
-                torch.set_num_threads(int(cpu_threads_num))
+            inference_num_threads = os.environ.get("INFERENCE_NUM_THREADS")
+            if inference_num_threads is not None:
+                torch.set_num_threads(int(inference_num_threads))

         start_time = time.perf_counter()
         self.run_info.compression_memory_usage = memory_usage(self._compress, max_usage=True)
diff --git a/tests/post_training/pipelines/image_classification_timm.py b/tests/post_training/pipelines/image_classification_timm.py
index b39cce33d4d..c17dc3e2ef4 100644
--- a/tests/post_training/pipelines/image_classification_timm.py
+++ b/tests/post_training/pipelines/image_classification_timm.py
@@ -127,10 +127,10 @@ def _validate(self):

         core = ov.Core()

-        if os.environ.get("CPU_THREADS_NUM"):
+        if os.environ.get("INFERENCE_NUM_THREADS"):
             # Set CPU_THREADS_NUM for OpenVINO inference
-            cpu_threads_num = os.environ.get("CPU_THREADS_NUM")
-            core.set_property("CPU", properties={"CPU_THREADS_NUM": str(cpu_threads_num)})
+            inference_num_threads = os.environ.get("INFERENCE_NUM_THREADS")
+            core.set_property("CPU", properties={"INFERENCE_NUM_THREADS": str(inference_num_threads)})

         ov_model = core.read_model(self.path_compressed_ir)
         compiled_model = core.compile_model(ov_model)
diff --git a/tests/post_training/pipelines/lm_weight_compression.py b/tests/post_training/pipelines/lm_weight_compression.py
index 1d69967bce1..b1a6e5853dc 100644
--- a/tests/post_training/pipelines/lm_weight_compression.py
+++ b/tests/post_training/pipelines/lm_weight_compression.py
@@ -188,10 +188,10 @@ def _validate(self):
         is_stateful = self.params.get("is_stateful", False)
         core = ov.Core()

-        if os.environ.get("CPU_THREADS_NUM"):
+        if os.environ.get("INFERENCE_NUM_THREADS"):
             # Set CPU_THREADS_NUM for OpenVINO inference
-            cpu_threads_num = os.environ.get("CPU_THREADS_NUM")
-            core.set_property("CPU", properties={"CPU_THREADS_NUM": str(cpu_threads_num)})
+            inference_num_threads = os.environ.get("INFERENCE_NUM_THREADS")
+            core.set_property("CPU", properties={"INFERENCE_NUM_THREADS": str(inference_num_threads)})

         gt_data_path = TEST_ROOT / "post_training" / "data" / "wwb_ref_answers" / self.fp32_model_name / "ref_qa.csv"
         gt_data_path.parent.mkdir(parents=True, exist_ok=True)