From 4d47869225868cea31707e90c4490dbcb2a388fd Mon Sep 17 00:00:00 2001
From: Alexander Dokuchaev <alexander.dokuchaev@intel.com>
Date: Fri, 13 Oct 2023 13:55:38 +0300
Subject: [PATCH] Add --no-eval arg to test_quantize_conformance.py (#2179)

### Changes

Add a `--no-eval` argument to `tests/post_training/test_quantize_conformance.py` that skips the validation step, which is useful when debugging the quantization pipeline without waiting for a full evaluation run. The option is exposed to tests through a session-scoped `no_eval` fixture and forwarded to the pipeline, whose `validate()` method returns early when the flag is set.
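
Example invocation using the new flag (the dataset path below is a placeholder):

```bash
pytest tests/post_training/test_quantize_conformance.py \
    --data /path/to/datasets --no-eval
```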
---
 tests/post_training/conftest.py                  | 5 +++--
 tests/post_training/pipelines/base.py            | 6 ++++++
 tests/post_training/test_quantize_conformance.py | 8 +++++++-
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/tests/post_training/conftest.py b/tests/post_training/conftest.py
index 7bf887d1c38..fe38d0eb510 100644
--- a/tests/post_training/conftest.py
+++ b/tests/post_training/conftest.py
@@ -22,8 +22,9 @@
 
 
 def pytest_addoption(parser):
-    parser.addoption("--data", action="store")
-    parser.addoption("--output", action="store", default="./tmp/")
+    parser.addoption("--data", action="store", help="Data directory")
+    parser.addoption("--output", action="store", default="./tmp/", help="Directory to store artifacts")
+    parser.addoption("--no-eval", action="store_true", help="Skip validation step")
 
 
 def pytest_configure(config):
diff --git a/tests/post_training/pipelines/base.py b/tests/post_training/pipelines/base.py
index 0306fc9594e..bc97a6a1b4b 100644
--- a/tests/post_training/pipelines/base.py
+++ b/tests/post_training/pipelines/base.py
@@ -108,6 +108,7 @@ def __init__(
         output_dir: Path,
         data_dir: Path,
         reference_data: dict,
+        no_eval: bool,
         params: dict = None,
     ) -> None:
         self.reported_name = reported_name
@@ -118,6 +119,7 @@ def __init__(
         self.data_dir = Path(data_dir)
         self.reference_data = reference_data
         self.params = params or {}
+        self.no_eval = no_eval
 
         self.output_model_dir = self.output_dir / self.reported_name / self.backend.value
         self.output_model_dir.mkdir(parents=True, exist_ok=True)
@@ -250,7 +252,11 @@ def validate(self) -> None:
         """
         Validate and compare result with reference
         """
+        if self.no_eval:
+            print("Validation skipped")
+            return
         print("Validation...")
+
         self._validate()
 
         metric_value = self.run_info.metric_value
diff --git a/tests/post_training/test_quantize_conformance.py b/tests/post_training/test_quantize_conformance.py
index 2f4620a2718..34327625ba4 100644
--- a/tests/post_training/test_quantize_conformance.py
+++ b/tests/post_training/test_quantize_conformance.py
@@ -37,6 +37,11 @@ def fixture_result(pytestconfig):
     return pytestconfig.test_results
 
 
+@pytest.fixture(scope="session", name="no_eval")
+def fixture_no_eval(pytestconfig):
+    return pytestconfig.getoption("no_eval")
+
+
 def read_reference_data():
     path_reference = Path(__file__).parent / "reference_data.yaml"
     with path_reference.open() as f:
@@ -48,7 +53,7 @@ def read_reference_data():
 
 
 @pytest.mark.parametrize("test_case_name", TEST_CASES.keys())
-def test_ptq_quantization(test_case_name, data, output, result):
+def test_ptq_quantization(test_case_name, data, output, result, no_eval):
     pipeline = None
     err_msg = None
     test_model_param = None
@@ -75,6 +80,7 @@ def test_ptq_quantization(test_case_name, data, output, result):
             "output_dir": output,
             "data_dir": data,
             "reference_data": REFERENCE_DATA[test_case_name],
+            "no_eval": no_eval,
         }
 
         pipeline = pipeline_cls(**pipeline_kwargs)