Revert memory monitor usage in conformance tests
nikita-savelyevv committed Jul 29, 2024
1 parent d11b3b7 commit b8bb91d
Showing 4 changed files with 3 additions and 48 deletions.
tests/post_training/conftest.py: 5 changes (0 additions, 5 deletions)

@@ -29,8 +29,3 @@ def pytest_addoption(parser):
         action="store_true",
         help="Add additional columns to reports.csv",
     )
-    parser.addoption(
-        "--memory-monitor",
-        action="store_true",
-        help="Report memory using MemoryMonitor from tools/memory_monitor.py.",
-    )
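The removed flag followed the standard pytest pattern: register a command-line option in conftest.py, then expose it to tests through a session-scoped fixture (the matching fixture removal appears in tests/post_training/test_quantize_conformance.py below). A minimal sketch of that pattern, using a hypothetical --my-flag option:

# conftest.py
def pytest_addoption(parser):
    parser.addoption(
        "--my-flag",  # hypothetical option name
        action="store_true",
        help="Enable the optional feature.",
    )

# test_example.py
import pytest

@pytest.fixture(scope="session", name="my_flag")
def fixture_my_flag(pytestconfig):
    # pytest exposes "--my-flag" under the underscored destination "my_flag"
    return pytestconfig.getoption("my_flag")

def test_feature(my_flag: bool):
    assert isinstance(my_flag, bool)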
tests/post_training/pipelines/base.py: 19 changes (1 addition, 18 deletions)

@@ -9,7 +9,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import datetime as dt
-import gc
 import os
 import re
 import time
@@ -31,9 +30,6 @@
 import nncf
 from nncf import TargetDevice
 from tests.shared.command import Command
-from tools.memory_monitor import MemoryType
-from tools.memory_monitor import MemoryUnit
-from tools.memory_monitor import monitor_memory_for_callable
 
 DEFAULT_VAL_THREADS = 4
 
@@ -198,7 +194,6 @@ def __init__(
         run_benchmark_app: bool,
         params: dict = None,
         batch_size: int = 1,
-        memory_monitor: bool = False,
     ) -> None:
         self.reported_name = reported_name
         self.model_id = model_id
@@ -209,7 +204,6 @@ def __init__(
         self.reference_data = reference_data
         self.params = params or {}
         self.batch_size = batch_size
-        self.memory_monitor = memory_monitor
         self.no_eval = no_eval
         self.run_benchmark_app = run_benchmark_app
         self.output_model_dir: Path = self.output_dir / self.reported_name / self.backend.value
@@ -357,18 +351,7 @@ def compress(self) -> None:
             torch.set_num_threads(int(inference_num_threads))
 
         start_time = time.perf_counter()
-        if self.memory_monitor:
-            gc.collect()
-            max_value_per_memory_type = monitor_memory_for_callable(
-                self._compress,
-                interval=0.1,
-                memory_unit=MemoryUnit.MiB,
-                return_max_value=True,
-                save_dir=self.output_model_dir / "ptq_memory_logs",
-            )
-            self.run_info.compression_memory_usage = max_value_per_memory_type[MemoryType.SYSTEM]
-        else:
-            self.run_info.compression_memory_usage = memory_usage(self._compress, max_usage=True)
+        self.run_info.compression_memory_usage = memory_usage(self._compress, max_usage=True)
         self.run_info.time_compression = time.perf_counter() - start_time
 
     def save_compressed_model(self) -> None:
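With the monitor path reverted, peak memory during compression is again taken from memory_profiler's memory_usage helper, as in the retained line above. A minimal sketch of how that helper measures a callable (assuming the memory_profiler package is installed; depending on its version, max_usage=True returns a float or a one-element list, both in MiB):

from memory_profiler import memory_usage

def work() -> int:
    data = [0] * 10_000_000  # allocate roughly 80 MB so the peak is visible
    return sum(data)

# Samples the process RSS while `work` runs and keeps only the maximum.
peak = memory_usage((work, (), {}), max_usage=True)
print("peak memory:", peak, "MiB")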
tests/post_training/pipelines/lm_weight_compression.py: 18 changes (2 additions, 16 deletions)

@@ -8,7 +8,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import gc
+
 import os
 import re
 import shutil
@@ -32,9 +32,6 @@
 from tests.post_training.pipelines.base import BaseTestPipeline
 from tests.post_training.pipelines.base import StatsFromOutput
 from tests.shared.paths import TEST_ROOT
-from tools.memory_monitor import MemoryType
-from tools.memory_monitor import MemoryUnit
-from tools.memory_monitor import monitor_memory_for_callable
 
 
 @dataclass
@@ -181,18 +178,7 @@ def compress(self) -> None:
 
         print("Weight compression...")
         start_time = time.perf_counter()
-        if self.memory_monitor:
-            gc.collect()
-            max_value_per_memory_type = monitor_memory_for_callable(
-                self._compress,
-                interval=0.1,
-                memory_unit=MemoryUnit.MiB,
-                return_max_value=True,
-                save_dir=self.output_model_dir / "wc_memory_logs",
-            )
-            self.run_info.compression_memory_usage = max_value_per_memory_type[MemoryType.SYSTEM]
-        else:
-            self.run_info.compression_memory_usage = memory_usage(self._compress, max_usage=True)
+        self.run_info.compression_memory_usage = memory_usage(self._compress, max_usage=True)
         self.run_info.time_compression = time.perf_counter() - start_time
 
     def collect_data_from_stdout(self, stdout: str):
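For reference, the reverted code path (visible in the removed lines above) wrapped the compression callable with the repository's own monitoring tool. A sketch reconstructed from those removed lines, with compress_fn and output_dir as hypothetical stand-ins for self._compress and the model output directory; the keyword meanings are inferred from their names:

import gc
from pathlib import Path

from tools.memory_monitor import MemoryType
from tools.memory_monitor import MemoryUnit
from tools.memory_monitor import monitor_memory_for_callable

def compress_fn():  # hypothetical stand-in for self._compress
    return [0] * 10_000_000

output_dir = Path("memory_logs")  # hypothetical save location

gc.collect()  # drop collectable garbage so leftovers do not inflate the measured peak
max_value_per_memory_type = monitor_memory_for_callable(
    compress_fn,
    interval=0.1,                 # sampling period, seconds
    memory_unit=MemoryUnit.MiB,
    return_max_value=True,        # return per-type peaks instead of the full time series
    save_dir=output_dir,
)
peak_system_mib = max_value_per_memory_type[MemoryType.SYSTEM]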
tests/post_training/test_quantize_conformance.py: 9 changes (0 additions, 9 deletions)

@@ -80,11 +80,6 @@ def fixture_extra_columns(pytestconfig):
     return pytestconfig.getoption("extra_columns")
 
 
-@pytest.fixture(scope="session", name="memory_monitor")
-def fixture_memory_monitor(pytestconfig):
-    return pytestconfig.getoption("memory_monitor")
-
-
 def _parse_version(s: Path):
     version_str = re.search(r".*_(\d+\.\d+).(?:yaml|yml)", s.name).group(1)
     return version.parse(version_str)
@@ -247,7 +242,6 @@ def test_ptq_quantization(
     run_benchmark_app: bool,
     capsys: pytest.CaptureFixture,
     extra_columns: bool,
-    memory_monitor: bool,
 ):
     pipeline = None
     err_msg = None
@@ -273,7 +267,6 @@
             "no_eval": no_eval,
             "run_benchmark_app": run_benchmark_app,
             "batch_size": batch_size,
-            "memory_monitor": memory_monitor,
         }
     )
     pipeline: BaseTestPipeline = pipeline_cls(**pipeline_kwargs)
@@ -318,7 +311,6 @@ def test_weight_compression(
     run_benchmark_app: bool,
     capsys: pytest.CaptureFixture,
     extra_columns: bool,
-    memory_monitor: bool,
 ):
     pipeline = None
     err_msg = None
@@ -338,7 +330,6 @@
             "no_eval": no_eval,
             "run_benchmark_app": run_benchmark_app,
             "batch_size": batch_size,
-            "memory_monitor": memory_monitor,
         }
     )
    pipeline: BaseTestPipeline = pipeline_cls(**pipeline_kwargs)
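Net effect at the test level: both test_ptq_quantization and test_weight_compression now build their pipeline keyword arguments without a memory_monitor entry, and peak memory always comes from memory_profiler. The construction itself is plain dict splatting; a simplified sketch with a hypothetical Pipeline class standing in for pipeline_cls:

class Pipeline:
    def __init__(self, reported_name: str, batch_size: int = 1) -> None:
        self.reported_name = reported_name
        self.batch_size = batch_size

pipeline_kwargs = {"reported_name": "example_model", "batch_size": 1}
pipeline = Pipeline(**pipeline_kwargs)  # keys must match the __init__ parameter names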
