
Commit

Fix tests
vshampor committed Oct 18, 2023
1 parent 8ea4afc commit 39e57ff
Showing 28 changed files with 201 additions and 177 deletions.
2 changes: 1 addition & 1 deletion examples/experimental/openvino/bert/main.py
@@ -13,13 +13,13 @@
from pathlib import Path
from typing import Any, Iterable

import datasets
import evaluate
import numpy as np
import openvino.runtime as ov
import torch
import transformers

import datasets
import nncf
from nncf.parameters import ModelType

2 changes: 0 additions & 2 deletions nncf/config/config.py
@@ -145,5 +145,3 @@ def validate(loaded_json):
# Passed a list of dicts
for compression_algo_dict in compression_section:
validate_single_compression_algo_schema(compression_algo_dict, REF_VS_ALGO_SCHEMA)


2 changes: 1 addition & 1 deletion nncf/config/extractors.py
@@ -226,4 +226,4 @@ def validate_accuracy_aware_schema(config: NNCFConfig, params: Dict[str, object]


def has_input_info_field(config: NNCFConfig) -> bool:
return config.get('input_info') is not None
return config.get("input_info") is not None
5 changes: 3 additions & 2 deletions nncf/config/schema.py
@@ -70,8 +70,9 @@
" during tracing and exporting.",
),
"keyword": with_attributes(
STRING, description="Keyword to be used when passing the tensor to the model's 'forward' method - "
"leave unspecified to pass the corresponding argument as a positional arg."
STRING,
description="Keyword to be used when passing the tensor to the model's 'forward' method - "
"leave unspecified to pass the corresponding argument as a positional arg.",
),
},
"additionalProperties": False,
10 changes: 4 additions & 6 deletions nncf/experimental/torch/quantization/quantize_model.py
@@ -9,7 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, Optional, Tuple
from typing import Optional

import torch

@@ -20,7 +20,7 @@
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from nncf.scopes import IgnoredScope
from nncf.torch.dynamic_graph.io_handling import get_input_infos_from_dataset
from nncf.torch.dynamic_graph.io_handling import ExactInputsInfo
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.utils import training_mode_switcher

@@ -35,12 +35,10 @@ def create_nncf_network_ptq(model: torch.nn.Module, dataset: Dataset) -> NNCFNet
:return: NNCFNetwork instance for the input model
"""

input_info_list = get_input_infos_from_dataset(dataset)
input_info = ExactInputsInfo.from_nncf_dataset(dataset)

with training_mode_switcher(model, is_training=False):
nncf_network = NNCFNetwork(
model, input_info=input_info_list
)
nncf_network = NNCFNetwork(model, input_info=input_info)

nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()

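
As a quick orientation for this hunk: create_nncf_network_ptq now derives the model input info via ExactInputsInfo.from_nncf_dataset(dataset) instead of the removed get_input_infos_from_dataset helper. A minimal usage sketch, mirroring the call pattern in tests/torch/ptq/test_quantize_model_helpers.py further down; the model and input shape here are illustrative placeholders, not part of this commit:

import torch
from torch import nn

from nncf.data import Dataset
from nncf.experimental.torch.quantization.quantize_model import create_nncf_network_ptq

model = nn.Sequential(nn.Conv2d(1, 1, 1))  # placeholder model
input_shape = (1, 1, 4, 4)  # placeholder input shape


def transform_fn(data_item):
    x, _ = data_item  # keep the input tensor, drop the label
    return x


# The input info for tracing is now inferred from the first item of the nncf.Dataset.
dataset = Dataset([(torch.empty(input_shape), 1)] * 3, transform_fn)
nncf_network = create_nncf_network_ptq(model, dataset)
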
4 changes: 2 additions & 2 deletions nncf/torch/dynamic_graph/graph_tracer.py
@@ -8,9 +8,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Callable, List, Optional
from typing import Any, Callable, Optional

import torch

@@ -19,6 +18,7 @@
from nncf.torch.dynamic_graph.io_handling import ModelInputInfo
from nncf.torch.utils import get_model_device


class GraphTracer:
def __init__(self, custom_forward_fn: Callable[[torch.nn.Module], Any]):
self.custom_forward_fn = custom_forward_fn
49 changes: 26 additions & 23 deletions nncf/torch/dynamic_graph/io_handling.py
@@ -9,13 +9,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from collections import OrderedDict
from copy import deepcopy
from inspect import Parameter
from inspect import Signature
from typing import Any, List, Optional, Type, Protocol
from typing import Tuple
from typing import Dict, Set
from typing import Any, Dict, List, Optional, Protocol, Set, Tuple, Type

import torch

@@ -26,7 +23,9 @@
from nncf.common.initialization.dataloader import NNCFDataLoader
from nncf.common.logging import nncf_logger
from nncf.common.utils.api_marker import api
from nncf.config.structures import BNAdaptationInitArgs, QuantizationRangeInitArgs, NNCFExtraConfigStruct
from nncf.config.structures import BNAdaptationInitArgs
from nncf.config.structures import NNCFExtraConfigStruct
from nncf.config.structures import QuantizationRangeInitArgs
from nncf.torch.dynamic_graph.context import forward_nncf_trace
from nncf.torch.dynamic_graph.patch_pytorch import register_operator
from nncf.torch.initialization import wrap_dataloader_for_init
@@ -155,20 +154,27 @@ def from_nncf_config(cls, config: NNCFConfig):
if input_infos is None:
raise RuntimeError("Passed NNCFConfig does not have an 'input_info' field")
if isinstance(input_infos, dict):
return FillerInputInfo([FillerInputElement(
input_infos.get("sample_size"),
input_infos.get("type"),
input_infos.get("keyword"),
input_infos.get("filler"))])
return FillerInputInfo(
[
FillerInputElement(
input_infos.get("sample_size"),
input_infos.get("type"),
input_infos.get("keyword"),
input_infos.get("filler"),
)
]
)
if isinstance(input_infos, list):
elements: List[FillerInputElement] = []
for info_dict in input_infos:
elements.append(FillerInputElement(
info_dict.get("sample_size"),
info_dict.get("type"),
info_dict.get("keyword"),
info_dict.get("filler")
))
elements.append(
FillerInputElement(
info_dict.get("sample_size"),
info_dict.get("type"),
info_dict.get("keyword"),
info_dict.get("filler"),
)
)
return FillerInputInfo(elements)
raise RuntimeError("Invalid input_infos specified in config - should be either dict or list of dicts")

@@ -217,7 +223,9 @@ def from_nncf_config_dataloaders(cls, config: NNCFConfig) -> Optional["ExactInpu


class InputInfoWrapManager:
def __init__(self, input_info: ModelInputInfo, fwd_signature: Signature, module_ref_for_device: torch.nn.Module = None):
def __init__(
self, input_info: ModelInputInfo, fwd_signature: Signature, module_ref_for_device: torch.nn.Module = None
):
self._fwd_signature = fwd_signature
self._module_ref_for_device = module_ref_for_device
args, kwargs = input_info.get_forward_inputs()
@@ -264,14 +272,9 @@ def wrap_inputs(self, model_args: Tuple, model_kwargs: Dict) -> Tuple[Tuple, Dic
return bound_model_params.args, bound_model_params.kwargs



class HasDataloader(Protocol):
def get_dataloader(self) -> NNCFDataLoader:
pass


EXTRA_STRUCTS_WITH_DATALOADERS: List[Type[NNCFExtraConfigStruct]] = [
QuantizationRangeInitArgs,
BNAdaptationInitArgs
]

EXTRA_STRUCTS_WITH_DATALOADERS: List[Type[NNCFExtraConfigStruct]] = [QuantizationRangeInitArgs, BNAdaptationInitArgs]
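
For context on the reformatted FillerInputInfo.from_nncf_config above: the 'input_info' section may be either a single dict or a list of dicts with 'sample_size', 'type', 'keyword' and 'filler' keys. A rough sketch of both shapes; the concrete sizes, dtype, keyword name and filler value are illustrative assumptions, not taken from this commit:

from nncf import NNCFConfig
from nncf.torch.dynamic_graph.io_handling import FillerInputInfo

# One model input: a single dict under "input_info".
single_input_cfg = NNCFConfig({"input_info": {"sample_size": [1, 1, 4, 4]}})

# Several model inputs: a list of dicts; "type", "keyword" and "filler" are optional.
multi_input_cfg = NNCFConfig(
    {
        "input_info": [
            {"sample_size": [1, 3, 32, 32]},
            {"sample_size": [1, 8], "type": "long", "keyword": "attention_mask", "filler": "ones"},
        ]
    }
)

filler_info = FillerInputInfo.from_nncf_config(single_input_cfg)
sample_shape = filler_info.elements[0].shape  # same access pattern as in tests/torch/helpers.py below
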
2 changes: 0 additions & 2 deletions nncf/torch/exporter.py
@@ -8,7 +8,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from functools import partial
from typing import Any, Tuple

@@ -139,7 +138,6 @@ def to_single_batch_tensors(obj: torch.Tensor):
args = objwalk(args, is_tensor, to_single_batch_tensors)
kwargs = objwalk(kwargs, is_tensor, to_single_batch_tensors)
export_args = (*args, kwargs) # according to a variant of passing kwargs in torch.onnx.export doc
num_tensors = 0

if self._input_names is not None:
input_names = self._input_names
8 changes: 2 additions & 6 deletions nncf/torch/graph/graph_builder.py
@@ -9,15 +9,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Set
from typing import Any, Callable, Dict, Optional, Set

import torch

from nncf.common.graph.operator_metatypes import INPUT_NOOP_METATYPES
from nncf.torch.dynamic_graph.context import TracingContext
from nncf.torch.dynamic_graph.graph import DynamicGraph
from nncf.torch.dynamic_graph.graph_tracer import GraphTracer
from nncf.torch.dynamic_graph.io_handling import ModelInputElement
from nncf.torch.dynamic_graph.layer_attributes_handlers import set_nodes_attributes_in_nncf_graph
from nncf.torch.dynamic_graph.scope import Scope
from nncf.torch.graph.graph import PTNNCFGraph
@@ -29,10 +28,7 @@ def __init__(self, custom_forward_fn: Callable[[torch.nn.Module], Any]):
self.custom_forward_fn = custom_forward_fn

def build_graph(
self,
model: torch.nn.Module,
context_to_use: Optional[TracingContext] = None,
as_eval: bool = False
self, model: torch.nn.Module, context_to_use: Optional[TracingContext] = None, as_eval: bool = False
) -> PTNNCFGraph:
tracer = GraphTracer(self.custom_forward_fn)
dynamic_graph = tracer.trace_graph(model, context_to_use, as_eval)
28 changes: 15 additions & 13 deletions nncf/torch/model_creation.py
@@ -165,21 +165,24 @@ def get_input_info_from_config(config: NNCFConfig) -> ModelInputInfo:
if has_input_info_field(config):
return FillerInputInfo.from_nncf_config(config)

nncf_logger.debug("Config has no 'input_info' section, trying to use dataloader output as model inputs "
"for graph building.")
nncf_logger.debug(
"Config has no 'input_info' section, trying to use dataloader output as model inputs " "for graph building."
)
exact_info = ExactInputsInfo.from_nncf_config_dataloaders(config)
if exact_info is not None:
return exact_info
raise RuntimeError("Could not determine tensor inputs for the model's forward call.\n"
"If you are using the `nncf.quantize` API, make sure that you supply the "
"calibration dataloader to the `nncf.quantize` call.\n"
"If you are using the `create_compressed_model` API, either specify the "
"inputs by using the 'input_info' section in the NNCFConfig, or attach an "
"initialization dataloader to the NNCFConfig by calling "
"`NNCFConfig.register_extra_structs(...)` with one of the following extra "
f"structures:\n"
f"{EXTRA_STRUCTS_WITH_DATALOADERS}\n"
f"or by calling `nncf.torch.register_default_init_args`")
raise RuntimeError(
"Could not determine tensor inputs for the model's forward call.\n"
"If you are using the `nncf.quantize` API, make sure that you supply the "
"calibration dataloader to the `nncf.quantize` call.\n"
"If you are using the `create_compressed_model` API, either specify the "
"inputs by using the 'input_info' section in the NNCFConfig, or attach an "
"initialization dataloader to the NNCFConfig by calling "
"`NNCFConfig.register_extra_structs(...)` with one of the following extra "
f"structures:\n"
f"{EXTRA_STRUCTS_WITH_DATALOADERS}\n"
f"or by calling `nncf.torch.register_default_init_args`"
)


def create_nncf_network_with_inputs_from_config(model: torch.nn.Module, config: NNCFConfig):
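
The reworked error message above points at two ways to make the model inputs discoverable. A rough sketch of both routes, using a placeholder config and dataloader that are not part of this commit:

import torch

from nncf import NNCFConfig
from nncf.torch import register_default_init_args

# Route 1: spell out the inputs in the 'input_info' config section.
config_with_inputs = NNCFConfig({"input_info": {"sample_size": [1, 3, 224, 224]}})

# Route 2: attach an initialization dataloader; register_default_init_args registers
# dataloader-carrying extra structs (see EXTRA_STRUCTS_WITH_DATALOADERS in io_handling.py)
# from which ExactInputsInfo.from_nncf_config_dataloaders can pick up the input shapes.
init_loader = torch.utils.data.DataLoader(
    [(torch.randn(3, 224, 224), 0) for _ in range(4)], batch_size=2
)
config_with_loader = register_default_init_args(NNCFConfig({}), init_loader)
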
@@ -315,4 +318,3 @@ def create_compression_algorithm_builder_from_algo_names(
else:
builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)
return builder

6 changes: 2 additions & 4 deletions nncf/torch/nncf_network.py
@@ -41,7 +41,7 @@
from nncf.torch.dynamic_graph.graph import ShapeIgnoringTensorMetaComparator
from nncf.torch.dynamic_graph.graph_tracer import GraphTracer
from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn
from nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager, ModelInputElement
from nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager
from nncf.torch.dynamic_graph.io_handling import ModelInputInfo
from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
@@ -499,9 +499,7 @@ def rebuild_graph(self, *input_args):
builder = GraphBuilder(dummy_forward_fn)

with training_mode_switcher(self._model_ref, is_training=False):
self._compressed_graph = builder.build_graph(
self._model_ref, self._compressed_context
)
self._compressed_graph = builder.build_graph(self._model_ref, self._compressed_context)

def is_scope_in_nncf_module_scope(self, scope: Scope) -> bool:
norm_nncf_scopes = []
6 changes: 2 additions & 4 deletions nncf/torch/quantization/quantize_model.py
@@ -39,6 +39,7 @@ class CalibrationDataLoaderAdapter(PTInitializingDataLoader):
"""
An adapter from the nncf.Dataset to the PTInitializingDataLoader.
"""

def get_target(self, dataloader_output: Any) -> Any:
raise NotImplementedError

@@ -216,10 +217,7 @@ def quantize_impl(
)

clone_model = deepcopy(model)
compression_ctrl, compressed_model = create_compressed_model(
model=clone_model,
config=nncf_config
)
compression_ctrl, compressed_model = create_compressed_model(model=clone_model, config=nncf_config)
compression_ctrl.prepare_for_export()
compressed_model.nncf.disable_dynamic_graph_building()

4 changes: 2 additions & 2 deletions tests/torch/automl/test_quantization_env.py
@@ -22,7 +22,7 @@
from nncf.torch.automl.environment.quantization_env import ModelSizeCalculator
from nncf.torch.automl.environment.quantization_env import QuantizationEnv
from nncf.torch.automl.environment.quantization_env import QuantizationEnvParams

from nncf.torch.dynamic_graph.io_handling import FillerInputInfo
from nncf.torch.hardware.config import PTHWConfig
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.quantization.algo import ExperimentalQuantizationBuilder
@@ -38,7 +38,7 @@ def create_test_quantization_env(model_creator=BasicConvTestModel, input_info_cf
input_info_cfg = {"input_info": {"sample_size": [1, 1, 4, 4]}}

model = model_creator()
nncf_network = NNCFNetwork(model, input_info=FillerInputInfo(input_info_cfg))
nncf_network = NNCFNetwork(model, input_info=FillerInputInfo.from_nncf_config(input_info_cfg))
hw_config_type = HWConfigType.VPU
hw_config_path = HWConfig.get_path_to_hw_config(hw_config_type)
hw_config = PTHWConfig.from_json(hw_config_path)
6 changes: 3 additions & 3 deletions tests/torch/helpers.py
@@ -31,7 +31,7 @@
from nncf.config.structures import BNAdaptationInitArgs
from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS
from nncf.torch.compression_method_api import PTCompressionAlgorithmController

from nncf.torch.dynamic_graph.io_handling import FillerInputInfo
from nncf.torch.dynamic_graph.scope import Scope
from nncf.torch.initialization import PTInitializingDataLoader
from nncf.torch.initialization import register_default_init_args
@@ -374,8 +374,8 @@ def __getitem__(self, index):
def create_any_mock_dataloader(
dataset_cls: type, config: NNCFConfig, num_samples: int = 1, batch_size: int = 1
) -> DataLoader:
input_infos_list = FillerInputInfo(config)
input_sample_size = input_infos_list[0].shape
input_info = FillerInputInfo.from_nncf_config(config)
input_sample_size = input_info.elements[0].shape
data_loader = DataLoader(
dataset_cls(input_sample_size[1:], num_samples),
batch_size=batch_size,
1 change: 0 additions & 1 deletion tests/torch/nas/creators.py
@@ -27,7 +27,6 @@
)
from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn
from nncf.torch.dynamic_graph.io_handling import FillerInputInfo

from nncf.torch.graph.transformations.layout import PTTransformationLayout
from nncf.torch.model_creation import create_nncf_network_with_inputs_from_config
from nncf.torch.model_transformer import PTModelTransformer
4 changes: 2 additions & 2 deletions tests/torch/ptq/test_quantize_model_helpers.py
@@ -13,7 +13,7 @@
from torch import nn

from nncf.data import Dataset
from nncf.experimental.torch.quantization.quantize_model import create_nncf_network_with_config_ptq
from nncf.experimental.torch.quantization.quantize_model import create_nncf_network_ptq


class TestModel(nn.Module):
@@ -35,7 +35,7 @@ def transform_fn(inputs):
return x

dataset = Dataset([(torch.empty(input_shape), 1)] * 3, transform_fn)
nncf_network = create_nncf_network_with_config_ptq(model, dataset)
nncf_network = create_nncf_network_ptq(model, dataset)
nncf_graph = nncf_network.nncf.get_original_graph()
all_nodes = nncf_graph.get_all_nodes()
assert len(all_nodes) == 2
6 changes: 3 additions & 3 deletions tests/torch/quantization/quantization_helpers.py
@@ -11,7 +11,7 @@
import torch

from nncf import NNCFConfig

from nncf.torch.dynamic_graph.io_handling import FillerInputInfo
from tests.torch.helpers import get_empty_config


@@ -75,8 +75,8 @@ def distributed_init_test_default(gpu, ngpus_per_node, config):


def create_rank_dataloader(config, rank, num_samples=10, batch_size=3):
input_infos_list = FillerInputInfo(config)
input_sample_size = input_infos_list[0].shape
input_infos_list = FillerInputInfo.from_nncf_config(config)
input_sample_size = input_infos_list.elements[0].shape
data_loader = torch.utils.data.DataLoader(
RankDatasetMock(input_sample_size[1:], rank, num_samples),
batch_size=batch_size,