diff --git a/examples/experimental/openvino/bert/main.py b/examples/experimental/openvino/bert/main.py index ea7a3772430..34dd31094b5 100644 --- a/examples/experimental/openvino/bert/main.py +++ b/examples/experimental/openvino/bert/main.py @@ -13,13 +13,13 @@ from pathlib import Path from typing import Any, Iterable -import datasets import evaluate import numpy as np import openvino.runtime as ov import torch import transformers +import datasets import nncf from nncf.parameters import ModelType diff --git a/nncf/config/config.py b/nncf/config/config.py index e2793419b5a..a414ce05c96 100644 --- a/nncf/config/config.py +++ b/nncf/config/config.py @@ -145,5 +145,3 @@ def validate(loaded_json): # Passed a list of dicts for compression_algo_dict in compression_section: validate_single_compression_algo_schema(compression_algo_dict, REF_VS_ALGO_SCHEMA) - - diff --git a/nncf/config/extractors.py b/nncf/config/extractors.py index 1cbf5642a8c..737a8d6caaf 100644 --- a/nncf/config/extractors.py +++ b/nncf/config/extractors.py @@ -226,4 +226,4 @@ def validate_accuracy_aware_schema(config: NNCFConfig, params: Dict[str, object] def has_input_info_field(config: NNCFConfig) -> bool: - return config.get('input_info') is not None \ No newline at end of file + return config.get("input_info") is not None diff --git a/nncf/config/schema.py b/nncf/config/schema.py index a18bea73525..7ad4e085b51 100644 --- a/nncf/config/schema.py +++ b/nncf/config/schema.py @@ -70,8 +70,9 @@ " during tracing and exporting.", ), "keyword": with_attributes( - STRING, description="Keyword to be used when passing the tensor to the model's 'forward' method - " - "leave unspecified to pass the corresponding argument as a positional arg." + STRING, + description="Keyword to be used when passing the tensor to the model's 'forward' method - " + "leave unspecified to pass the corresponding argument as a positional arg.", ), }, "additionalProperties": False, diff --git a/nncf/experimental/torch/quantization/quantize_model.py b/nncf/experimental/torch/quantization/quantize_model.py index 82e871d2d54..d7fbb3ac247 100644 --- a/nncf/experimental/torch/quantization/quantize_model.py +++ b/nncf/experimental/torch/quantization/quantize_model.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict, Optional, Tuple +from typing import Optional import torch @@ -20,7 +20,7 @@ from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization from nncf.scopes import IgnoredScope -from nncf.torch.dynamic_graph.io_handling import get_input_infos_from_dataset +from nncf.torch.dynamic_graph.io_handling import ExactInputsInfo from nncf.torch.nncf_network import NNCFNetwork from nncf.torch.utils import training_mode_switcher @@ -35,12 +35,10 @@ def create_nncf_network_ptq(model: torch.nn.Module, dataset: Dataset) -> NNCFNet :return: NNCFNetwork instance for the input model """ - input_info_list = get_input_infos_from_dataset(dataset) + input_info = ExactInputsInfo.from_nncf_dataset(dataset) with training_mode_switcher(model, is_training=False): - nncf_network = NNCFNetwork( - model, input_info=input_info_list - ) + nncf_network = NNCFNetwork(model, input_info=input_info) nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph() diff --git a/nncf/torch/dynamic_graph/graph_tracer.py b/nncf/torch/dynamic_graph/graph_tracer.py index cc70371efa5..5eac2da368f 100644 --- a/nncf/torch/dynamic_graph/graph_tracer.py +++ b/nncf/torch/dynamic_graph/graph_tracer.py @@ -8,9 +8,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from collections import OrderedDict from copy import deepcopy -from typing import Any, Callable, List, Optional +from typing import Any, Callable, Optional import torch @@ -19,6 +18,7 @@ from nncf.torch.dynamic_graph.io_handling import ModelInputInfo from nncf.torch.utils import get_model_device + class GraphTracer: def __init__(self, custom_forward_fn: Callable[[torch.nn.Module], Any]): self.custom_forward_fn = custom_forward_fn diff --git a/nncf/torch/dynamic_graph/io_handling.py b/nncf/torch/dynamic_graph/io_handling.py index 0e4d9cd13ac..bb3020329d4 100644 --- a/nncf/torch/dynamic_graph/io_handling.py +++ b/nncf/torch/dynamic_graph/io_handling.py @@ -9,13 +9,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import abc -from collections import OrderedDict from copy import deepcopy from inspect import Parameter from inspect import Signature -from typing import Any, List, Optional, Type, Protocol -from typing import Tuple -from typing import Dict, Set +from typing import Any, Dict, List, Optional, Protocol, Set, Tuple, Type import torch @@ -26,7 +23,9 @@ from nncf.common.initialization.dataloader import NNCFDataLoader from nncf.common.logging import nncf_logger from nncf.common.utils.api_marker import api -from nncf.config.structures import BNAdaptationInitArgs, QuantizationRangeInitArgs, NNCFExtraConfigStruct +from nncf.config.structures import BNAdaptationInitArgs +from nncf.config.structures import NNCFExtraConfigStruct +from nncf.config.structures import QuantizationRangeInitArgs from nncf.torch.dynamic_graph.context import forward_nncf_trace from nncf.torch.dynamic_graph.patch_pytorch import register_operator from nncf.torch.initialization import wrap_dataloader_for_init @@ -155,20 +154,27 @@ def from_nncf_config(cls, config: NNCFConfig): if input_infos is None: raise RuntimeError("Passed NNCFConfig does not have an 'input_info' field") if isinstance(input_infos, dict): - return FillerInputInfo([FillerInputElement( - input_infos.get("sample_size"), - input_infos.get("type"), - input_infos.get("keyword"), - input_infos.get("filler"))]) + return FillerInputInfo( + [ + FillerInputElement( + input_infos.get("sample_size"), + input_infos.get("type"), + input_infos.get("keyword"), + input_infos.get("filler"), + ) + ] + ) if isinstance(input_infos, list): elements: List[FillerInputElement] = [] for info_dict in input_infos: - elements.append(FillerInputElement( - info_dict.get("sample_size"), - info_dict.get("type"), - info_dict.get("keyword"), - info_dict.get("filler") - )) + elements.append( + FillerInputElement( + info_dict.get("sample_size"), + info_dict.get("type"), + info_dict.get("keyword"), + info_dict.get("filler"), + ) + ) return FillerInputInfo(elements) raise RuntimeError("Invalid input_infos specified in config - should be either dict or list of dicts") @@ -217,7 +223,9 @@ def from_nncf_config_dataloaders(cls, config: NNCFConfig) -> Optional["ExactInpu class InputInfoWrapManager: - def __init__(self, input_info: ModelInputInfo, fwd_signature: Signature, module_ref_for_device: torch.nn.Module = None): + def __init__( + self, input_info: ModelInputInfo, fwd_signature: Signature, module_ref_for_device: torch.nn.Module = None + ): self._fwd_signature = fwd_signature self._module_ref_for_device = module_ref_for_device args, kwargs = input_info.get_forward_inputs() @@ -264,14 +272,9 @@ def wrap_inputs(self, model_args: Tuple, model_kwargs: Dict) -> Tuple[Tuple, Dic return bound_model_params.args, bound_model_params.kwargs - class HasDataloader(Protocol): def get_dataloader(self) -> NNCFDataLoader: pass -EXTRA_STRUCTS_WITH_DATALOADERS: List[Type[NNCFExtraConfigStruct]] = [ - QuantizationRangeInitArgs, - BNAdaptationInitArgs -] - +EXTRA_STRUCTS_WITH_DATALOADERS: List[Type[NNCFExtraConfigStruct]] = [QuantizationRangeInitArgs, BNAdaptationInitArgs] diff --git a/nncf/torch/exporter.py b/nncf/torch/exporter.py index d6c6bb4fa4c..9218dfcd271 100644 --- a/nncf/torch/exporter.py +++ b/nncf/torch/exporter.py @@ -8,7 +8,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from copy import copy from functools import partial from typing import Any, Tuple @@ -139,7 +138,6 @@ def to_single_batch_tensors(obj: torch.Tensor): args = objwalk(args, is_tensor, to_single_batch_tensors) kwargs = objwalk(kwargs, is_tensor, to_single_batch_tensors) export_args = (*args, kwargs) # according to a variant of passing kwargs in torch.onnx.export doc - num_tensors = 0 if self._input_names is not None: input_names = self._input_names diff --git a/nncf/torch/graph/graph_builder.py b/nncf/torch/graph/graph_builder.py index e82f829e989..997c56b4910 100644 --- a/nncf/torch/graph/graph_builder.py +++ b/nncf/torch/graph/graph_builder.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict -from typing import Any, Callable, Dict, List, Optional, Set +from typing import Any, Callable, Dict, Optional, Set import torch @@ -17,7 +17,6 @@ from nncf.torch.dynamic_graph.context import TracingContext from nncf.torch.dynamic_graph.graph import DynamicGraph from nncf.torch.dynamic_graph.graph_tracer import GraphTracer -from nncf.torch.dynamic_graph.io_handling import ModelInputElement from nncf.torch.dynamic_graph.layer_attributes_handlers import set_nodes_attributes_in_nncf_graph from nncf.torch.dynamic_graph.scope import Scope from nncf.torch.graph.graph import PTNNCFGraph @@ -29,10 +28,7 @@ def __init__(self, custom_forward_fn: Callable[[torch.nn.Module], Any]): self.custom_forward_fn = custom_forward_fn def build_graph( - self, - model: torch.nn.Module, - context_to_use: Optional[TracingContext] = None, - as_eval: bool = False + self, model: torch.nn.Module, context_to_use: Optional[TracingContext] = None, as_eval: bool = False ) -> PTNNCFGraph: tracer = GraphTracer(self.custom_forward_fn) dynamic_graph = tracer.trace_graph(model, context_to_use, as_eval) diff --git a/nncf/torch/model_creation.py b/nncf/torch/model_creation.py index 0d5b878c031..3900ba9d2c5 100644 --- a/nncf/torch/model_creation.py +++ b/nncf/torch/model_creation.py @@ -165,21 +165,24 @@ def get_input_info_from_config(config: NNCFConfig) -> ModelInputInfo: if has_input_info_field(config): return FillerInputInfo.from_nncf_config(config) - nncf_logger.debug("Config has no 'input_info' section, trying to use dataloader output as model inputs " - "for graph building.") + nncf_logger.debug( + "Config has no 'input_info' section, trying to use dataloader output as model inputs " "for graph building." 
+ ) exact_info = ExactInputsInfo.from_nncf_config_dataloaders(config) if exact_info is not None: return exact_info - raise RuntimeError("Could not determine tensor inputs for the model's forward call.\n" - "If you are using the `nncf.quantize` API, make sure that you supply the " - "calibration dataloader to the `nncf.quantize` call.\n" - "If you are using the `create_compressed_model` API, either specify the " - "inputs by using the 'input_info' section in the NNCFConfig, or attach an " - "initialization dataloader to the NNCFConfig by calling " - "`NNCFConfig.register_extra_structs(...)` with one of the following extra " - f"structures:\n" - f"{EXTRA_STRUCTS_WITH_DATALOADERS}\n" - f"or by calling `nncf.torch.register_default_init_args`") + raise RuntimeError( + "Could not determine tensor inputs for the model's forward call.\n" + "If you are using the `nncf.quantize` API, make sure that you supply the " + "calibration dataloader to the `nncf.quantize` call.\n" + "If you are using the `create_compressed_model` API, either specify the " + "inputs by using the 'input_info' section in the NNCFConfig, or attach an " + "initialization dataloader to the NNCFConfig by calling " + "`NNCFConfig.register_extra_structs(...)` with one of the following extra " + f"structures:\n" + f"{EXTRA_STRUCTS_WITH_DATALOADERS}\n" + f"or by calling `nncf.torch.register_default_init_args`" + ) def create_nncf_network_with_inputs_from_config(model: torch.nn.Module, config: NNCFConfig): @@ -315,4 +318,3 @@ def create_compression_algorithm_builder_from_algo_names( else: builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init) return builder - diff --git a/nncf/torch/nncf_network.py b/nncf/torch/nncf_network.py index b2542392ccf..5b3279a084e 100644 --- a/nncf/torch/nncf_network.py +++ b/nncf/torch/nncf_network.py @@ -41,7 +41,7 @@ from nncf.torch.dynamic_graph.graph import ShapeIgnoringTensorMetaComparator from nncf.torch.dynamic_graph.graph_tracer import GraphTracer from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn -from nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager, ModelInputElement +from nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager from nncf.torch.dynamic_graph.io_handling import ModelInputInfo from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk @@ -499,9 +499,7 @@ def rebuild_graph(self, *input_args): builder = GraphBuilder(dummy_forward_fn) with training_mode_switcher(self._model_ref, is_training=False): - self._compressed_graph = builder.build_graph( - self._model_ref, self._compressed_context - ) + self._compressed_graph = builder.build_graph(self._model_ref, self._compressed_context) def is_scope_in_nncf_module_scope(self, scope: Scope) -> bool: norm_nncf_scopes = [] diff --git a/nncf/torch/quantization/quantize_model.py b/nncf/torch/quantization/quantize_model.py index f23c9786983..fad8fefeb9f 100644 --- a/nncf/torch/quantization/quantize_model.py +++ b/nncf/torch/quantization/quantize_model.py @@ -39,6 +39,7 @@ class CalibrationDataLoaderAdapter(PTInitializingDataLoader): """ An adapter from the nncf.Dataset to the PTInitializingDataLoader. 
""" + def get_target(self, dataloader_output: Any) -> Any: raise NotImplementedError @@ -216,10 +217,7 @@ def quantize_impl( ) clone_model = deepcopy(model) - compression_ctrl, compressed_model = create_compressed_model( - model=clone_model, - config=nncf_config - ) + compression_ctrl, compressed_model = create_compressed_model(model=clone_model, config=nncf_config) compression_ctrl.prepare_for_export() compressed_model.nncf.disable_dynamic_graph_building() diff --git a/tests/torch/automl/test_quantization_env.py b/tests/torch/automl/test_quantization_env.py index eec2c6eaa16..15bdc40f79f 100644 --- a/tests/torch/automl/test_quantization_env.py +++ b/tests/torch/automl/test_quantization_env.py @@ -22,7 +22,7 @@ from nncf.torch.automl.environment.quantization_env import ModelSizeCalculator from nncf.torch.automl.environment.quantization_env import QuantizationEnv from nncf.torch.automl.environment.quantization_env import QuantizationEnvParams - +from nncf.torch.dynamic_graph.io_handling import FillerInputInfo from nncf.torch.hardware.config import PTHWConfig from nncf.torch.nncf_network import NNCFNetwork from nncf.torch.quantization.algo import ExperimentalQuantizationBuilder @@ -38,7 +38,7 @@ def create_test_quantization_env(model_creator=BasicConvTestModel, input_info_cf input_info_cfg = {"input_info": {"sample_size": [1, 1, 4, 4]}} model = model_creator() - nncf_network = NNCFNetwork(model, input_info=FillerInputInfo(input_info_cfg)) + nncf_network = NNCFNetwork(model, input_info=FillerInputInfo.from_nncf_config(input_info_cfg)) hw_config_type = HWConfigType.VPU hw_config_path = HWConfig.get_path_to_hw_config(hw_config_type) hw_config = PTHWConfig.from_json(hw_config_path) diff --git a/tests/torch/helpers.py b/tests/torch/helpers.py index 3b95a56c4e9..f984cb2d58f 100644 --- a/tests/torch/helpers.py +++ b/tests/torch/helpers.py @@ -31,7 +31,7 @@ from nncf.config.structures import BNAdaptationInitArgs from nncf.torch.algo_selector import PT_COMPRESSION_ALGORITHMS from nncf.torch.compression_method_api import PTCompressionAlgorithmController - +from nncf.torch.dynamic_graph.io_handling import FillerInputInfo from nncf.torch.dynamic_graph.scope import Scope from nncf.torch.initialization import PTInitializingDataLoader from nncf.torch.initialization import register_default_init_args @@ -374,8 +374,8 @@ def __getitem__(self, index): def create_any_mock_dataloader( dataset_cls: type, config: NNCFConfig, num_samples: int = 1, batch_size: int = 1 ) -> DataLoader: - input_infos_list = FillerInputInfo(config) - input_sample_size = input_infos_list[0].shape + input_info = FillerInputInfo.from_nncf_config(config) + input_sample_size = input_info.elements[0].shape data_loader = DataLoader( dataset_cls(input_sample_size[1:], num_samples), batch_size=batch_size, diff --git a/tests/torch/nas/creators.py b/tests/torch/nas/creators.py index f2a768aeefa..8934b4b636d 100644 --- a/tests/torch/nas/creators.py +++ b/tests/torch/nas/creators.py @@ -27,7 +27,6 @@ ) from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn from nncf.torch.dynamic_graph.io_handling import FillerInputInfo - from nncf.torch.graph.transformations.layout import PTTransformationLayout from nncf.torch.model_creation import create_nncf_network_with_inputs_from_config from nncf.torch.model_transformer import PTModelTransformer diff --git a/tests/torch/ptq/test_quantize_model_helpers.py b/tests/torch/ptq/test_quantize_model_helpers.py index 30b7d2e4742..5c688933d5c 100644 --- 
a/tests/torch/ptq/test_quantize_model_helpers.py +++ b/tests/torch/ptq/test_quantize_model_helpers.py @@ -13,7 +13,7 @@ from torch import nn from nncf.data import Dataset -from nncf.experimental.torch.quantization.quantize_model import create_nncf_network_with_config_ptq +from nncf.experimental.torch.quantization.quantize_model import create_nncf_network_ptq class TestModel(nn.Module): @@ -35,7 +35,7 @@ def transform_fn(inputs): return x dataset = Dataset([(torch.empty(input_shape), 1)] * 3, transform_fn) - nncf_network = create_nncf_network_with_config_ptq(model, dataset) + nncf_network = create_nncf_network_ptq(model, dataset) nncf_graph = nncf_network.nncf.get_original_graph() all_nodes = nncf_graph.get_all_nodes() assert len(all_nodes) == 2 diff --git a/tests/torch/quantization/quantization_helpers.py b/tests/torch/quantization/quantization_helpers.py index d9d1d7cb021..a8f5f213552 100644 --- a/tests/torch/quantization/quantization_helpers.py +++ b/tests/torch/quantization/quantization_helpers.py @@ -11,7 +11,7 @@ import torch from nncf import NNCFConfig - +from nncf.torch.dynamic_graph.io_handling import FillerInputInfo from tests.torch.helpers import get_empty_config @@ -75,8 +75,8 @@ def distributed_init_test_default(gpu, ngpus_per_node, config): def create_rank_dataloader(config, rank, num_samples=10, batch_size=3): - input_infos_list = FillerInputInfo(config) - input_sample_size = input_infos_list[0].shape + input_infos_list = FillerInputInfo.from_nncf_config(config) + input_sample_size = input_infos_list.elements[0].shape data_loader = torch.utils.data.DataLoader( RankDatasetMock(input_sample_size[1:], rank, num_samples), batch_size=batch_size, diff --git a/tests/torch/quantization/test_hawq_precision_init.py b/tests/torch/quantization/test_hawq_precision_init.py index 9284296c311..0d8542537eb 100644 --- a/tests/torch/quantization/test_hawq_precision_init.py +++ b/tests/torch/quantization/test_hawq_precision_init.py @@ -29,6 +29,7 @@ from examples.common.sample_config import SampleConfig from examples.torch.classification.main import create_cifar from examples.torch.object_detection.models.ssd_vgg import SSD_VGG +from nncf import NNCFConfig from nncf.common.graph import NNCFNodeName from nncf.common.hardware.config import HWConfigType from nncf.common.quantization.quantizer_setup import SingleConfigQuantizerSetup @@ -36,7 +37,7 @@ from nncf.common.utils.debug import set_debug_log_dir from nncf.torch import register_default_init_args from nncf.torch.checkpoint_loading import load_state - +from nncf.torch.dynamic_graph.io_handling import FillerInputInfo from nncf.torch.initialization import default_criterion_fn from nncf.torch.quantization.adjust_padding import add_adjust_padding_nodes from nncf.torch.quantization.hessian_trace import HessianTraceEstimator @@ -78,8 +79,8 @@ from tests.torch.test_models.mobilenet import mobilenet_v2 -def create_test_dataloaders(config, dataset_dir): - input_info = FillerInputInfo(config)[0] +def create_test_dataloaders(config: NNCFConfig, dataset_dir): + input_info = FillerInputInfo.from_nncf_config(config).elements[0] image_size = input_info.shape[-1] batch_size = input_info.shape[0] normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) diff --git a/tests/torch/quantization/test_scheduler.py b/tests/torch/quantization/test_scheduler.py index 9c474d82d58..9f0ed0b6924 100644 --- a/tests/torch/quantization/test_scheduler.py +++ b/tests/torch/quantization/test_scheduler.py @@ -15,7 +15,7 @@ from nncf.common.statistics import 
NNCFStatistics from nncf.config.structures import QuantizationRangeInitArgs from nncf.torch import register_default_init_args - +from nncf.torch.dynamic_graph.io_handling import FillerInputInfo from nncf.torch.initialization import wrap_dataloader_for_init from nncf.torch.quantization.base_ctrl import QuantizationControllerBase from nncf.torch.quantization.schedulers import StagedQuantizationScheduler @@ -192,8 +192,8 @@ def test_staged_scheduler_with_range_init(): register_bn_adaptation_init_args(config) model = squeezenet1_1(num_classes=10, dropout=0) - input_infos_list = FillerInputInfo(config) - input_sample_size = input_infos_list[0].shape + input_infos_list = FillerInputInfo.from_nncf_config(config) + input_sample_size = input_infos_list.elements[0].shape data_loader = DataLoader( OnesDatasetMock(input_sample_size[1:]), batch_size=1, @@ -254,8 +254,8 @@ def test_staged_scheduler_with_hawq(): num_classes = 10 model = squeezenet1_1(num_classes=num_classes, dropout=0) - input_infos_list = FillerInputInfo(config) - input_sample_size = input_infos_list[0].shape + input_infos_list = FillerInputInfo.from_nncf_config(config) + input_sample_size = input_infos_list.elements[0].shape data_loader = DataLoader( HawqDatasetMock(input_sample_size[1:], num_classes), batch_size=1, diff --git a/tests/torch/sparsity/movement/helpers/run_recipe.py b/tests/torch/sparsity/movement/helpers/run_recipe.py index 89ca9d4658f..f5f172a55fb 100644 --- a/tests/torch/sparsity/movement/helpers/run_recipe.py +++ b/tests/torch/sparsity/movement/helpers/run_recipe.py @@ -20,7 +20,6 @@ import torch.nn import torch.nn.functional as F import torch.utils.data -from datasets import Dataset # pylint: disable=no-name-in-module from transformers import AutoModelForAudioClassification from transformers import AutoModelForImageClassification from transformers import AutoModelForSequenceClassification @@ -34,6 +33,7 @@ from transformers import SwinConfig from transformers import Wav2Vec2Config +from datasets import Dataset # pylint: disable=no-name-in-module from nncf import NNCFConfig from nncf.experimental.torch.sparsity.movement.scheduler import MovementSchedulerParams from nncf.torch.dynamic_graph.io_handling import FillerInputElement @@ -482,7 +482,9 @@ class SwinRunRecipe(BaseMockRunRecipe): @property def model_input_info(self) -> ModelInputInfo: img_size = self.model_config.image_size - return FillerInputInfo([FillerInputElement(shape=[1, self.model_config.num_channels, img_size, img_size], keyword="pixel_values")]) + return FillerInputInfo( + [FillerInputElement(shape=[1, self.model_config.num_channels, img_size, img_size], keyword="pixel_values")] + ) @property def transformer_block_info(self) -> List[TransformerBlockInfo]: diff --git a/tests/torch/sparsity/movement/helpers/trainer.py b/tests/torch/sparsity/movement/helpers/trainer.py index 7da241bf712..63e62f5e0db 100644 --- a/tests/torch/sparsity/movement/helpers/trainer.py +++ b/tests/torch/sparsity/movement/helpers/trainer.py @@ -14,7 +14,6 @@ import numpy as np import torch -from datasets import Dataset # pylint: disable=no-name-in-module from transformers import TrainingArguments from transformers.trainer import Trainer from transformers.trainer_callback import TrainerCallback @@ -22,6 +21,7 @@ from transformers.trainer_callback import TrainerState from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR +from datasets import Dataset # pylint: disable=no-name-in-module from nncf.api.compression import CompressionAlgorithmController from nncf.common.compression 
import BaseCompressionAlgorithmController from nncf.common.utils.tensorboard import prepare_for_tensorboard diff --git a/tests/torch/sparsity/movement/test_model_saving.py b/tests/torch/sparsity/movement/test_model_saving.py index c20a5cc6255..4fc6ee4d044 100644 --- a/tests/torch/sparsity/movement/test_model_saving.py +++ b/tests/torch/sparsity/movement/test_model_saving.py @@ -18,7 +18,6 @@ import pytest import torch from addict import Dict -from datasets import Dataset # pylint: disable=no-name-in-module from onnx import numpy_helper from openvino.runtime import Core from openvino.runtime import serialize @@ -29,6 +28,7 @@ from scipy.special import softmax from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR +from datasets import Dataset # pylint: disable=no-name-in-module from nncf.torch import create_compressed_model from nncf.torch.checkpoint_loading import load_state from tests.torch.helpers import PTTensorListComparator diff --git a/tests/torch/sparsity/movement/training_scripts/run_glue.py b/tests/torch/sparsity/movement/training_scripts/run_glue.py index 09a6bf6c737..de6735e011d 100644 --- a/tests/torch/sparsity/movement/training_scripts/run_glue.py +++ b/tests/torch/sparsity/movement/training_scripts/run_glue.py @@ -12,12 +12,13 @@ from pathlib import Path from typing import Dict, List, Optional, Tuple -import datasets import evaluate import jstyleson import numpy as np from transformers.training_args import ParallelMode +import datasets + # isort: off from nncf import NNCFConfig from nncf.api.compression import CompressionAlgorithmController diff --git a/tests/torch/test_compressed_graph.py b/tests/torch/test_compressed_graph.py index 6314389503f..31b4ce8c13d 100644 --- a/tests/torch/test_compressed_graph.py +++ b/tests/torch/test_compressed_graph.py @@ -30,8 +30,8 @@ from nncf.common.quantization.quantizer_setup import SingleConfigQuantizerSetup from nncf.torch import nncf_model_input from nncf.torch import nncf_model_output -from nncf.torch.dynamic_graph.io_handling import FillerInputElement from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn +from nncf.torch.dynamic_graph.io_handling import FillerInputElement from nncf.torch.dynamic_graph.io_handling import FillerInputInfo from nncf.torch.graph.graph import PTNNCFGraph from nncf.torch.graph.graph_builder import GraphBuilder diff --git a/tests/torch/test_graph_building.py b/tests/torch/test_graph_building.py index ab6f9da1125..74bf01cf13e 100644 --- a/tests/torch/test_graph_building.py +++ b/tests/torch/test_graph_building.py @@ -10,10 +10,7 @@ # limitations under the License. 
from copy import deepcopy from dataclasses import dataclass -from typing import Dict -from typing import List, Tuple -from typing import Optional -from typing import Union +from typing import Dict, List, Tuple, Union from unittest.mock import MagicMock import pytest @@ -89,10 +86,7 @@ def forward(self, x): mod = Model() - tracer = GraphTracer( - custom_forward_fn=create_dummy_forward_fn(FillerInputInfo([FillerInputElement([1, 1, 1, 1])]) - ) - ) + tracer = GraphTracer(custom_forward_fn=create_dummy_forward_fn(FillerInputInfo([FillerInputElement([1, 1, 1, 1])]))) graph = tracer.trace_graph(mod) unique_op_exec_contexts = set() @@ -187,9 +181,11 @@ def test_activation_shape_tracing(input_shape: Tuple[int, ...]): model = ModelForTest() graph_builder = GraphBuilder( create_dummy_forward_fn( - FillerInputInfo([ - FillerInputElement(input_shape), - ]), + FillerInputInfo( + [ + FillerInputElement(input_shape), + ] + ), with_input_tracing=True, with_output_tracing=True, ) @@ -258,7 +254,8 @@ def forward(self, x): def test_concat_attributes_saved_during_graph_building(input_shape): model = ModelForTestWithReshapeFlattenAndConcat() graph_builder = GraphBuilder( - create_dummy_forward_fn(FillerInputInfo([FillerInputElement(input_shape)]), + create_dummy_forward_fn( + FillerInputInfo([FillerInputElement(input_shape)]), with_input_tracing=True, with_output_tracing=True, ) @@ -288,9 +285,10 @@ def test_reshape_attributes_saved_during_graph_building(input_shape): graph_builder = GraphBuilder( create_dummy_forward_fn( FillerInputInfo( - [ - FillerInputElement(input_shape), - ]), + [ + FillerInputElement(input_shape), + ] + ), with_input_tracing=True, with_output_tracing=True, ) @@ -341,9 +339,11 @@ def test_permute_attributes_saved_during_graph_building(input_shape): model = ModelWithPermute() graph_builder = GraphBuilder( create_dummy_forward_fn( - FillerInputInfo([ - FillerInputElement(input_shape), - ]), + FillerInputInfo( + [ + FillerInputElement(input_shape), + ] + ), with_input_tracing=True, with_output_tracing=True, ) @@ -394,9 +394,11 @@ def test_split_attributes(input_shape): model = ModelForTestWithSplit(input_shape) graph_builder = GraphBuilder( create_dummy_forward_fn( - FillerInputInfo([ - FillerInputElement(input_shape), - ]), + FillerInputInfo( + [ + FillerInputElement(input_shape), + ] + ), with_input_tracing=True, with_output_tracing=True, ) @@ -429,9 +431,11 @@ def forward(self, x): def test_getitem_attributes(input_shape): model = SplitByGetItemModel() custom_forward_fn = create_dummy_forward_fn( - FillerInputInfo([ - FillerInputElement(input_shape), - ]), + FillerInputInfo( + [ + FillerInputElement(input_shape), + ] + ), with_input_tracing=True, with_output_tracing=True, ) @@ -476,9 +480,11 @@ def _get_default_nncf_graph_edge(from_node, to_node, input_port_id, output_port_ model = ParallelEdgesModel() graph_builder = GraphBuilder( create_dummy_forward_fn( - FillerInputInfo([ - FillerInputElement(input_shape), - ]), + FillerInputInfo( + [ + FillerInputElement(input_shape), + ] + ), with_input_tracing=True, with_output_tracing=True, ) @@ -502,7 +508,6 @@ def _get_default_nncf_graph_edge(from_node, to_node, input_port_id, output_port_ assert set(input_node_output_edges) == ref_output_edges - class MockModel(torch.nn.Module): def __init__(self, stub_forward): super().__init__() @@ -512,6 +517,7 @@ def __init__(self, stub_forward): def forward(self, *args, **kwargs): return self.stub_forward(*args, **kwargs) + class RandomRefTensor: def __init__(self, shape: List[int]): self.tensor = 
torch.rand(shape) @@ -525,8 +531,9 @@ def check_arg(test_arg: torch.Tensor, ref_arg: Union[torch.Tensor, RandomRefTens else: assert torch.allclose(test_arg, ref_arg) + class MockInputInfo(ModelInputInfo): - MOCK_ARGS = (torch.Tensor([42.0]), ) + MOCK_ARGS = (torch.Tensor([42.0]),) MOCK_KWARGS = {"foo": torch.ones([1, 3])} def get_forward_inputs(self, device: str = None) -> Tuple[Tuple, Dict]: @@ -553,6 +560,7 @@ def test_input_info_args_are_passed_into_forward(mocker): for keyword, arg in forward_call_kwargs.items(): check_arg(arg, ref_kwargs[keyword]) + @dataclass class FillerInputInfoGenerationTestStruct: config_input_info_subdict: Union[List[Dict], Dict] @@ -566,8 +574,8 @@ class FillerInputInfoGenerationTestStruct: FILLER_GEN_TEST_STRUCTS = [ FillerInputInfoGenerationTestStruct( config_input_info_subdict={"sample_size": [2, 3, 300, 300], "type": "float", "filler": "zeros"}, - ref_args=(torch.zeros([2, 3, 300, 300]), ), - ref_kwargs={} + ref_args=(torch.zeros([2, 3, 300, 300]),), + ref_kwargs={}, ), FillerInputInfoGenerationTestStruct( config_input_info_subdict=[ @@ -575,17 +583,20 @@ class FillerInputInfoGenerationTestStruct: {"sample_size": [1, 128], "type": "long", "filler": "ones"}, {"sample_size": [1, 128], "type": "long", "filler": "zeros"}, ], - ref_args=(torch.ones([1, 128], dtype=torch.long), - torch.ones([1, 128], dtype=torch.long), - torch.zeros([1, 128], dtype=torch.long)), - ref_kwargs={}), + ref_args=( + torch.ones([1, 128], dtype=torch.long), + torch.ones([1, 128], dtype=torch.long), + torch.zeros([1, 128], dtype=torch.long), + ), + ref_kwargs={}, + ), FillerInputInfoGenerationTestStruct( config_input_info_subdict=[ {"sample_size": [2, 3, 300, 300], "type": "float", "filler": "zeros"}, {"sample_size": [1, 128], "type": "long", "filler": "ones", "keyword": TEST_KEYWORD_1}, ], ref_args=(torch.zeros([2, 3, 300, 300]),), - ref_kwargs={TEST_KEYWORD_1: torch.ones([1, 128], dtype=torch.long)} + ref_kwargs={TEST_KEYWORD_1: torch.ones([1, 128], dtype=torch.long)}, ), FillerInputInfoGenerationTestStruct( config_input_info_subdict=[ @@ -593,15 +604,17 @@ class FillerInputInfoGenerationTestStruct: {"sample_size": [2, 3, 300, 300], "type": "float", "filler": "zeros"}, {"sample_size": [1, 128], "type": "long", "filler": "ones", "keyword": TEST_KEYWORD_2}, ], - ref_args=(torch.zeros([2, 3, 300, 300]), ), - ref_kwargs={TEST_KEYWORD_1: RandomRefTensor([8, 7]), - TEST_KEYWORD_2: torch.ones([1, 128], dtype=torch.long)}) + ref_args=(torch.zeros([2, 3, 300, 300]),), + ref_kwargs={TEST_KEYWORD_1: RandomRefTensor([8, 7]), TEST_KEYWORD_2: torch.ones([1, 128], dtype=torch.long)}, + ), ] @pytest.mark.parametrize("filler_gen_test_struct", FILLER_GEN_TEST_STRUCTS) def test_filler_input_info_arg_generation(filler_gen_test_struct: FillerInputInfoGenerationTestStruct): - filler_input_info = FillerInputInfo.from_nncf_config(NNCFConfig.from_dict({"input_info": filler_gen_test_struct.config_input_info_subdict})) + filler_input_info = FillerInputInfo.from_nncf_config( + NNCFConfig.from_dict({"input_info": filler_gen_test_struct.config_input_info_subdict}) + ) test_args, test_kwargs = filler_input_info.get_forward_inputs() for test_arg, ref_arg in zip(test_args, filler_gen_test_struct.ref_args): @@ -754,9 +767,11 @@ def test_get_all_nodes(): builder = GraphBuilder( create_dummy_forward_fn( - FillerInputInfo([ - FillerInputElement((1, 1, 4, 4)), - ]) + FillerInputInfo( + [ + FillerInputElement((1, 1, 4, 4)), + ] + ) ) ) graph = builder.build_graph(model) @@ -784,9 +799,11 @@ def forward(self, x: 
torch.Tensor): def test_integer_path_marking(): - input_infos = FillerInputInfo([ - FillerInputElement(ModelWithIntegerPaths.INPUT_SHAPE), - ]) + input_infos = FillerInputInfo( + [ + FillerInputElement(ModelWithIntegerPaths.INPUT_SHAPE), + ] + ) builder = GraphBuilder(create_dummy_forward_fn(input_infos)) nncf_graph = builder.build_graph(ModelWithIntegerPaths()) edges = list(nncf_graph.get_all_edges()) diff --git a/tests/torch/test_input_management.py b/tests/torch/test_input_management.py index 9ee5e318bfe..bcbb3ade7f4 100644 --- a/tests/torch/test_input_management.py +++ b/tests/torch/test_input_management.py @@ -11,13 +11,15 @@ import inspect from dataclasses import dataclass -from typing import Tuple, Dict, List +from typing import Dict, List, Tuple import pytest import torch -from nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager, FillerInputInfo, FillerInputElement, \ - ModelInputInfo +from nncf.torch.dynamic_graph.io_handling import FillerInputElement +from nncf.torch.dynamic_graph.io_handling import FillerInputInfo +from nncf.torch.dynamic_graph.io_handling import InputInfoWrapManager +from nncf.torch.dynamic_graph.io_handling import ModelInputInfo from tests.torch.helpers import MockModel from tests.torch.helpers import create_compressed_model_and_algo_for_test from tests.torch.helpers import register_bn_adaptation_init_args @@ -36,6 +38,7 @@ def forward(arg1=None, arg2=None, arg3=None, arg4=None, arg5=TENSOR_DEFAULT): pass + @dataclass class InputWrappingTestStruct: input_info: ModelInputInfo @@ -54,36 +57,38 @@ def get_case_id(self) -> str: model_args=(TENSOR_1,), model_kwargs={}, ref_wrapping_sequence=[TENSOR_1], - case_id="single_arg" + case_id="single_arg", ), InputWrappingTestStruct( input_info=FillerInputInfo([FillerInputElement([1], keyword="arg2")]), model_args=(), model_kwargs={"arg2": TENSOR_2}, ref_wrapping_sequence=[TENSOR_2], - case_id="single_kwarg" + case_id="single_kwarg", ), InputWrappingTestStruct( - input_info=FillerInputInfo([ - FillerInputElement([1]), - FillerInputElement([1]), - FillerInputElement([1], keyword="arg3"), - FillerInputElement([1], keyword="arg5"), - ]), + input_info=FillerInputInfo( + [ + FillerInputElement([1]), + FillerInputElement([1]), + FillerInputElement([1], keyword="arg3"), + FillerInputElement([1], keyword="arg5"), + ] + ), model_args=(TENSOR_1, TENSOR_2), model_kwargs={"arg3": TENSOR_3, "arg5": TENSOR_4}, ref_wrapping_sequence=[TENSOR_1, TENSOR_2, TENSOR_3, TENSOR_4], - case_id="args_and_kwargs" + case_id="args_and_kwargs", ), # More args supplied than what is specified by input_infos - ignore the unspecified args InputWrappingTestStruct( - input_info=FillerInputInfo([FillerInputElement([1]), - FillerInputElement([1], keyword="arg3"), - FillerInputElement([1], keyword="arg5")]), + input_info=FillerInputInfo( + [FillerInputElement([1]), FillerInputElement([1], keyword="arg3"), FillerInputElement([1], keyword="arg5")] + ), model_args=(TENSOR_1, TENSOR_2), model_kwargs={"arg3": TENSOR_3, "arg5": TENSOR_4}, ref_wrapping_sequence=[TENSOR_1, TENSOR_3, TENSOR_4], - case_id="more_args_than_specified" + case_id="more_args_than_specified", ), # More args and kwargs supplied than what is specified by input_infos - ignore the unspecified args and kwargs InputWrappingTestStruct( @@ -91,7 +96,7 @@ def get_case_id(self) -> str: model_args=(TENSOR_1, TENSOR_2), model_kwargs={"arg4": TENSOR_3, "arg5": TENSOR_4}, ref_wrapping_sequence=[TENSOR_1, TENSOR_3], - case_id="more_args_and_kwargs_than_specified" + 
case_id="more_args_and_kwargs_than_specified", ), # arg specified, but kwarg supplied InputWrappingTestStruct( @@ -99,14 +104,14 @@ def get_case_id(self) -> str: model_args=(), model_kwargs={"arg3": TENSOR_1}, ref_wrapping_sequence=[TENSOR_FROM_INPUT_INFO_1], - case_id="kwarg_instead_of_arg" + case_id="kwarg_instead_of_arg", ), InputWrappingTestStruct( input_info=FillerInputInfo([FillerInputElement([1], keyword="arg5")]), model_args=(), model_kwargs={"arg1": TENSOR_1}, ref_wrapping_sequence=[TENSOR_DEFAULT], - case_id="arg_as_kwarg" + case_id="arg_as_kwarg", ), # kwarg specified, but missing in supplied kwargs InputWrappingTestStruct( @@ -114,33 +119,36 @@ def get_case_id(self) -> str: model_args=(TENSOR_1, TENSOR_2), model_kwargs={"arg4": TENSOR_3, "arg5": TENSOR_4}, ref_wrapping_sequence=[TENSOR_1, TENSOR_FROM_INPUT_INFO_2], - case_id="missing_kwarg" + case_id="missing_kwarg", ), # More args specified than supplied InputWrappingTestStruct( - input_info=FillerInputInfo([FillerInputElement([1]), - FillerInputElement([2]), - FillerInputElement([3], keyword="arg3")]), + input_info=FillerInputInfo( + [FillerInputElement([1]), FillerInputElement([2]), FillerInputElement([3], keyword="arg3")] + ), model_args=(TENSOR_1,), model_kwargs={"arg3": TENSOR_2}, ref_wrapping_sequence=[TENSOR_1, TENSOR_FROM_INPUT_INFO_2, TENSOR_2], - case_id="less_args_supplied" + case_id="less_args_supplied", ), # More kwargs specified than supplied InputWrappingTestStruct( - input_info=FillerInputInfo([FillerInputElement([1]), - FillerInputElement([2], keyword="arg2"), - FillerInputElement([3], keyword="arg3")]), + input_info=FillerInputInfo( + [FillerInputElement([1]), FillerInputElement([2], keyword="arg2"), FillerInputElement([3], keyword="arg3")] + ), model_args=(TENSOR_1,), model_kwargs={"arg2": TENSOR_2}, ref_wrapping_sequence=[TENSOR_1, TENSOR_2, TENSOR_FROM_INPUT_INFO_3], - case_id="less_kwargs_supplied" + case_id="less_kwargs_supplied", ), ] -@pytest.fixture(params=INPUT_WRAPPING_TEST_CASES, name="inputs_test_struct", - ids=[x.get_case_id() for x in INPUT_WRAPPING_TEST_CASES]) +@pytest.fixture( + params=INPUT_WRAPPING_TEST_CASES, + name="inputs_test_struct", + ids=[x.get_case_id() for x in INPUT_WRAPPING_TEST_CASES], +) def inputs_test_struct_(request): return request.param diff --git a/tests/torch/test_layer_attributes.py b/tests/torch/test_layer_attributes.py index e23075e8eae..ea08c336a4d 100644 --- a/tests/torch/test_layer_attributes.py +++ b/tests/torch/test_layer_attributes.py @@ -8,7 +8,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Callable, List, Optional, Type +from typing import Callable, Optional, Type import pytest from torch import Size diff --git a/tests/torch/test_nncf_network.py b/tests/torch/test_nncf_network.py index df83723bfac..141445f7865 100644 --- a/tests/torch/test_nncf_network.py +++ b/tests/torch/test_nncf_network.py @@ -74,9 +74,11 @@ def forward(self, inputs): qnet_no_shape = NNCFNetwork( deepcopy(model), - input_info=FillerInputInfo([ - FillerInputElement(input_shape_1), - ]), + input_info=FillerInputInfo( + [ + FillerInputElement(input_shape_1), + ] + ), scopes_without_shape_matching=["MatMulModel"], ) @@ -95,9 +97,11 @@ def forward(self, inputs): qnet = NNCFNetwork( model, - input_info=FillerInputInfo([ - FillerInputElement(input_shape_1), - ]), + input_info=FillerInputInfo( + [ + FillerInputElement(input_shape_1), + ] + ), ) context = qnet.nncf.get_tracing_context() context.enable_trace_dynamic_graph() @@ -240,8 +244,7 @@ def test_get_weighted_original_graph_nodes(): # pylint: disable=protected-access def test_get_op_nodes_in_scope(): model = TwoConvTestModel() - nncf_model = NNCFNetwork(deepcopy(model), - input_info=FillerInputInfo([FillerInputElement([1, 1, 4, 4])])) + nncf_model = NNCFNetwork(deepcopy(model), input_info=FillerInputInfo([FillerInputElement([1, 1, 4, 4])])) nncf_graph = nncf_model.nncf.get_original_graph() # Valid scopes should be successfully found @@ -622,7 +625,8 @@ def test_forward_signature_is_same_as_for_original_model(simple_net): # Verify that if we create 2 NNCFNetworks, then each will have its own signature another_original_obj = MultiInputModel() another_nncf_net = NNCFNetwork( - MultiInputModel(), input_info=FillerInputInfo([FillerInputElement([1, 1, 1, 1]), FillerInputElement([1, 1, 1, 1])]) + MultiInputModel(), + input_info=FillerInputInfo([FillerInputElement([1, 1, 1, 1]), FillerInputElement([1, 1, 1, 1])]), ) assert inspect.signature(another_nncf_net.forward) == inspect.signature(another_original_obj.forward) assert inspect.signature(simple_net.forward) == inspect.signature(original_obj.forward)
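
For reference, a minimal usage sketch of the reworked input-info API touched by the hunks above. This is illustrative only and not part of the patch; it assumes the `FillerInputInfo.from_nncf_config` / `.elements` API and the `NNCFNetwork(model, input_info=...)` signature exactly as introduced in the changed files, and uses a bare Conv2d as a stand-in model:

import torch

from nncf import NNCFConfig
from nncf.torch.dynamic_graph.io_handling import FillerInputInfo
from nncf.torch.nncf_network import NNCFNetwork

# Input shapes are now derived via FillerInputInfo.from_nncf_config()
# instead of constructing FillerInputInfo directly from the config object.
config = NNCFConfig.from_dict({"input_info": {"sample_size": [1, 3, 224, 224]}})
input_info = FillerInputInfo.from_nncf_config(config)

# Per-input metadata lives on .elements (previously indexed on the object itself).
sample_size = input_info.elements[0].shape  # [1, 3, 224, 224]

# The same input_info object is what NNCFNetwork expects for graph tracing.
model = torch.nn.Conv2d(3, 8, kernel_size=3)
nncf_network = NNCFNetwork(model, input_info=input_info)

When a calibration nncf.Dataset is available instead of an NNCFConfig, the experimental PTQ path in this patch builds the info with ExactInputsInfo.from_nncf_dataset(dataset) and passes it to NNCFNetwork the same way.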