diff --git a/nncf/openvino/graph/node_utils.py b/nncf/openvino/graph/node_utils.py
index 9c9d41137cf..72d6f747be2 100644
--- a/nncf/openvino/graph/node_utils.py
+++ b/nncf/openvino/graph/node_utils.py
@@ -200,37 +200,37 @@ def get_reduce_op(node: ov.Node, output_port_id: int) -> ov.Node:
     return get_reduce_op


-def get_inplace_min_op(node_name: str, reduction_shape: Tuple[int, ...]) -> InplaceInsertionFnType:
+def get_inplace_min_op(node_name: str, reduction_axes: Tuple[int, ...]) -> InplaceInsertionFnType:
     """
     Returns inplace min function that adds reduce min node to a passed node.

     :param node_name: Min reduce node name.
-    :param reduction_shape: Target reduction axes for the reduction node.
+    :param reduction_axes: Target reduction axes for the reduction node.
     :returns: Inplace insertion function to use in ModelTransformer.
     """
-    return get_inplace_reduce_op(opset.reduce_min, node_name, reduction_shape, False)
+    return get_inplace_reduce_op(opset.reduce_min, node_name, reduction_axes, False)


-def get_inplace_max_op(node_name: str, reduction_shape: Tuple[int, ...], use_abs_max: bool) -> InplaceInsertionFnType:
+def get_inplace_max_op(node_name: str, reduction_axes: Tuple[int, ...], use_abs_max: bool) -> InplaceInsertionFnType:
     """
     Returns inplace max function that adds reduce max node to a passed node.

     :param node_name: Max reduce node name.
-    :param reduction_shape: Target reduction axes for the reduction node.
+    :param reduction_axes: Target reduction axes for the reduction node.
     :param use_abs: Wheather reduce absolute values of input tensors or not.
     :returns: Inplace insertion function to use in ModelTransformer.
     """
-    return get_inplace_reduce_op(opset.reduce_max, node_name, reduction_shape, use_abs_max)
+    return get_inplace_reduce_op(opset.reduce_max, node_name, reduction_axes, use_abs_max)


-def get_inplace_mean_op(node_name: str, reduction_shape: Tuple[int, ...]) -> InplaceInsertionFnType:
+def get_inplace_mean_op(node_name: str, reduction_axes: Tuple[int, ...]) -> InplaceInsertionFnType:
     """
     Returns inplace mean function that adds reduce mean node to a passed node.

     :param node_name: Mean reduce node name.
     :returns: Inplace insertion function to use in ModelTransformer.
     """
-    return get_inplace_reduce_op(opset.reduce_mean, node_name, reduction_shape, False)
+    return get_inplace_reduce_op(opset.reduce_mean, node_name, reduction_axes, False)


 def get_inplace_batch_mean_op(node_name: str) -> InplaceInsertionFnType:
@@ -373,18 +373,18 @@ def get_matmul_channel_axes(weights_port_id: int, ndims: int, transpose: bool) -
     return channel_axes


-def get_channel_agnostic_reduction_shape(channel_axes: List[int], shape: List[int]) -> Tuple[int]:
+def get_channel_agnostic_reduction_axes(channel_axes: List[int], shape: List[int]) -> Tuple[int]:
     """
-    Returns filtered reduction shape without axes that corresponds channels.
+    Returns filtered reduction axes without axes that correspond to channels.

     :param channel_axes: List of the channel axes.
     :param shape: Shape that need to be filtered.
     :return: Reduction shape in tuple format.
""" - reduction_shape = list(range(len(shape))) + reduction_axes = list(range(len(shape))) for channel_axis in sorted(channel_axes, reverse=True): - del reduction_shape[channel_axis] - return tuple(reduction_shape) + del reduction_axes[channel_axis] + return tuple(reduction_axes) def create_bias_tensor(node_without_bias: NNCFNode, graph: NNCFGraph, value: Any) -> np.ndarray: diff --git a/nncf/quantization/algorithms/bias_correction/algorithm.py b/nncf/quantization/algorithms/bias_correction/algorithm.py index c3e1e10a6f6..33b5dc41b97 100644 --- a/nncf/quantization/algorithms/bias_correction/algorithm.py +++ b/nncf/quantization/algorithms/bias_correction/algorithm.py @@ -509,7 +509,7 @@ def get_statistic_points(self, model: TModel, graph: NNCFGraph) -> StatisticPoin TargetType.POST_LAYER_OPERATION, node_name, port_id=OUTPUT_PORT_OF_NODE ) stat_collector = self._backend_entity.mean_statistic_collector( - reduction_shape=channel_axis, num_samples=self.subset_size, inplace=self.inplace_statistics + reduction_axes=channel_axis, num_samples=self.subset_size, inplace=self.inplace_statistics ) statistic_container.add_statistic_point( StatisticPoint( diff --git a/nncf/quantization/algorithms/bias_correction/backend.py b/nncf/quantization/algorithms/bias_correction/backend.py index 08e9a8fbd27..41f535ca435 100644 --- a/nncf/quantization/algorithms/bias_correction/backend.py +++ b/nncf/quantization/algorithms/bias_correction/backend.py @@ -87,7 +87,7 @@ def output_insertion_command(nncf_graph: NNCFGraph, target_point: TargetPoint) - @staticmethod @abstractmethod def mean_statistic_collector( - reduction_shape: ReductionAxes, + reduction_axes: ReductionAxes, inplace: bool, num_samples: Optional[int] = None, window_size: Optional[int] = None, @@ -95,7 +95,7 @@ def mean_statistic_collector( """ Returns backend-specific mean statistic collector. - :param reduction_shape: Channel axis for the statistics aggregation. + :param reduction_axes: Channel axis for the statistics aggregation. :param inplace: Whether to calculate statistic inplace or not. :param num_samples: Maximum number of samples to collect. :param window_size: The maximum size of the samples queue. 
diff --git a/nncf/quantization/algorithms/bias_correction/onnx_backend.py b/nncf/quantization/algorithms/bias_correction/onnx_backend.py
index 0b45a16309e..f773680ca03 100644
--- a/nncf/quantization/algorithms/bias_correction/onnx_backend.py
+++ b/nncf/quantization/algorithms/bias_correction/onnx_backend.py
@@ -77,12 +77,12 @@ def output_insertion_command(nncf_graph: NNCFGraph, target_point: ONNXTargetPoin
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionAxes,
+        reduction_axes: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
     ) -> ONNXMeanStatisticCollector:
-        return ONNXMeanStatisticCollector(reduction_shape, num_samples, window_size)
+        return ONNXMeanStatisticCollector(reduction_axes, num_samples, window_size)

     @staticmethod
     def raw_statistic_collector(inplace: bool, num_samples: int = None) -> ONNXMeanStatisticCollector:
diff --git a/nncf/quantization/algorithms/bias_correction/openvino_backend.py b/nncf/quantization/algorithms/bias_correction/openvino_backend.py
index 29d33131f8e..c4e2dcfa007 100644
--- a/nncf/quantization/algorithms/bias_correction/openvino_backend.py
+++ b/nncf/quantization/algorithms/bias_correction/openvino_backend.py
@@ -65,12 +65,12 @@ def output_insertion_command(nncf_graph: NNCFGraph, target_point: OVTargetPoint)
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionAxes,
+        reduction_axes: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
     ) -> TensorCollector:
-        return get_mean_stat_collector(num_samples, reduction_shape, window_size, inplace)
+        return get_mean_stat_collector(num_samples, reduction_axes, window_size, inplace)

     @staticmethod
     def raw_statistic_collector(inplace: bool, num_samples: int = None) -> TensorCollector:
diff --git a/nncf/quantization/algorithms/channel_alignment/algorithm.py b/nncf/quantization/algorithms/channel_alignment/algorithm.py
index c5cf65dfefd..0170606c64e 100644
--- a/nncf/quantization/algorithms/channel_alignment/algorithm.py
+++ b/nncf/quantization/algorithms/channel_alignment/algorithm.py
@@ -374,11 +374,11 @@ def get_statistic_points(self, model: TModel, graph: NNCFGraph) -> StatisticPoin
         for conv_in, add_in, _ in self._get_node_pairs(graph):
             target_point, node_in = self._get_target_point_and_node_in(conv_in, add_in)
             channel_axis = conv_in.metatype.output_channel_axis
-            reduction_shape = list(range(len(graph.get_output_edges(node_in)[0].tensor_shape)))
-            reduction_shape.remove(channel_axis)
+            reduction_axes = list(range(len(graph.get_output_edges(node_in)[0].tensor_shape)))
+            reduction_axes.remove(channel_axis)

             statistic_collector = self._backend_entity.get_statistic_collector(
-                tuple(reduction_shape), self._quantile, self.subset_size, self.inplace_statistics
+                tuple(reduction_axes), self._quantile, self.subset_size, self.inplace_statistics
             )
             statistic_container.add_statistic_point(
                 StatisticPoint(
diff --git a/nncf/quantization/algorithms/channel_alignment/backend.py b/nncf/quantization/algorithms/channel_alignment/backend.py
index a02b788cf98..9efb53ed4e5 100644
--- a/nncf/quantization/algorithms/channel_alignment/backend.py
+++ b/nncf/quantization/algorithms/channel_alignment/backend.py
@@ -98,12 +98,12 @@ def get_weights_port_ids_for_node(node: NNCFNode) -> Tuple[int, int]:
     @staticmethod
     @abstractmethod
     def get_statistic_collector(
-        reduction_shape, q: float, num_samples: int, inplace: bool
+        reduction_axes, q: float, num_samples: int, inplace: bool
     ) -> TensorStatisticCollectorBase:
         """
         Get backend-specific tensor collector that collects medians of minimal and maximal quantiles.

-        :param reduction_shape: Target reduction shape for the reduction.
+        :param reduction_axes: Target reduction axes for the reduction.
         :param q: Minimal quantile for the tensor collector.
         :param num_samples: Num samples to collect by the tensor collector.
         :param inplace: Should statistic be calculated inplace or out of place.
diff --git a/nncf/quantization/algorithms/channel_alignment/openvino_backend.py b/nncf/quantization/algorithms/channel_alignment/openvino_backend.py
index 0db705b676b..1a3667be9c5 100644
--- a/nncf/quantization/algorithms/channel_alignment/openvino_backend.py
+++ b/nncf/quantization/algorithms/channel_alignment/openvino_backend.py
@@ -78,10 +78,10 @@ def get_add_metatypes():
     @staticmethod
     def get_statistic_collector(
-        reduction_shape, q: float, num_samples: int, inplace: bool
+        reduction_axes, q: float, num_samples: int, inplace: bool
     ) -> TensorStatisticCollectorBase:
         tensor_collector = TensorCollector(OVMinMaxTensorStatistic)
-        quantile_reducer = OVQuantileReducer(reduction_shape, (q, 1 - q), inplace)
+        quantile_reducer = OVQuantileReducer(reduction_axes, (q, 1 - q), inplace)

         for port_id, container_key in enumerate([OVMinMaxTensorStatistic.MIN_STAT, OVMinMaxTensorStatistic.MAX_STAT]):
             aggregator = MedianAggregator(OVNNCFCollectorTensorProcessor, num_samples=num_samples)
diff --git a/nncf/quantization/algorithms/fast_bias_correction/algorithm.py b/nncf/quantization/algorithms/fast_bias_correction/algorithm.py
index c83fe3b8e26..866e2328587 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/algorithm.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/algorithm.py
@@ -271,7 +271,7 @@ def _add_statistic_point(self, container: StatisticPointsContainer, point: Targe
         :param axis: Channel axis for the statistics calculation.
         """
         stat_collector = self._backend_entity.mean_statistic_collector(
-            reduction_shape=axis, num_samples=self.subset_size, inplace=self.inplace_statistics
+            reduction_axes=axis, num_samples=self.subset_size, inplace=self.inplace_statistics
        )
         container.add_statistic_point(
             StatisticPoint(target_point=point, tensor_collector=stat_collector, algorithm=self._algorithm_key)
diff --git a/nncf/quantization/algorithms/fast_bias_correction/backend.py b/nncf/quantization/algorithms/fast_bias_correction/backend.py
index e95c25c9cb7..ee4f4f4afef 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/backend.py
@@ -79,7 +79,7 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> Transform
     @staticmethod
     @abstractmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionAxes,
+        reduction_axes: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
     ) -> TensorStatisticCollectorBase:
         """
         Returns backend-specific mean statistic collector.

-        :param reduction_shape: Channel axes for the statistics aggregation.
+        :param reduction_axes: Channel axes for the statistics aggregation.
         :param inplace: Whether to calculate statistic inplace or not.
         :param num_samples: Maximum number of samples to collect.
         :param window_size: The maximum size of the samples queue.
diff --git a/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py b/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py
index 96f57e77e05..aad081d3a16 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py
@@ -64,12 +64,12 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> ONNXModel
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionAxes,
+        reduction_axes: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
     ) -> ONNXMeanStatisticCollector:
-        return ONNXMeanStatisticCollector(reduction_shape, num_samples, window_size)
+        return ONNXMeanStatisticCollector(reduction_axes, num_samples, window_size)

     @staticmethod
     def get_sub_input_output_names(subgraph: onnx.ModelProto) -> Tuple[str, str]:
diff --git a/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py b/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py
index e4c77f56570..793213c5b75 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py
@@ -56,12 +56,12 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> OVModelEx
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionAxes,
+        reduction_axes: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
     ) -> TensorCollector:
-        return get_mean_stat_collector(num_samples, reduction_shape, window_size, inplace)
+        return get_mean_stat_collector(num_samples, reduction_axes, window_size, inplace)

     @staticmethod
     def get_sub_input_output_names(subgraph: ov.Model) -> Tuple[str, str]:
diff --git a/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py b/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py
index e6c3bf0dd25..c8da52b4f14 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py
@@ -68,12 +68,12 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> PTModelEx
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionAxes,
+        reduction_axes: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
     ) -> TensorCollector:
-        return get_mean_statisitic_collector(num_samples, reduction_shape, window_size)
+        return get_mean_statisitic_collector(num_samples, reduction_axes, window_size)

     @staticmethod
     def get_sub_input_output_names(subgraph: NNCFNetwork) -> Tuple[str, str]:
diff --git a/nncf/quantization/algorithms/min_max/openvino_backend.py b/nncf/quantization/algorithms/min_max/openvino_backend.py
index 89450446ea9..4ad4e309dc8 100644
--- a/nncf/quantization/algorithms/min_max/openvino_backend.py
+++ b/nncf/quantization/algorithms/min_max/openvino_backend.py
@@ -27,7 +27,7 @@
 from nncf.openvino.graph.layer_attributes import OVLayerAttributes
 from nncf.openvino.graph.metatypes import openvino_metatypes as om
 from nncf.openvino.graph.metatypes.groups import OPERATIONS_WITH_WEIGHTS
-from nncf.openvino.graph.node_utils import get_channel_agnostic_reduction_shape
+from nncf.openvino.graph.node_utils import get_channel_agnostic_reduction_axes
 from nncf.openvino.graph.node_utils import get_weight_channel_axes
 from nncf.openvino.graph.transformations.commands import OVQuantizerInsertionCommand
 from nncf.openvino.graph.transformations.commands import OVTargetPoint
@@ -122,7 +122,7 @@ def unify_statistics(statistics: List[OVMinMaxTensorStatistic]) -> OVMinMaxTenso
         return OVMinMaxTensorStatistic(min_values=min_values, max_values=max_values)

     @staticmethod
-    def _get_reduction_shape_and_use_abs_max(
+    def _get_reduction_axes_and_use_abs_max(
         nncf_graph: NNCFGraph, target_point: OVTargetPoint, quantizer_config: QuantizerConfig
     ) -> Tuple[ReductionAxes, bool]:
         use_abs_max = quantizer_config.mode == QuantizationMode.SYMMETRIC
@@ -140,7 +140,7 @@ def _get_reduction_shape_and_use_abs_max(
             # TODO (l-bat): Disable quantizer propogation through layout changing operations
             channel_axis = 1  # OpenVINO activations have channel first layout: [N, C, Z, Y, X]
-            axes = get_channel_agnostic_reduction_shape([channel_axis], shape)
+            axes = get_channel_agnostic_reduction_axes([channel_axis], shape)
             return axes, use_abs_max

         assert isinstance(node.layer_attributes, OVLayerAttributes)
@@ -148,7 +148,7 @@ def _get_reduction_shape_and_use_abs_max(

         if quantizer_config.per_channel:
             channel_axes = get_weight_channel_axes(node, target_point.port_id)
-            axes = get_channel_agnostic_reduction_shape(channel_axes, const_shape)
+            axes = get_channel_agnostic_reduction_axes(channel_axes, const_shape)
         else:
             axes = tuple(range(len(const_shape)))
         return axes, use_abs_max
@@ -162,7 +162,7 @@ def get_statistic_collector(
         inplace: bool,
         num_samples: int = None,
     ) -> TensorCollector:
-        reduction_shape, use_abs_max = OVMinMaxAlgoBackend._get_reduction_shape_and_use_abs_max(
+        reduction_axes, use_abs_max = OVMinMaxAlgoBackend._get_reduction_axes_and_use_abs_max(
             nncf_graph, target_point, quantizer_config
         )

@@ -181,7 +181,7 @@ def get_statistic_collector(
                 f"Aggregator type: {params.aggregator_type} is not supported for OpenVino PTQ backend yet."
             )

-        kwargs = {"reduction_axes": reduction_shape, "inplace": inplace}
+        kwargs = {"reduction_axes": reduction_axes, "inplace": inplace}
         if params.statistics_type in [StatisticsType.QUANTILE, StatisticsType.ABS_QUANTILE]:
             if container_key == OVMinMaxTensorStatistic.MIN_STAT:
                 quantile = params.quantile_outlier_prob
diff --git a/nncf/quantization/algorithms/smooth_quant/algorithm.py b/nncf/quantization/algorithms/smooth_quant/algorithm.py
index 7e216ab9ef9..90c3baa7a13 100644
--- a/nncf/quantization/algorithms/smooth_quant/algorithm.py
+++ b/nncf/quantization/algorithms/smooth_quant/algorithm.py
@@ -326,11 +326,11 @@ def _calculate_input_reduction_axes(self, nncf_graph: NNCFGraph, node: NNCFNode,
         :return: Calculated reduction axes.
""" shape = nncf_graph.get_input_edges(node)[input_port].tensor_shape - reduction_shape = tuple([0]) + reduction_axes = tuple([0]) if len(shape) > 1: channel_axis = self._backend_entity.get_activation_channel_axis(node, input_port) - reduction_shape = self._backend_entity.get_channel_agnostic_reduction_shape(channel_axis, shape) - return reduction_shape + reduction_axes = self._backend_entity.get_channel_agnostic_reduction_axes(channel_axis, shape) + return reduction_axes def _process_weight_statistics(self, node: NNCFNode, weights: TTensor, port_id: int) -> TTensor: """ diff --git a/nncf/quantization/algorithms/smooth_quant/backend.py b/nncf/quantization/algorithms/smooth_quant/backend.py index fca8aa7a4e3..e2a5b24fb87 100644 --- a/nncf/quantization/algorithms/smooth_quant/backend.py +++ b/nncf/quantization/algorithms/smooth_quant/backend.py @@ -70,7 +70,7 @@ def get_input_ports_map(node: NNCFNode, nncf_graph: NNCFGraph) -> Dict[str, int] @staticmethod @abstractmethod - def get_channel_agnostic_reduction_shape(channel_axis: int, shape: Tuple[int]) -> Tuple[int]: + def get_channel_agnostic_reduction_axes(channel_axis: int, shape: Tuple[int]) -> Tuple[int]: """ Returns filtered reduction shape without axes that corresponds channels. diff --git a/nncf/quantization/algorithms/smooth_quant/openvino_backend.py b/nncf/quantization/algorithms/smooth_quant/openvino_backend.py index 80a04c4580d..12005168428 100644 --- a/nncf/quantization/algorithms/smooth_quant/openvino_backend.py +++ b/nncf/quantization/algorithms/smooth_quant/openvino_backend.py @@ -22,7 +22,7 @@ from nncf.experimental.common.tensor_statistics.collectors import MaxAggregator from nncf.experimental.common.tensor_statistics.collectors import TensorCollector from nncf.openvino.graph.metatypes.openvino_metatypes import OVMatMulMetatype -from nncf.openvino.graph.node_utils import get_channel_agnostic_reduction_shape +from nncf.openvino.graph.node_utils import get_channel_agnostic_reduction_axes from nncf.openvino.graph.node_utils import get_weight_value from nncf.openvino.graph.transformations.command_creation import OVCommandCreator from nncf.openvino.graph.transformations.commands import OVMultiplyInsertionCommand @@ -61,8 +61,8 @@ def get_input_ports_map(node: NNCFNode, nncf_graph: NNCFGraph) -> Dict[str, int] return {"activation": activation_ports[0], "weight": weight_ports[0]} @staticmethod - def get_channel_agnostic_reduction_shape(channel_axis: int, shape: Tuple[int]) -> Tuple[int]: - return get_channel_agnostic_reduction_shape([channel_axis], shape) + def get_channel_agnostic_reduction_axes(channel_axis: int, shape: Tuple[int]) -> Tuple[int]: + return get_channel_agnostic_reduction_axes([channel_axis], shape) @staticmethod def get_abs_max_channel_collector( diff --git a/nncf/torch/quantization/init_range.py b/nncf/torch/quantization/init_range.py index 03e5904a1ef..28a5204ce22 100644 --- a/nncf/torch/quantization/init_range.py +++ b/nncf/torch/quantization/init_range.py @@ -31,6 +31,7 @@ from nncf.common.tensor_statistics.collectors import ReductionAxes from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase from nncf.config.schemata.algo.quantization import RANGE_INIT_TYPES_VS_DESCRIPTIONS +from nncf.experimental.common.tensor_statistics.collectors import AggregationAxes from nncf.torch.graph.graph import PTNNCFGraph from nncf.torch.initialization import DataLoaderBaseRunner from nncf.torch.nncf_network import NNCFNetwork @@ -104,7 +105,7 @@ def __init__( self._input_shape = input_shape 
         self._channel_idx = channel_idx

-    def get_reduction_axes(self, per_sample_stats) -> ReductionAxes:
+    def get_reduction_axes(self, per_sample_stats: bool) -> ReductionAxes:
         """
         Calculates the reduction axes of the tensor.

@@ -112,17 +113,17 @@ def get_reduction_axes(self, per_sample_stats) -> ReductionAxes:
         :return: Shape to reduce to.
         """
         ndims = len(self._input_shape)
-        reduction_shape = list(range(ndims))  # type: List[int]
+        reduction_axes = list(range(ndims))  # type: List[int]
         if self._per_channel:
             val = (ndims + self._channel_idx) % ndims
-            reduction_shape.remove(val)
+            reduction_axes.remove(val)
             if not val and self.use_per_sample_stats(per_sample_stats):
                 raise RuntimeError("Batch dimension should be equal to zero")
         if self.use_per_sample_stats(per_sample_stats):
-            reduction_shape = reduction_shape[1:]  # Assumes batch is the first dimension
-        return tuple(reduction_shape)
+            reduction_axes = reduction_axes[1:]  # Assumes batch is the first dimension
+        return tuple(reduction_axes)

-    def get_aggregation_axes(self, per_sample_stats) -> Tuple[int, ...]:
+    def get_aggregation_axes(self, per_sample_stats: bool) -> AggregationAxes:
         """
         Calculates the aggregation axes of the tensor.
diff --git a/tests/openvino/native/test_model_transformer.py b/tests/openvino/native/test_model_transformer.py
index ccccfb38ae2..608e98a5e26 100644
--- a/tests/openvino/native/test_model_transformer.py
+++ b/tests/openvino/native/test_model_transformer.py
@@ -286,7 +286,7 @@ def test_split_inplace_fn_insertion(test_params: InplaceOpTestCase):
 )
 def test_inplace_reduce_fn_dynamic_shapes(input_shape, raise_error):
     input_1 = opset.parameter(input_shape, name="Input")
-    fn = get_inplace_min_op("test", reduction_shape=None)
+    fn = get_inplace_min_op("test", reduction_axes=None)
     if raise_error:
         with pytest.raises(RuntimeError):
             fn(input_1, 0)
@@ -297,8 +297,8 @@ def test_inplace_reduce_fn_dynamic_shapes(input_shape, raise_error):
     assert all(np.equal(get_prev_node(op, 1).get_data(), ref_const))


-@pytest.mark.parametrize("reduction_shape", [None, np.array([], dtype=np.int64)])
-def test_inplace_reduce_fn_zero_rank_output(reduction_shape):
+@pytest.mark.parametrize("reduction_axes", [None, np.array([], dtype=np.int64)])
+def test_inplace_reduce_fn_zero_rank_output(reduction_axes):
     model = ZeroRankEltwiseModel().ov_model
     target_layer = "Add"
     port_id = 1
@@ -310,7 +310,7 @@ def test_inplace_reduce_fn_zero_rank_output(reduction_shape):
         OVInplaceFnInsertionCommand,
         port_id,
         {
-            "inplace_op_fn": get_inplace_min_op(name, reduction_shape=reduction_shape),
+            "inplace_op_fn": get_inplace_min_op(name, reduction_axes=reduction_axes),
             "fn_output_port_id": 0,
         },
     )
diff --git a/tests/openvino/native/test_node_utils.py b/tests/openvino/native/test_node_utils.py
index 4a2a24872d4..3fbd595af23 100644
--- a/tests/openvino/native/test_node_utils.py
+++ b/tests/openvino/native/test_node_utils.py
@@ -17,7 +17,7 @@
 from nncf.openvino.graph.layer_attributes import OVLayerAttributes
 from nncf.openvino.graph.metatypes.openvino_metatypes import OVMatMulMetatype
 from nncf.openvino.graph.nncf_graph_builder import GraphConverter
-from nncf.openvino.graph.node_utils import get_channel_agnostic_reduction_shape
+from nncf.openvino.graph.node_utils import get_channel_agnostic_reduction_axes
 from nncf.openvino.graph.node_utils import get_weight_channel_axes
 from nncf.openvino.graph.node_utils import get_weight_value
 from nncf.openvino.graph.node_utils import is_node_with_bias
@@ -91,7 +91,7 @@ def test_get_weight_channel_axes_for_matmul(weights_port_id, transpose, shape, e


 @pytest.mark.parametrize(
-    "shape, channel_axes, ref_reduction_shape",
+    "shape, channel_axes, ref_reduction_axes",
     [
         ((1, 128), [-1], (0,)),
         ((1, 256, 1), [-2], (0, 2)),
@@ -101,7 +101,7 @@ def test_get_weight_channel_axes_for_matmul(weights_port_id, transpose, shape, e
         ((1, 1, 12, 12), [1, 2], (0, 3)),
     ],
 )
-def test_get_channel_agnostic_reduction_shape(shape, channel_axes, ref_reduction_shape):
-    reduction_shape = get_channel_agnostic_reduction_shape(channel_axes=channel_axes, shape=shape)
+def test_get_channel_agnostic_reduction_axes(shape, channel_axes, ref_reduction_axes):
+    reduction_axes = get_channel_agnostic_reduction_axes(channel_axes=channel_axes, shape=shape)

-    assert reduction_shape == ref_reduction_shape
+    assert reduction_axes == ref_reduction_axes
diff --git a/tests/post_training/test_templates/test_calculate_quantizer_parameters.py b/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
index c26de05bff8..bffe249d064 100644
--- a/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
+++ b/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
@@ -19,7 +19,6 @@
 from nncf.common.quantization.structs import QuantizationMode
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.quantization.structs import QuantizerGroup
-from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
 from nncf.quantization.fake_quantize import FakeQuantizeParameters
 from nncf.quantization.fake_quantize import calculate_quantizer_parameters
 from tests.post_training.conftest import FQ_CALCULATED_PARAMETERS_PATH
diff --git a/tests/post_training/test_templates/test_channel_alignment.py b/tests/post_training/test_templates/test_channel_alignment.py
index d48478b5fe1..21f6cfdeaaf 100644
--- a/tests/post_training/test_templates/test_channel_alignment.py
+++ b/tests/post_training/test_templates/test_channel_alignment.py
@@ -483,7 +483,7 @@ def test_statistic_collectors(self, inplace_ref, q_ref):
         reduction_axes_ref = (0, 2, 3)
         num_samples_ref = 123
         statistic_collector: TensorCollector = self.get_backend_cls().get_statistic_collector(
-            reduction_shape=reduction_axes_ref, q=q_ref, num_samples=num_samples_ref, inplace=inplace_ref
+            reduction_axes=reduction_axes_ref, q=q_ref, num_samples=num_samples_ref, inplace=inplace_ref
         )

         assert len(statistic_collector.reducers) == 1
diff --git a/tests/post_training/test_templates/test_quantizer_config.py b/tests/post_training/test_templates/test_quantizer_config.py
index dc9a33a0cf5..72da2111a36 100644
--- a/tests/post_training/test_templates/test_quantizer_config.py
+++ b/tests/post_training/test_templates/test_quantizer_config.py
@@ -77,8 +77,8 @@ def conv_sum_aggregation_nncf_graph(self) -> NNCFGraphToTestSumAggregation:
     class TestGetStatisticsCollectorParameters:
         target_type: TargetType
         target_node_name: str
-        ref_per_ch_reduction_shape: List[int]
-        ref_per_tensor_reduction_shape: List[int]
+        ref_per_ch_reduction_axes: List[int]
+        ref_per_tensor_reduction_axes: List[int]

     @abstractmethod
     @pytest.fixture
@@ -283,8 +283,8 @@ def test_get_stat_collector(

         for reducer in reducers:
             if q_config_per_channel:
-                assert self.get_reduction_axes(reducer) == params.ref_per_ch_reduction_shape
+                assert self.get_reduction_axes(reducer) == params.ref_per_ch_reduction_axes
             else:
-                assert self.get_reduction_axes(reducer) == params.ref_per_tensor_reduction_shape
+                assert self.get_reduction_axes(reducer) == params.ref_per_tensor_reduction_axes

         assert tensor_collector.num_samples == num_samples
diff --git a/tests/post_training/test_templates/test_smooth_quant.py b/tests/post_training/test_templates/test_smooth_quant.py
index 88f2704b625..5ea93677f66 100644
--- a/tests/post_training/test_templates/test_smooth_quant.py
+++ b/tests/post_training/test_templates/test_smooth_quant.py
@@ -128,13 +128,13 @@ def test_smooth_quant_algo(self, model_cls, reference_values, tmpdir):
     # pylint:disable=protected-access
     def test_get_abs_max_channel_collector(self):
         backend = self.get_backend()
-        reduction_shape = (3, 2, 1)
+        reduction_axes = (3, 2, 1)
         samples = 1

         for inplace_type in [False, True]:
             backend_tensor_collector = backend.get_abs_max_channel_collector(
                 num_samples=samples,
-                stats_reduction_axes=reduction_shape,
+                stats_reduction_axes=reduction_axes,
                 inplace=inplace_type,
                 branch_key="test_branch",
             )
@@ -145,7 +145,7 @@ def test_get_abs_max_channel_collector(self):
             for reducer in backend_tensor_collector.reducers:
                 assert isinstance(reducer, AbsMaxReducer)
                 assert reducer.inplace == inplace_type
-                assert reducer._reduction_axes == reduction_shape
+                assert reducer._reduction_axes == reduction_axes

     @pytest.mark.parametrize(
         "model_cls, references",
diff --git a/tests/torch/tensor_statistics/test_tensor_statistics.py b/tests/torch/tensor_statistics/test_tensor_statistics.py
index 05f731b57bf..1d04f2a706b 100644
--- a/tests/torch/tensor_statistics/test_tensor_statistics.py
+++ b/tests/torch/tensor_statistics/test_tensor_statistics.py
@@ -37,7 +37,7 @@ class TestCollectedStatistics:
     ]

     @pytest.mark.parametrize(
-        ("collector", "reduction_shapes_vs_ref_statistic"),
+        ("collector", "reduction_axes_vs_ref_statistic"),
         [
             (
                 get_min_max_statistic_collector,
@@ -113,9 +113,9 @@ class TestCollectedStatistics:
     def test_collected_statistics_with_shape_convert(
         self,
         collector: Type[TensorStatisticCollectorBase],
-        reduction_shapes_vs_ref_statistic: Dict[Tuple[ReductionAxes, ReductionAxes], TensorStatistic],
+        reduction_axes_vs_ref_statistic: Dict[Tuple[ReductionAxes, ReductionAxes], TensorStatistic],
     ):
-        for shapes in reduction_shapes_vs_ref_statistic.keys():
+        for shapes in reduction_axes_vs_ref_statistic.keys():
             scale_shape, reducer_axes = shapes
             collector_obj = collector(
                 scale_shape=scale_shape,
@@ -127,10 +127,10 @@ def test_collected_statistics_with_shape_convert(
             for input_ in TestCollectedStatistics.REF_INPUTS:
                 collector_obj.register_input_for_all_reducers(PTNNCFTensor(input_))
             test_stats = collector_obj.get_statistics()
-            assert reduction_shapes_vs_ref_statistic[shapes] == test_stats
+            assert reduction_axes_vs_ref_statistic[shapes] == test_stats

     @pytest.mark.parametrize(
-        ("collector", "reduction_shapes_vs_ref_statistic"),
+        ("collector", "reduction_axes_vs_ref_statistic"),
         [
             (
                 get_median_mad_statistic_collector,
@@ -204,16 +204,16 @@ def test_collected_statistics_with_shape_convert(
     def test_collected_statistics(
         self,
         collector: Type[TensorStatisticCollectorBase],
-        reduction_shapes_vs_ref_statistic: Dict[ReductionAxes, TensorStatistic],
+        reduction_axes_vs_ref_statistic: Dict[ReductionAxes, TensorStatistic],
     ):
-        for reduction_shape in reduction_shapes_vs_ref_statistic:
-            if len(reduction_shape) > 1:
-                reducer_axes = ([dim for dim, val in enumerate(reduction_shape) if val == 1][0],)
+        for reduction_axes in reduction_axes_vs_ref_statistic:
+            if len(reduction_axes) > 1:
+                reducer_axes = ([dim for dim, val in enumerate(reduction_axes) if val == 1][0],)
             else:
                 reducer_axes = (0, 1)

             collector_obj = collector(
-                scale_shape=reduction_shape,
+                scale_shape=reduction_axes,
                 reduction_axes=reducer_axes,
                 aggregation_axes=(0,),
                 num_samples=None,
@@ -221,7 +221,7 @@ def test_collected_statistics(
             for input_ in TestCollectedStatistics.REF_INPUTS:
                 collector_obj.register_input_for_all_reducers(PTNNCFTensor(input_))
             test_stats = collector_obj.get_statistics()
-            assert reduction_shapes_vs_ref_statistic[reduction_shape] == test_stats
+            assert reduction_axes_vs_ref_statistic[reduction_axes] == test_stats


 class TestCollectorTensorProcessor:
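
For reference, the renamed helper keeps the same semantics as before: given the channel axes and a tensor shape, it returns every axis except the channel ones, i.e. the axes a reducer should collapse. Below is a minimal standalone sketch (not part of the patch, just an illustration of the behavior exercised by test_get_channel_agnostic_reduction_axes; the function name here is hypothetical):

from typing import List, Tuple


def channel_agnostic_reduction_axes(channel_axes: List[int], shape: Tuple[int, ...]) -> Tuple[int, ...]:
    # Start from all axes of the shape, then drop the channel axes
    # (negative indices are handled by deleting from the end first).
    axes = list(range(len(shape)))
    for channel_axis in sorted(channel_axes, reverse=True):
        del axes[channel_axis]
    return tuple(axes)


# Mirrors the reference cases visible in the updated test:
assert channel_agnostic_reduction_axes([-1], (1, 128)) == (0,)
assert channel_agnostic_reduction_axes([-2], (1, 256, 1)) == (0, 2)
assert channel_agnostic_reduction_axes([1, 2], (1, 1, 12, 12)) == (0, 3)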