From bdfd1157f136299c5a2a54c1380c94464df28101 Mon Sep 17 00:00:00 2001
From: dlyakhov <daniil.lyakhov@intel.com>
Date: Tue, 26 Sep 2023 09:44:58 +0200
Subject: [PATCH] [Torch] Experimental tensor collector is used for statistic
 collection

---
 nncf/common/tensor_statistics/collectors.py   | 110 +++-
 nncf/common/tensor_statistics/statistics.py   |   7 +
 .../common/tensor_statistics/collectors.py    | 340 ++++++++---
 nncf/onnx/graph/node_utils.py                 |   4 +-
 nncf/onnx/statistics/collectors.py            |  86 ++-
 nncf/openvino/statistics/collectors.py        |  91 +--
 .../algorithms/bias_correction/backend.py     |   4 +-
 .../bias_correction/onnx_backend.py           |   6 +-
 .../bias_correction/openvino_backend.py       |   4 +-
 .../fast_bias_correction/backend.py           |   4 +-
 .../fast_bias_correction/onnx_backend.py      |   6 +-
 .../fast_bias_correction/openvino_backend.py  |   4 +-
 .../fast_bias_correction/torch_backend.py     |  13 +-
 .../algorithms/min_max/openvino_backend.py    |   6 +-
 .../algorithms/min_max/torch_backend.py       | 109 ++--
 .../algorithms/smooth_quant/algorithm.py      |  10 +-
 .../algorithms/smooth_quant/backend.py        |   4 +-
 .../smooth_quant/openvino_backend.py          |   6 +-
 nncf/scopes.py                                |   6 +-
 nncf/tensorflow/quantization/init_range.py    |   4 +-
 .../tensor_statistics/collectors.py           |  40 +-
 .../tensorflow/tensor_statistics/reduction.py |   8 +-
 nncf/torch/quantization/algo.py               |  14 +-
 nncf/torch/quantization/init_range.py         | 116 ++--
 nncf/torch/statistics/aggregator.py           |   4 +-
 nncf/torch/tensor.py                          |   3 +
 nncf/torch/tensor_statistics/algo.py          |  38 +-
 nncf/torch/tensor_statistics/collectors.py    | 572 +++++++++++++-----
 nncf/torch/tensor_statistics/statistics.py    |  13 +-
 .../test_reducers_and_aggregators.py          | 229 +++++--
 .../experimental/test_statistic_collector.py  | 204 ++++++-
 tests/common/test_statistics_aggregator.py    |  14 +-
 .../quantization/test_quantizer_config.py     |   8 +
 ...y => test_calculation_quantizer_params.py} |   0
 .../quantization/test_quantizer_config.py     |   8 +
 .../test_reducers_and_aggregators.py          |  14 +-
 .../native/test_statistic_collector.py        |  40 ++
 .../test_calculate_quantizer_parameters.py    |   3 +-
 .../test_templates/test_channel_alignment.py  |   9 +-
 .../test_templates/test_quantizer_config.py   |   9 +-
 .../test_templates/test_smooth_quant.py       |   4 +-
 .../test_tensor_statistics.py                 |   8 +-
 tests/torch/ptq/helpers.py                    |   4 +-
 .../ptq/test_calculation_quantizer_params.py  |   3 +-
 tests/torch/ptq/test_ptq_params.py            |  22 +-
 tests/torch/ptq/test_quantizer_config.py      |  30 +-
 .../ptq/test_reducers_and_aggregators.py      |  83 +++
 tests/torch/ptq/test_statistic_collector.py   |  55 ++
 tests/torch/quantization/test_range_init.py   | 369 ++++++++---
 .../test_tensor_statistics.py                 | 163 ++---
 tests/torch/test_statistics_aggregator.py     |   2 +-
 51 files changed, 2151 insertions(+), 762 deletions(-)
 rename tests/openvino/native/quantization/{test_fq_configurations.py => test_calculation_quantizer_params.py} (100%)
 create mode 100644 tests/torch/ptq/test_reducers_and_aggregators.py
 create mode 100644 tests/torch/ptq/test_statistic_collector.py

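Reviewer orientation before the per-file diffs: the patch renames ReductionShape to
ReductionAxes, drops the monolithic no_outliers_map processor hook in favor of primitive
ops (quantile, less, logical_or, masked_mean/masked_median) that NoOutliersAggregatorBase
composes itself, and introduces aggregation_axes so aggregators can reduce across the
sample axis and tensor axes together. A minimal NumPy sketch of the recomposed
outlier-filtered mean (the helper name is illustrative, not part of the patch):

    import numpy as np

    def mean_no_outliers(stacked_samples: np.ndarray, axis: int = 0, quantile: float = 0.01) -> np.ndarray:
        # Quantile bounds along the aggregation axis, as NoOutliersAggregatorBase computes them.
        low, high = np.quantile(stacked_samples, [quantile, 1 - quantile], axis=axis)
        # outliers_mask = logical_or(less(x, low), less(high, x)) in processor terms.
        outliers_mask = np.logical_or(stacked_samples < low, high < stacked_samples)
        # masked_mean: average only the values not flagged as outliers.
        return np.ma.array(stacked_samples, mask=outliers_mask).mean(axis=axis).data

    per_sample_stats = np.stack([np.random.rand(4) for _ in range(100)])  # 100 reduced samples
    print(mean_no_outliers(per_sample_stats))  # robust per-channel mean, shape (4,)
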
diff --git a/nncf/common/tensor_statistics/collectors.py b/nncf/common/tensor_statistics/collectors.py
index 907ae30fec8..005e4ceebd8 100644
--- a/nncf/common/tensor_statistics/collectors.py
+++ b/nncf/common/tensor_statistics/collectors.py
@@ -12,7 +12,7 @@
 from abc import ABC
 from abc import abstractmethod
 from collections import deque
-from typing import Callable, List, Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union
 
 import numpy as np
 
@@ -21,14 +21,13 @@
 from nncf.common.tensor import TensorType
 from nncf.common.tensor_statistics.reduction import get_per_channel_history
 
-ReductionShape = Tuple[int]
-MaskedReduceFN = Callable[[NNCFTensor, Union[int, tuple, list], NNCFTensor, bool], NNCFTensor]
+ReductionAxes = Tuple[int]
 
 
 class TensorStatisticCollectorBase(ABC):
     """Collector estimate statistics at the quantization point based on the provided reduction shape."""
 
-    def __init__(self, reduction_shape: Optional[ReductionShape] = None, num_samples: Optional[int] = None):
+    def __init__(self, reduction_shape: Optional[ReductionAxes] = None, num_samples: Optional[int] = None):
         """
         Initializes Tensor Statistic Collector
 
@@ -101,7 +100,7 @@ class OfflineTensorStatisticCollector(TensorStatisticCollectorBase):
     """Collects statistics in offline regime by storing the data and aggregating it afterwards."""
 
     def __init__(
-        self, reduction_shape: Optional[ReductionShape] = None, num_samples: int = None, window_size: int = None
+        self, reduction_shape: Optional[ReductionAxes] = None, num_samples: int = None, window_size: int = None
     ):
         super().__init__(reduction_shape, num_samples)
         self._samples = deque(maxlen=window_size)
@@ -199,9 +198,9 @@ def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCF
         :return: Reduced NNCFTensor.
         """
 
-    @staticmethod
+    @classmethod
     @abstractmethod
-    def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    def masked_mean(cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
         """
         Computes the masked mean of elements across given dimensions of NNCFTensor.
 
@@ -214,9 +213,11 @@ def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor,
         :return: Reduced NNCFTensor.
         """
 
-    @staticmethod
+    @classmethod
     @abstractmethod
-    def masked_median(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    def masked_median(
+        cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False
+    ) -> NNCFTensor:
         """
         Computes the masked median of elements across given dimensions of NNCFTensor.
 
@@ -251,6 +252,16 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         :return: List of NNCFTensor.
         """
 
+    @staticmethod
+    @abstractmethod
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
+        """
+        Remove axes of length one from x.
+
+        :param x: NNCFTensor to squeeze.
+        :param dim: Selects a subset of the entries of length one in the shape.
+        """
+
     @staticmethod
     @abstractmethod
     def sum(tensor: NNCFTensor) -> TensorElementsType:
@@ -267,15 +278,36 @@ def quantile(
         tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
     ) -> List[TensorElementsType]:
         """
-        Compute the quantile-th percentile(s) of the data along the specified axis.
+        Compute the quantile(s) of the data along the specified axis.
 
         :param tensor: Given NNCFTensor.
-        :params quantile: Percentile or sequence of percentiles to compute, which must be between
+        :param quantile: Quantile or sequence of quantiles to compute, which must be between
             0 and 1 inclusive.
+        :param axis: Axis or axes along which the quantiles are computed.
+        :param keepdims: If True, the axes which are reduced are left in the result
+            as dimensions with size one.
+        :returns: List of the quantile(s) of the tensor elements.
+        """
+
+    @classmethod
+    @abstractmethod
+    def percentile(
+        cls,
+        tensor: NNCFTensor,
+        percentile: Union[float, List[float]],
+        axis: Union[int, tuple, list],
+        keepdims: bool = False,
+    ) -> List[TensorElementsType]:
+        """
+        Compute the percentile(s) of the data along the specified axis.
+
+        :param tensor: Given NNCFTensor.
+        :param percentile: Percentile or sequence of percentiles to compute, which must be between
+            0 and 100 inclusive.
         :param axis: Axis or axes along which the percentiles are computed.
         :param keepdims: If True, the axes which are reduced are left in the result
             as dimensions with size one.
-        :returns: List of the quantile-th percentile(s) of the tensor elements.
+        :returns: List of the percentile(s) of the tensor elements.
         """
 
     @staticmethod
@@ -289,27 +321,47 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
         :return: Reduced NNCFTensor.
         """
 
-    @classmethod
+    @staticmethod
+    def logical_or(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        """
+        Computes the element-wise logical OR of the given input tensors.
+        Zeros are treated as False and nonzeros are treated as True.
+
+        :param input_: The input tensor.
+        :param other: The tensor to compute the logical OR with.
+        :return: Result of the element-wise OR operation between the input_ and other tensors.
+        """
+
+    @staticmethod
+    def less(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        """
+        Return the truth value of (input_ < other) element-wise.
+
+        :param input_: The input tensor.
+        :param other: The tensor to compare against.
+        :return: Result of the element-wise less-than operation between the input_ and other tensors.
+        """
+
+    @staticmethod
     @abstractmethod
-    def no_outliers_map(cls, x: NNCFTensor, fn: MaskedReduceFN, axis: int = 0, alpha: float = 0.01) -> NNCFTensor:
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+        """
+        Returns the result of subtracting b from a.
         """
-        Computes quantiles [alpha, 1 - alpha] on given tensor, masks all elements that
-        are smaller that alpha and bigger than 1 - alpha quantile and applies
-        given masked reduction function fn.
 
-        :param tensor: Given NNCFTensor.
-        :param fn: Masked reduce operation from the same NNCFCollectorTensorProcessor class.
-        :param axis: Axis along which the reduction function is computed.
-        :params alpha: Minimal percentile to filter outliers outside the range
-            [quantile(alpha), quantile(1 - alpha)]. Must be between 0 and 1. inclusive.
-        :returns: Result of given masked reduction function on filtered from outliers NNCFTensor.
+    @classmethod
+    @abstractmethod
+    def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
+        """
+        Returns a binary mask of the input x that is True for all elements whose absolute value is
+        smaller than the machine epsilon of the corresponding dtype.
         """
 
 
 class MinMaxStatisticCollector(OnlineTensorStatisticCollector):
     """Collector estimates min of minimum values and max of maximum values."""
 
-    def __init__(self, use_abs_max: bool, reduction_shape: ReductionShape, num_samples: int = None):
+    def __init__(self, use_abs_max: bool, reduction_shape: ReductionAxes, num_samples: int = None):
         super().__init__(reduction_shape, num_samples)
         self._use_abs_max = use_abs_max
         self._tensor_processor = self._get_processor()
@@ -353,7 +405,7 @@ def __init__(
         self,
         use_per_sample_stats: bool,
         use_abs_max: bool,
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         num_samples: int = None,
         window_size: int = None,
     ):
@@ -407,7 +459,7 @@ def __init__(
         use_abs_max: bool,
         use_means_of_mins: bool,
         use_means_of_maxs: bool,
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         num_samples: int = None,
         window_size: int = None,
     ):
@@ -448,7 +500,7 @@ class MeanStatisticCollector(OfflineTensorStatisticCollector):
     """
 
     def __init__(
-        self, reduction_shape: ReductionShape, num_samples: Optional[int] = None, window_size: Optional[int] = None
+        self, reduction_shape: ReductionAxes, num_samples: Optional[int] = None, window_size: Optional[int] = None
     ) -> None:
         """
         :param reduction_shape: The shape for the reduction while statistics collection.
@@ -536,7 +588,7 @@ class PercentileStatisticCollector(OfflineTensorStatisticCollector):
     def __init__(
         self,
         percentiles_to_collect: List[float],
-        reduction_shape: Optional[ReductionShape] = None,
+        reduction_shape: Optional[ReductionAxes] = None,
         num_samples: int = None,
         window_size: int = None,
     ):
@@ -561,7 +613,7 @@ class MeanPercentileStatisticCollector(OfflineTensorStatisticCollector):
     def __init__(
         self,
         percentiles_to_collect: List[float],
-        reduction_shape: Optional[ReductionShape] = None,
+        reduction_shape: Optional[ReductionAxes] = None,
         num_samples: int = None,
         window_size: int = None,
     ):
diff --git a/nncf/common/tensor_statistics/statistics.py b/nncf/common/tensor_statistics/statistics.py
index 0f6d0d1aad3..1bc62db3464 100644
--- a/nncf/common/tensor_statistics/statistics.py
+++ b/nncf/common/tensor_statistics/statistics.py
@@ -20,6 +20,8 @@
 class TensorStatistic(ABC):
     """Base class that stores statistic data"""
 
+    TENSOR_STATISTIC_OUTPUT_KEY = "tensor_statistic_output"
+
     @staticmethod
     @abstractmethod
     def tensor_eq(tensor1: TensorType, tensor2: TensorType, rtol=1e-6) -> bool:
@@ -63,6 +65,9 @@ def __eq__(self, other: "MeanTensorStatistic") -> bool:
 
 
 class MedianMADTensorStatistic(TensorStatistic):
+    MEDIAN_VALUES_STAT = "median_values"
+    MAD_VALUES_STAT = "mad_values"
+
     def __init__(self, median_values, mad_values):
         self.median_values = median_values
         self.mad_values = mad_values
@@ -74,6 +79,8 @@ def __eq__(self, other: "MedianMADTensorStatistic") -> bool:
 
 
 class PercentileTensorStatistic(TensorStatistic):
+    PERCENTILE_VS_VALUE_DICT = "percentile_vs_values_dict"
+
     def __init__(self, percentile_vs_values_dict):
         self.percentile_vs_values_dict = percentile_vs_values_dict
 
diff --git a/nncf/experimental/common/tensor_statistics/collectors.py b/nncf/experimental/common/tensor_statistics/collectors.py
index 3655fffe5d6..f5dbc3236a2 100644
--- a/nncf/experimental/common/tensor_statistics/collectors.py
+++ b/nncf/experimental/common/tensor_statistics/collectors.py
@@ -18,11 +18,17 @@
 from nncf.common.tensor import TensorType
 from nncf.common.tensor_statistics.collectors import NNCFCollectorTensorProcessor
 from nncf.common.tensor_statistics.collectors import NNCFTensor
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
+from nncf.common.tensor_statistics.statistics import MeanTensorStatistic
+from nncf.common.tensor_statistics.statistics import MedianMADTensorStatistic
+from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
+from nncf.common.tensor_statistics.statistics import PercentileTensorStatistic
+from nncf.common.tensor_statistics.statistics import RawTensorStatistic
 from nncf.common.tensor_statistics.statistics import TensorStatistic
 from nncf.quantization.advanced_parameters import AggregatorType
 
 InplaceInsertionFNType = TypeVar("InplaceInsertionFNType")
+AggregationAxes = Tuple[int, ...]
 
 
 class TensorReducerBase(ABC):
@@ -31,16 +37,16 @@ class TensorReducerBase(ABC):
     the specified rule. Could handle tensors inplace or out of place.
     """
 
-    def __init__(self, reduction_shape: Optional[ReductionShape] = None, inplace: bool = False):
+    def __init__(self, reduction_axes: Optional[ReductionAxes] = None, inplace: bool = False):
         """
-        :param reduction_shape: Reduction shape for reduction calculation. Equal to list(range(len(input.shape)))
+        :param reduction_axes: Reduction axes for reduction calculation. Equal to list(range(len(input.shape)))
             if empty.
         :param inplace: Whether should be calculated inplace or out of place.
-
         """
-        self._reduction_shape = reduction_shape
+        self._reduction_axes = reduction_axes
         self._tensor_processor: NNCFCollectorTensorProcessor = self._get_processor()
         self._inplace = inplace
+        self._keepdims = True
 
     @property
     def inplace(self):
@@ -95,34 +101,44 @@ def __call__(self, x: List[NNCFTensor]):
     def __eq__(self, __o: object) -> bool:
         return (
             isinstance(__o, self.__class__)
-            and self._reduction_shape == __o._reduction_shape
+            and self._reduction_axes == __o._reduction_axes
             and self._inplace == __o.inplace
         )
 
     def __hash__(self) -> int:
-        return hash((self.__class__.__name__, self.inplace, self._reduction_shape))
+        return hash((self.__class__.__name__, self.inplace, self._reduction_axes))
 
-    def _get_reduction_shape(self, tensor: NNCFTensor) -> Union[int, Tuple[int, ...]]:
-        if self._reduction_shape is not None:
-            return self._reduction_shape
+    def _get_reduction_axes(self, tensor: NNCFTensor) -> ReductionAxes:
+        if self._reduction_axes is not None:
+            return self._reduction_axes
         return tuple(range(len(tensor.shape)))
 
 
-class TensorAggregatorBase:
+class AggregatorBase:
     """
-    Tensor aggregator is designed to receive (register) calculated statistics and
+    Aggregator is designed to receive (register) calculated statistics and
     aggregate them in terms of NNCFCollectorTensorProcessor operations.
     """
 
-    def __init__(self, tensor_processor: NNCFCollectorTensorProcessor, num_samples: Optional[int] = None):
+    def __init__(
+        self,
+        tensor_processor: NNCFCollectorTensorProcessor,
+        aggregation_axes: Optional[AggregationAxes] = None,
+        num_samples: Optional[int] = None,
+    ):
         """
         :param tensor_processor: Backend-specific tensor processor.
+        :param aggregation_axes: Axes along which to operate.
+            Registered statistics are stacked along the zero axis;
+            axes >= 1 correspond to the received statistic axes shifted left by 1.
         :param num_samples: Maximum number of samples to collect. Aggregator
             skips tensor registration if tensor registration was called num_samples times before.
             Aggregator never skips registration if num_samples is None.
         """
 
         self._tensor_processor = tensor_processor
+        self._aggregation_axes = (0,) if aggregation_axes is None else aggregation_axes
+        self._keepdims = False
         self._num_samples = num_samples
         self._collected_samples = 0
         self._container = []
@@ -187,8 +203,8 @@ class TensorCollector:
 
     def __init__(self, statistic_container: Optional[TensorStatistic] = None) -> None:
         self._reducers: Set[TensorReducerBase] = set()
-        self._aggregators: Dict[Tuple[int, int], TensorAggregatorBase] = {}
-        self._stat_container_kwargs_map: Dict[str, Tuple[int, int]] = {}
+        self._aggregators: Dict[Tuple[int, int, int], AggregatorBase] = {}
+        self._stat_container_kwargs_map: Dict[str, Tuple[int, int, int]] = {}
         self._stat_container = statistic_container
         self._enabled = True
 
@@ -224,7 +240,7 @@ def register_statistic_branch(
         self,
         container_key: str,
         reducer: TensorReducerBase,
-        aggregator: TensorAggregatorBase,
+        aggregator: AggregatorBase,
         reducer_output_port_id: int = 0,
     ) -> None:
         """
@@ -290,6 +306,14 @@ def register_inputs(self, inputs: Dict[int, List[NNCFTensor]]) -> None:
             if reducer_hash in reduced_inputs:
                 aggregator.register_reduced_input(reduced_inputs[reducer_hash][reducer_port_id])
 
+    def register_input_for_all_reducers(self, input_: NNCFTensor) -> None:
+        """
+        Registers the given input_ in each available statistic collection branch.
+
+        :param input_: Tensor input to register.
+        """
+        self.register_inputs({hash(reducer): [input_] for reducer in self._reducers})
+
     def _aggregate(self) -> None:
         result = {}
         for (
@@ -315,7 +339,7 @@ def get_statistics(self) -> Union[TensorStatistic, Dict[str, Any]]:
 
         if not self._stat_container:
             return kwargs
-        return self._stat_container(**kwargs)
+        return self._build_statistic_container(self._stat_container, kwargs)
 
     def get_inplace_fn_info(self) -> List[Tuple[Any, int]]:
         """
@@ -338,7 +362,7 @@ def any_stat_out_of_place(self) -> bool:
         """
         return any(not reducer.inplace for reducer in self._reducers)
 
-    def replace_aggregator(self, key: Tuple[int, int, int], aggregator: TensorAggregatorBase) -> None:
+    def replace_aggregator(self, key: Tuple[int, int, int], aggregator: AggregatorBase) -> None:
         """
         Friend method that replaces aggregator instance on equivalent one.
         Key should be valid for for given aggregator and a statistic branch
@@ -361,8 +385,8 @@ def get_tensor_collector_inputs(
     ) -> Dict[int, List[NNCFTensor]]:
         """
         Static method that converts all model outputs and collected output_info
-        to a layout required for `register_input` method. This method is not a part of
-        `register_input` to avoid all inputs passing to `TensorCollector.register_input` method.
+        to the layout required by the `register_inputs` method. This method is not a part of
+        `register_inputs` to avoid passing all model outputs to `TensorCollector.register_inputs`.
 
         :param outputs: Target model outputs.
         :param output_info: Output info collected by a `TensorCollector.get_output_info` method.
@@ -373,6 +397,39 @@ def get_tensor_collector_inputs(
             target_inputs[reducer] = [outputs[name] for name in names]
         return target_inputs
 
+    @staticmethod
+    def _build_statistic_container(statistic_container_cls: TensorStatistic, kwargs: Dict[Any, Any]):
+        if issubclass(statistic_container_cls, MinMaxTensorStatistic):
+            return statistic_container_cls(
+                min_values=kwargs[MinMaxTensorStatistic.MIN_STAT], max_values=kwargs[MinMaxTensorStatistic.MAX_STAT]
+            )
+        if issubclass(statistic_container_cls, MeanTensorStatistic):
+            return statistic_container_cls(
+                mean_values=kwargs[MeanTensorStatistic.MEAN_STAT], shape=kwargs[MeanTensorStatistic.SHAPE_STAT]
+            )
+        if issubclass(statistic_container_cls, RawTensorStatistic):
+            return statistic_container_cls(values=kwargs[RawTensorStatistic.VALUES_STATS])
+        if issubclass(statistic_container_cls, MedianMADTensorStatistic):
+            return statistic_container_cls(
+                median_values=kwargs[MedianMADTensorStatistic.TENSOR_STATISTIC_OUTPUT_KEY][
+                    MedianMADTensorStatistic.MEDIAN_VALUES_STAT
+                ],
+                mad_values=kwargs[MedianMADTensorStatistic.TENSOR_STATISTIC_OUTPUT_KEY][
+                    MedianMADTensorStatistic.MAD_VALUES_STAT
+                ],
+            )
+        if issubclass(statistic_container_cls, PercentileTensorStatistic):
+            if PercentileTensorStatistic.TENSOR_STATISTIC_OUTPUT_KEY in kwargs:
+                percentile_vs_values_dict = kwargs[PercentileTensorStatistic.TENSOR_STATISTIC_OUTPUT_KEY]
+            else:
+                percentile_vs_values_dict = {}
+                for (_, percentile), value in kwargs.items():
+                    percentile_vs_values_dict[percentile] = value
+            return statistic_container_cls(percentile_vs_values_dict=percentile_vs_values_dict)
+        raise RuntimeError(
+            f"Statistic collector class {statistic_container_cls} is not supported by the TensorCollector class."
+        )
+
 
 class MergedTensorCollector(TensorCollector):
     """
@@ -388,7 +445,7 @@ def __init__(self, tensor_collectors: List[TensorCollector]) -> None:
         :param tensor_collectors: Tensor collectors to merge.
         """
         super().__init__()
-        aggregators: Dict[Tuple[int, int], List[Tuple[TensorCollector, TensorAggregatorBase]]] = defaultdict(list)
+        aggregators: Dict[Tuple[int, int, int], List[Tuple[TensorCollector, AggregatorBase]]] = defaultdict(list)
         for tensor_collector in tensor_collectors:
             if not tensor_collector.enabled:
                 continue
@@ -424,68 +481,69 @@ def _reduce_out_of_place(self, x: List[TensorType]) -> List[TensorType]:
 class MinReducer(TensorReducerBase):
     def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
         x = x[0]
-        reduction_shape = self._get_reduction_shape(x)
-        return [self._tensor_processor.reduce_min(x, reduction_shape, keepdims=True)]
+        reduction_axes = self._get_reduction_axes(x)
+        return [self._tensor_processor.reduce_min(x, reduction_axes, keepdims=self._keepdims)]
 
 
 class MaxReducer(TensorReducerBase):
     def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
         x = x[0]
-        reduction_shape = self._get_reduction_shape(x)
-        return [self._tensor_processor.reduce_max(x, reduction_shape, keepdims=True)]
+        reduction_axes = self._get_reduction_axes(x)
+        return [self._tensor_processor.reduce_max(x, reduction_axes, keepdims=self._keepdims)]
 
 
 class AbsMaxReducer(TensorReducerBase):
     def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
         x = self._tensor_processor.abs(x[0])
-        reduction_shape = self._get_reduction_shape(x)
-        return [self._tensor_processor.reduce_max(x, reduction_shape, keepdims=True)]
+        reduction_axes = self._get_reduction_axes(x)
+        return [self._tensor_processor.reduce_max(x, reduction_axes, keepdims=self._keepdims)]
 
 
 class MeanReducer(TensorReducerBase):
     def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
         x = x[0]
-        reduction_shape = self._get_reduction_shape(x)
-        return [self._tensor_processor.mean(x, reduction_shape, keepdims=True)]
+        reduction_axes = self._get_reduction_axes(x)
+        return [self._tensor_processor.mean(x, reduction_axes, keepdims=self._keepdims)]
 
 
 class QuantileReducerBase(TensorReducerBase):
     def __init__(
         self,
-        reduction_shape: Optional[ReductionShape] = None,
+        reduction_axes: Optional[ReductionAxes] = None,
         quantile: Optional[Union[float, Tuple[float]]] = None,
         inplace: bool = False,
     ):
-        super().__init__(reduction_shape, False)
+        super().__init__(reduction_axes=reduction_axes, inplace=False)
         self._quantile = (0.01, 0.99) if quantile is None else quantile
 
     def __eq__(self, __o: object) -> bool:
         return super().__eq__(__o) and self._quantile == __o._quantile
 
     def __hash__(self) -> int:
-        return hash((self.__class__.__name__, self.inplace, self._reduction_shape, tuple(self._quantile)))
+        return hash((self.__class__.__name__, self.inplace, self._reduction_axes, tuple(self._quantile)))
 
 
 class QuantileReducer(QuantileReducerBase):
     def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
         x = x[0]
-        reduction_shape = self._get_reduction_shape(x)
-        return self._tensor_processor.quantile(x, self._quantile, reduction_shape, keepdims=True)
+        reduction_axes = self._get_reduction_axes(x)
+        return self._tensor_processor.quantile(x, self._quantile, reduction_axes, keepdims=self._keepdims)
 
 
 class AbsQuantileReducer(QuantileReducerBase):
     def __init__(
         self,
-        reduction_shape: Optional[ReductionShape] = None,
-        quantile: Union[float, List[float]] = 0.99,
+        reduction_axes: Optional[ReductionAxes] = None,
+        quantile: Optional[Union[float, List[float]]] = None,
         inplace: bool = False,
     ):
-        super().__init__(reduction_shape, quantile, False)
+        quantile = (0.99,) if quantile is None else quantile
+        super().__init__(reduction_axes=reduction_axes, quantile=quantile, inplace=False)
 
     def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
         x = self._tensor_processor.abs(x[0])
-        reduction_shape = self._get_reduction_shape(x)
-        return self._tensor_processor.quantile(x, [self._quantile], reduction_shape, keepdims=True)
+        reduction_axes = self._get_reduction_axes(x)
+        return self._tensor_processor.quantile(x, self._quantile, reduction_axes, keepdims=self._keepdims)
 
 
 class BatchMeanReducer(TensorReducerBase):
@@ -501,15 +559,15 @@ def __init__(self, channel_dim: int = 1, inplace: bool = False):
         super().__init__(channel_dim, inplace)
 
     def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
-        return [self._tensor_processor.mean_per_channel(x[0], self._reduction_shape)]
+        return [self._tensor_processor.mean_per_channel(x[0], self._reduction_axes)]
 
 
 ##################################################Aggregators##################################################
 
 
-class NoopAggregator(TensorAggregatorBase):
+class NoopAggregator(AggregatorBase):
     def __init__(self, num_samples: Optional[int]):
-        super().__init__(None, num_samples)
+        super().__init__(None, num_samples=num_samples)
 
     def _register_reduced_input_impl(self, x: TensorType) -> None:
         self._container.append(x.tensor)
@@ -518,9 +576,9 @@ def _aggregate_impl(self):
         return self._container
 
 
-class ShapeAggregator(TensorAggregatorBase):
+class ShapeAggregator(AggregatorBase):
     def __init__(self):
-        super().__init__(None, 1)
+        super().__init__(None, num_samples=1)
 
     def _register_reduced_input_impl(self, x: TensorType) -> None:
         self._container = x
@@ -529,74 +587,124 @@ def _aggregate_impl(self):
         return self._container.shape
 
 
-class MinAggregator(TensorAggregatorBase):
-    def _register_reduced_input_impl(self, x: TensorType) -> None:
-        if not self._container:
-            self._container = x
-        else:
-            self._container = self._tensor_processor.min(x, self._container)
+class TensorAggregatorBase(AggregatorBase, ABC):
+    def __init__(
+        self,
+        tensor_processor: NNCFCollectorTensorProcessor,
+        aggregation_axes: Optional[AggregationAxes] = None,
+        num_samples: Optional[int] = None,
+        window_size=None,
+    ):
+        super().__init__(tensor_processor, aggregation_axes=aggregation_axes, num_samples=num_samples)
+        self._window_size = window_size
+        self._container = deque(maxlen=window_size)
 
-    def _aggregate_impl(self):
-        return self._container.tensor
 
+class OnlineAggregatorBase(TensorAggregatorBase, ABC):
+    """
+    Base class for aggregators that use an aggregation function fn with the following property:
+    fn([x1, x2, x3]) == fn([fn([x1, x2]), x3]) where x1, x2, x3 are samples to aggregate.
+    Online aggregation fn([fn([x1, x2]), x3]) keeps the memory footprint low, as only
+    one sample is stored during statistics collection.
+    """
 
-class MaxAggregator(TensorAggregatorBase):
-    def _register_reduced_input_impl(self, x: TensorType) -> None:
-        if not self._container:
-            self._container = x
+    def _register_reduced_input_impl(self, x: NNCFTensor) -> None:
+        online_aggregation_axes = tuple(dim - 1 for dim in self._aggregation_axes if dim != 0)
+        if online_aggregation_axes:
+            reduced = self._aggregation_fn(x, axis=online_aggregation_axes, keepdims=self._keepdims)
+        else:
+            reduced = x
+        if 0 in self._aggregation_axes:
+            if self._container:
+                reduced = self._aggregation_fn(
+                    self._tensor_processor.stack([reduced, self._container]), axis=0, keepdims=False
+                )
+            self._container = reduced
         else:
-            self._container = self._tensor_processor.max(x, self._container)
+            self._container.append(reduced)
 
-    def _aggregate_impl(self):
-        return self._container.tensor
+    def _aggregate_impl(self) -> NNCFTensor:
+        if 0 in self._aggregation_axes:
+            if self._keepdims:
+                return self._tensor_processor.stack([self._container]).tensor
+            return self._container.tensor
+        return self._tensor_processor.stack(self._container).tensor
+
+    @abstractmethod
+    def _aggregation_fn(self, stacked_value: NNCFTensor, axis: AggregationAxes, keepdims: bool) -> NNCFTensor:
+        pass
+
+
+class MinAggregator(OnlineAggregatorBase):
+    def _aggregation_fn(self, stacked_value: NNCFTensor, axis: AggregationAxes, keepdims: bool) -> NNCFTensor:
+        return self._tensor_processor.reduce_min(stacked_value, axis=axis, keepdims=keepdims)
+
+
+class MaxAggregator(OnlineAggregatorBase):
+    def _aggregation_fn(self, stacked_value: NNCFTensor, axis: AggregationAxes, keepdims: bool) -> NNCFTensor:
+        return self._tensor_processor.reduce_max(stacked_value, axis=axis, keepdims=keepdims)
 
 
 class OfflineAggregatorBase(TensorAggregatorBase, ABC):
-    def __init__(
-        self, tensor_processor, use_per_sample_stats: bool = False, num_samples: Optional[int] = None, window_size=None
-    ):
-        super().__init__(tensor_processor, num_samples)
-        self._window_size = window_size
-        self._container = deque(maxlen=window_size)
-        self._use_per_sample_stats = use_per_sample_stats
+    """
+    Base class for aggregators that use an aggregation function fn that
+    does not fulfill the property fn([x1, x2, x3]) == fn([fn([x1, x2]), x3]),
+    where x1, x2, x3 are samples to aggregate. Child aggregators collect
+    all samples in a container and aggregate them in one step.
+    """
 
     def _register_reduced_input_impl(self, x: TensorType) -> None:
-        if self._use_per_sample_stats:
-            self._container.extend(self._tensor_processor.unstack(x))
-        else:
-            self._container.append(x)
+        self._container.append(x)
 
-    def _offline_aggregation_impl(self, fn):
+    def _aggregate_impl(self) -> NNCFTensor:
         stacked_val = self._tensor_processor.stack(self._container)
-        return fn(stacked_val, axis=0, keepdims=False).tensor
+        return self._aggregation_fn(stacked_val, axis=self._aggregation_axes, keepdims=self._keepdims).tensor
+
+    @abstractmethod
+    def _aggregation_fn(self, stacked_value: NNCFTensor, axis: AggregationAxes, keepdims: bool) -> NNCFTensor:
+        pass
 
 
 class MeanAggregator(OfflineAggregatorBase):
-    def _aggregate_impl(self):
-        return self._offline_aggregation_impl(self._tensor_processor.mean)
+    def _aggregation_fn(self, stacked_value: NNCFTensor, axis: AggregationAxes, keepdims: bool) -> NNCFTensor:
+        return self._tensor_processor.mean(stacked_value, axis=axis, keepdims=keepdims)
 
 
 class MedianAggregator(OfflineAggregatorBase):
-    def _aggregate_impl(self):
-        return self._offline_aggregation_impl(self._tensor_processor.median)
+    def _aggregation_fn(self, stacked_value: NNCFTensor, axis: AggregationAxes, keepdims: bool) -> NNCFTensor:
+        return self._tensor_processor.median(stacked_value, axis=axis, keepdims=keepdims)
 
 
 class NoOutliersAggregatorBase(OfflineAggregatorBase, ABC):
     def __init__(
         self,
-        tensor_processor,
-        use_per_sample_stats: bool = False,
+        tensor_processor: NNCFCollectorTensorProcessor,
+        aggregation_axes: Optional[AggregationAxes] = None,
         num_samples: Optional[int] = None,
         window_size=None,
         quantile: float = 0.01,
     ):
-        super().__init__(tensor_processor, use_per_sample_stats, num_samples, window_size)
+        super().__init__(tensor_processor, aggregation_axes=aggregation_axes, num_samples=num_samples)
+        self._window_size = window_size
+        self._container = deque(maxlen=window_size)
         self._quantile = quantile
 
-    def _offline_aggregation_impl(self, fn) -> List[NNCFTensor]:
-        stacked_val = self._tensor_processor.stack(self._container)
-        result = self._tensor_processor.no_outliers_map(stacked_val, fn, axis=0, alpha=self._quantile)
-        return result.tensor
+    def _aggregate_impl(self) -> NNCFTensor:
+        stacked_samples = self._tensor_processor.stack(self._container)
+        low_values, high_values = self._tensor_processor.quantile(
+            stacked_samples, quantile=(self._quantile, 1 - self._quantile), axis=self._aggregation_axes
+        )
+        tp = self._tensor_processor
+        outliers_mask = tp.logical_or(tp.less(stacked_samples, low_values), tp.less(high_values, stacked_samples))
+        return self._aggregation_fn(
+            stacked_samples=stacked_samples, mask=outliers_mask, axis=self._aggregation_axes, keepdims=self._keepdims
+        ).tensor
+
+    @abstractmethod
+    def _aggregation_fn(
+        self, stacked_samples: NNCFTensor, mask: NNCFTensor, axis: AggregationAxes, keepdims: bool
+    ) -> NNCFTensor:
+        pass
 
     def __eq__(self, __o: object) -> bool:
         return super().__eq__(__o) and self._quantile == __o._quantile
@@ -606,13 +714,71 @@ def __hash__(self) -> int:
 
 
 class MeanNoOutliersAggregator(NoOutliersAggregatorBase):
-    def _aggregate_impl(self) -> Any:
-        return self._offline_aggregation_impl(self._tensor_processor.masked_mean)
+    def _aggregation_fn(
+        self, stacked_samples: NNCFTensor, mask: NNCFTensor, axis: AggregationAxes, keepdims: bool
+    ) -> NNCFTensor:
+        return self._tensor_processor.masked_mean(stacked_samples, axis=axis, mask=mask, keepdims=keepdims)
 
 
 class MedianNoOutliersAggregator(NoOutliersAggregatorBase):
-    def _aggregate_impl(self) -> Any:
-        return self._offline_aggregation_impl(self._tensor_processor.masked_median)
+    def _aggregation_fn(
+        self, stacked_samples: NNCFTensor, mask: NNCFTensor, axis: AggregationAxes, keepdims: bool
+    ) -> NNCFTensor:
+        return self._tensor_processor.masked_median(stacked_samples, axis=axis, mask=mask, keepdims=keepdims)
+
+
+class MedianAbsoluteDeviationAggregator(TensorAggregatorBase):
+    def _register_reduced_input_impl(self, x: TensorType) -> None:
+        return self._container.append(x)
+
+    def _aggregate_impl(self) -> Dict[str, NNCFTensor]:
+        stacked_val = self._tensor_processor.stack(self._container)
+
+        mask = self._tensor_processor.zero_elements(stacked_val)
+        median_per_ch = self._tensor_processor.masked_median(
+            stacked_val, mask=mask, axis=self._aggregation_axes, keepdims=True
+        )
+
+        mad_values = self._tensor_processor.median(
+            self._tensor_processor.abs(self._tensor_processor.sub(stacked_val, median_per_ch)),
+            axis=self._aggregation_axes,
+            keepdims=self._keepdims,
+        )
+        if not self._keepdims:
+            median_per_ch = self._tensor_processor.squeeze(median_per_ch, self._aggregation_axes)
+        return {
+            MedianMADTensorStatistic.MEDIAN_VALUES_STAT: median_per_ch.tensor,
+            MedianMADTensorStatistic.MAD_VALUES_STAT: mad_values.tensor,
+        }
+
+
+class PercentileAggregator(TensorAggregatorBase):
+    def __init__(
+        self,
+        tensor_processor: NNCFCollectorTensorProcessor,
+        percentiles_to_collect: List[float],
+        aggregation_axes: Optional[AggregationAxes] = None,
+        num_samples: Optional[int] = None,
+        window_size=None,
+    ):
+        super().__init__(tensor_processor, aggregation_axes=aggregation_axes, num_samples=num_samples)
+        self._percentiles_to_collect = percentiles_to_collect
+        self._window_size = window_size
+        self._container = deque(maxlen=window_size)
+
+    def _register_reduced_input_impl(self, x: TensorType) -> None:
+        return self._container.append(x)
+
+    def _aggregate_impl(self) -> Dict[float, NNCFTensor]:
+        stacked_val = self._tensor_processor.stack(self._container)
+
+        percentiles = self._tensor_processor.percentile(
+            stacked_val, self._percentiles_to_collect, axis=self._aggregation_axes, keepdims=self._keepdims
+        )
+        retval = {}
+        for idx, percentile in enumerate(self._percentiles_to_collect):
+            retval[percentile] = percentiles[idx].tensor
+        return retval
 
 
 AGGREGATORS_MAP = {
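
The online/offline split above hinges on the property stated in the OnlineAggregatorBase
docstring. A quick NumPy check that min satisfies it, which is what lets MinAggregator
fold each registered input into a single running tensor instead of storing the whole
container:

    import numpy as np

    x1, x2, x3 = (np.random.rand(3, 4) for _ in range(3))

    # Offline: stack all registered inputs, reduce once.
    offline = np.min(np.stack([x1, x2, x3]), axis=0)

    # Online: fold each new input into one running tensor, mirroring
    # OnlineAggregatorBase._register_reduced_input_impl with aggregation axis 0.
    running = x1
    for x in (x2, x3):
        running = np.min(np.stack([x, running]), axis=0)

    assert np.array_equal(offline, running)
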
diff --git a/nncf/onnx/graph/node_utils.py b/nncf/onnx/graph/node_utils.py
index 5312f2fdded..6575dff6f1c 100644
--- a/nncf/onnx/graph/node_utils.py
+++ b/nncf/onnx/graph/node_utils.py
@@ -18,7 +18,7 @@
 from nncf.common.graph.graph import NNCFNode
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.common.logging.logger import nncf_logger
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.onnx.graph.metatypes import onnx_metatypes as om
 from nncf.onnx.graph.metatypes.onnx_metatypes import ONNXDequantizeLinearMetatype
 from nncf.onnx.graph.onnx_graph import ONNXGraph
@@ -131,7 +131,7 @@ def transpose_axis(shape: List[int], axis: int) -> int:
     return range(len(shape) - 1, -1, -1)[axis]  # Iterate backward throug axis
 
 
-def get_reduction_shape(shape: List[int], axis: int) -> ReductionShape:
+def get_reduction_shape(shape: List[int], axis: int) -> ReductionAxes:
     """
     Returns reduction shape for shape and axis.
 
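Only the return annotation of get_reduction_shape changes here. Since the function body is
outside this hunk, the sketch below is an inferred illustration of the contract from the
docstring and its call sites, not a copy of the implementation: reduce over every axis
except the given one.

    from typing import List, Tuple

    def get_reduction_shape(shape: List[int], axis: int) -> Tuple[int, ...]:
        # Inferred behavior: keep `axis`, reduce over everything else.
        return tuple(i for i in range(len(shape)) if i != axis)

    assert get_reduction_shape([1, 3, 224, 224], axis=1) == (0, 2, 3)
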
diff --git a/nncf/onnx/statistics/collectors.py b/nncf/onnx/statistics/collectors.py
index 4afb2396a4b..7af2792f003 100644
--- a/nncf/onnx/statistics/collectors.py
+++ b/nncf/onnx/statistics/collectors.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Callable, Deque, List, Optional, Union
+from typing import Deque, List, Optional, Tuple, Union
 
 import numpy as np
 
@@ -26,17 +26,18 @@
 from nncf.onnx.tensor import ONNXNNCFTensor
 
 
+# pylint: disable=too-many-public-methods
 class ONNXNNCFCollectorTensorProcessor(NNCFCollectorTensorProcessor):
     """
     A realization of the processing methods for ONNXNNCFTensors.
     """
 
     @staticmethod
-    def reduce_min(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
+    def reduce_min(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
         return ONNXNNCFTensor(np.amin(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
-    def reduce_max(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
+    def reduce_max(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
         return ONNXNNCFTensor(np.amax(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
@@ -52,16 +53,16 @@ def max(x1: NNCFTensor, x2: NNCFTensor) -> NNCFTensor:
         return ONNXNNCFTensor(np.maximum(x1.tensor, x2.tensor))
 
     @staticmethod
-    def mean(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
+    def mean(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims=False) -> NNCFTensor:
         return ONNXNNCFTensor(np.mean(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
-    def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
+    def median(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims=False) -> NNCFTensor:
         return ONNXNNCFTensor(np.median(x.tensor, axis=axis, keepdims=keepdims))
 
     @classmethod
     def masked_mean(
-        cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
+        cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
     ) -> NNCFTensor:
         if mask is None:
             return cls.mean(x, axis=axis, keepdims=keepdims)
@@ -70,32 +71,20 @@ def masked_mean(
 
     @classmethod
     def masked_median(
-        cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
+        cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
     ) -> NNCFTensor:
         if mask is None:
             return cls.median(x, axis=axis, keepdims=keepdims)
         masked_x = np.ma.array(x.tensor, mask=mask.tensor)
         return ONNXNNCFTensor(np.ma.median(masked_x, axis=axis, keepdims=keepdims).data)
 
-    @classmethod
-    def no_outliers_map(
-        cls,
-        x: NNCFTensor,
-        fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
-        axis: int = 0,
-        alpha: float = 0.01,
-        keepdims: bool = False,
-    ) -> NNCFTensor:
-        if len(x.shape) == 1:
-            return fn(x, axis=None, mask=None, keepdims=keepdims)
-
-        x = x.tensor
-        if axis:
-            x = np.moveaxis(x, axis, 0)
+    @staticmethod
+    def logical_or(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        return ONNXNNCFTensor(np.logical_or(input_.tensor, other.tensor))
 
-        low_values, high_values = np.quantile(x, [alpha, 1 - alpha], 0)
-        outliers_mask = np.logical_or(x < low_values, high_values < x)
-        return fn(ONNXNNCFTensor(x), axis=0, mask=ONNXNNCFTensor(outliers_mask), keepdims=keepdims)
+    @staticmethod
+    def less(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        return ONNXNNCFTensor(input_.tensor < other.tensor)
 
     @staticmethod
     def stack(x: Union[List[NNCFTensor], Deque[NNCFTensor]], axis: int = 0) -> NNCFTensor:
@@ -106,17 +95,31 @@ def stack(x: Union[List[NNCFTensor], Deque[NNCFTensor]], axis: int = 0) -> NNCFT
     def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         return [ONNXNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]
 
+    @staticmethod
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
+        raise NotImplementedError()
+
     @staticmethod
     def sum(tensor: NNCFTensor) -> TensorElementsType:
         return np.sum(tensor.tensor)
 
     @staticmethod
     def quantile(
-        tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
+        tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, Tuple, list], keepdims: bool = False
     ) -> List[TensorElementsType]:
         result = np.quantile(tensor.tensor, quantile, axis, keepdims=keepdims)
         return [ONNXNNCFTensor(x) for x in result]
 
+    @classmethod
+    def percentile(
+        cls,
+        tensor: NNCFTensor,
+        percentile: Union[float, List[float]],
+        axis: Union[int, Tuple, list],
+        keepdims: bool = False,
+    ) -> List[TensorElementsType]:
+        raise NotImplementedError()
+
     @staticmethod
     def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
         if len(x.shape) < 3:
@@ -129,47 +132,64 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
     def batch_mean(x: NNCFTensor) -> NNCFTensor:
         return ONNXNNCFTensor(np.mean(x.tensor, axis=0, keepdims=True))
 
+    @staticmethod
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+        raise NotImplementedError()
+
+    @staticmethod
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
+        raise NotImplementedError()
+
 
 class ONNXMinMaxStatisticCollector(MinMaxStatisticCollector):
     @staticmethod
     def _get_processor() -> NNCFCollectorTensorProcessor:
-        return ONNXNNCFCollectorTensorProcessor()
+        return ONNXNNCFCollectorTensorProcessor
 
     def _register_input(self, x: ONNXNNCFTensor):
         self._register_input_common(x)
 
     def _get_statistics(self) -> ONNXMinMaxTensorStatistic:
-        return ONNXMinMaxTensorStatistic(self._min_values.tensor, self._max_values.tensor)
+        return ONNXMinMaxTensorStatistic(
+            min_values=self._min_values.tensor,
+            max_values=self._max_values.tensor,
+        )
 
 
 class ONNXMeanMinMaxStatisticCollector(MeanMinMaxStatisticCollector):
     @staticmethod
     def _get_processor() -> NNCFCollectorTensorProcessor:
-        return ONNXNNCFCollectorTensorProcessor()
+        return ONNXNNCFCollectorTensorProcessor
 
     def _register_input(self, x: ONNXNNCFTensor):
         self._register_input_common(x)
 
     def _get_statistics(self) -> ONNXMinMaxTensorStatistic:
-        return ONNXMinMaxTensorStatistic(self._min_aggregate().tensor, self._max_aggregate().tensor)
+        return ONNXMinMaxTensorStatistic(
+            min_values=self._min_aggregate().tensor,
+            max_values=self._max_aggregate().tensor,
+        )
 
 
 class ONNXMeanStatisticCollector(MeanStatisticCollector):
     @staticmethod
     def _get_processor() -> NNCFCollectorTensorProcessor:
-        return ONNXNNCFCollectorTensorProcessor()
+        return ONNXNNCFCollectorTensorProcessor
 
     def _register_input(self, x: ONNXNNCFTensor):
         self._register_input_common(x)
 
     def _get_statistics(self) -> ONNXMeanTensorStatistic:
-        return ONNXMeanTensorStatistic(self._mean_aggregate().tensor, self._shape())
+        return ONNXMeanTensorStatistic(
+            mean_values=self._mean_aggregate().tensor,
+            shape=self._shape(),
+        )
 
 
 class ONNXRawStatisticCollector(RawStatisticCollector):
     @staticmethod
     def _get_processor() -> NNCFCollectorTensorProcessor:
-        return ONNXNNCFCollectorTensorProcessor()
+        return ONNXNNCFCollectorTensorProcessor
 
     def _register_input(self, x: ONNXNNCFTensor):
         self._register_input_common(x)
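
The None-mask fallback is the reason masked_mean and masked_median became classmethods:
they can delegate to the plain mean/median of the same processor. A condensed NumPy
sketch of the ONNX behavior:

    import numpy as np

    def masked_mean(x: np.ndarray, axis, mask=None, keepdims=False):
        if mask is None:
            # Fallback used when no outlier mask is supplied.
            return np.mean(x, axis=axis, keepdims=keepdims)
        masked_x = np.ma.array(x, mask=mask)
        return np.ma.mean(masked_x, axis=axis, keepdims=keepdims).data

    x = np.array([[1.0, 100.0], [3.0, 4.0]])
    mask = np.array([[False, True], [False, False]])  # mask out the 100.0 outlier
    print(masked_mean(x, axis=0, mask=mask))  # [2. 4.]
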
diff --git a/nncf/openvino/statistics/collectors.py b/nncf/openvino/statistics/collectors.py
index 61ef776fab7..c02fa68dcc7 100644
--- a/nncf/openvino/statistics/collectors.py
+++ b/nncf/openvino/statistics/collectors.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Callable, Deque, List, Optional, Union
+from typing import Deque, List, Optional, Tuple, Union
 
 import numpy as np
 
@@ -43,17 +43,18 @@
 from nncf.quantization.advanced_parameters import StatisticsType
 
 
+# pylint: disable=too-many-public-methods
 class OVNNCFCollectorTensorProcessor(NNCFCollectorTensorProcessor):
     """
     A realization of the processing methods for OVNNCFTensors.
     """
 
     @staticmethod
-    def reduce_min(x: NNCFTensor, axis: Union[int, tuple], keepdims: bool = True) -> NNCFTensor:
+    def reduce_min(x: NNCFTensor, axis: Union[int, Tuple], keepdims: bool = True) -> NNCFTensor:
         return OVNNCFTensor(np.amin(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
-    def reduce_max(x: NNCFTensor, axis: Union[int, tuple], keepdims: bool = True) -> NNCFTensor:
+    def reduce_max(x: NNCFTensor, axis: Union[int, Tuple], keepdims: bool = True) -> NNCFTensor:
         return OVNNCFTensor(np.amax(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
@@ -69,30 +70,36 @@ def max(x1: NNCFTensor, x2: NNCFTensor) -> NNCFTensor:
         return OVNNCFTensor(np.maximum(x1.tensor, x2.tensor))
 
     @staticmethod
-    def mean(x: NNCFTensor, axis: Union[int, tuple], keepdims: bool = False) -> NNCFTensor:
+    def mean(x: NNCFTensor, axis: Union[int, Tuple], keepdims: bool = False) -> NNCFTensor:
         return OVNNCFTensor(np.mean(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
-    def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
+    def median(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
         return OVNNCFTensor(np.median(x.tensor, axis=axis, keepdims=keepdims))
 
     @classmethod
     def masked_mean(
-        cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
+        cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
     ) -> NNCFTensor:
         if mask is None:
             return cls.mean(x, axis=axis, keepdims=keepdims)
         masked_x = np.ma.array(x.tensor, mask=mask.tensor)
-        return OVNNCFTensor(np.ma.mean(masked_x, axis=axis, keepdims=False).data)
+        result = np.ma.mean(masked_x, axis=axis, keepdims=keepdims)
+        if isinstance(result, np.ma.MaskedArray):
+            return OVNNCFTensor(result.data)
+        return OVNNCFTensor(result)
 
     @classmethod
     def masked_median(
-        cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
+        cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
     ) -> NNCFTensor:
         if mask is None:
             return cls.median(x, axis=axis, keepdims=keepdims)
         masked_x = np.ma.array(x.tensor, mask=mask.tensor)
-        return OVNNCFTensor(np.ma.median(masked_x, axis=axis, keepdims=keepdims).data)
+        result = np.ma.median(masked_x, axis=axis, keepdims=keepdims)
+        if isinstance(result, np.ma.MaskedArray):
+            return OVNNCFTensor(result.data)
+        return OVNNCFTensor(result)
 
     @staticmethod
     def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
@@ -102,30 +109,18 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
         t = x.reshape(x.shape[0], x.shape[1], -1)
         return OVNNCFTensor(np.mean(t, axis=(0, 2)))
 
-    @classmethod
-    def no_outliers_map(
-        cls,
-        x: NNCFTensor,
-        fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
-        axis: int = 0,
-        alpha: float = 0.01,
-        keepdims: bool = False,
-    ) -> NNCFTensor:
-        if len(x.shape) == 1:
-            return fn(x, axis=None, mask=None, keepdims=keepdims)
-
-        x = x.tensor
-        if axis:
-            x = np.moveaxis(x, axis, 0)
-
-        low_values, high_values = np.quantile(x, [alpha, 1 - alpha], 0)
-        outliers_mask = np.logical_or(x < low_values, high_values < x)
-        return fn(OVNNCFTensor(x), axis=0, mask=OVNNCFTensor(outliers_mask), keepdims=keepdims)
-
     @staticmethod
     def batch_mean(x: NNCFTensor) -> NNCFTensor:
         return OVNNCFTensor(np.mean(x.tensor, axis=0, keepdims=True))
 
+    @staticmethod
+    def logical_or(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        return OVNNCFTensor(np.logical_or(input_.tensor, other.tensor))
+
+    @staticmethod
+    def less(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        return OVNNCFTensor(input_.tensor < other.tensor)
+
     @staticmethod
     def stack(x: Union[List[NNCFTensor], Deque[NNCFTensor]], axis: int = 0) -> NNCFTensor:
         x = [t.tensor for t in x]
@@ -135,17 +130,42 @@ def stack(x: Union[List[NNCFTensor], Deque[NNCFTensor]], axis: int = 0) -> NNCFT
     def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         return [OVNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]
 
+    @staticmethod
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
+        return OVNNCFTensor(np.squeeze(x.tensor, axis=dim))
+
     @staticmethod
     def sum(tensor: NNCFTensor) -> TensorElementsType:
         return np.sum(tensor.tensor)
 
     @staticmethod
     def quantile(
-        tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
+        tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, Tuple, list], keepdims: bool = False
     ) -> List[NNCFTensor]:
         result = np.quantile(tensor.tensor, quantile, axis, keepdims=keepdims)
         return [OVNNCFTensor(x) for x in result]
 
+    @classmethod
+    def percentile(
+        cls,
+        tensor: NNCFTensor,
+        percentile: Union[float, List[float]],
+        axis: Union[int, tuple, list],
+        keepdims: bool = False,
+    ) -> List[TensorElementsType]:
+        quantile = np.true_divide(percentile, 100)
+        return cls.quantile(tensor, quantile=quantile, axis=axis, keepdims=keepdims)
+
+    @staticmethod
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+        return OVNNCFTensor(a.tensor - b.tensor)
+
+    @staticmethod
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
+        np_tensor = x.tensor
+        eps = np.finfo(np_tensor.dtype).eps
+        return OVNNCFTensor(np.abs(np_tensor) < eps)
+
 
 class OVNoopReducer(NoopReducer):
     def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
@@ -157,7 +177,7 @@ def _get_processor(self):
         return OVNNCFCollectorTensorProcessor
 
     def get_inplace_fn(self):
-        return get_inplace_min_op(self.name, self._reduction_shape)
+        return get_inplace_min_op(self.name, self._reduction_axes)
 
     def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
         return get_reducer_output_node_names(self.name, target_node_name, port_id, self.output_port_id, self.inplace)
@@ -168,7 +188,7 @@ def _get_processor(self):
         return OVNNCFCollectorTensorProcessor
 
     def get_inplace_fn(self):
-        return get_inplace_max_op(self.name, self._reduction_shape, False)
+        return get_inplace_max_op(self.name, self._reduction_axes, False)
 
     def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
         return get_reducer_output_node_names(self.name, target_node_name, port_id, self.output_port_id, self.inplace)
@@ -179,7 +199,7 @@ def _get_processor(self):
         return OVNNCFCollectorTensorProcessor
 
     def get_inplace_fn(self):
-        return get_inplace_max_op(self.name, self._reduction_shape, True)
+        return get_inplace_max_op(self.name, self._reduction_axes, True)
 
     def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
         return get_reducer_output_node_names(self.name, target_node_name, port_id, self.output_port_id, self.inplace)
@@ -190,7 +210,7 @@ def _get_processor(self):
         return OVNNCFCollectorTensorProcessor
 
     def get_inplace_fn(self):
-        return get_inplace_mean_op(self.name, self._reduction_shape)
+        return get_inplace_mean_op(self.name, self._reduction_axes)
 
     def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
         return get_reducer_output_node_names(self.name, target_node_name, port_id, self.output_port_id, self.inplace)
@@ -212,7 +232,7 @@ def _get_processor(self):
         return OVNNCFCollectorTensorProcessor
 
     def get_inplace_fn(self):
-        return get_inplace_mean_per_ch(self.name, self._reduction_shape)
+        return get_inplace_mean_per_ch(self.name, self._reduction_axes)
 
     def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
         return get_reducer_output_node_names(self.name, target_node_name, port_id, self.output_port_id, self.inplace)
@@ -252,7 +272,6 @@ def get_mean_stat_collector(num_samples, channel_axis, window_size=None, inplace
 
     kwargs = {
         "tensor_processor": OVNNCFCollectorTensorProcessor,
-        "use_per_sample_stats": False,
         "num_samples": num_samples,
         "window_size": window_size,
     }
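
A minimal sketch (not part of the patch, plain NumPy) of why the masked-median result above is guarded by an isinstance check: np.ma.median returns a MaskedArray when an axis is given, but a plain NumPy scalar when the reduction covers the whole tensor, and a scalar's `.data` attribute is a memoryview rather than the reduced value.

    import numpy as np

    masked = np.ma.array([1.0, 2.0, 100.0], mask=[False, False, True])

    full = np.ma.median(masked)                            # whole-tensor reduction
    per_axis = np.ma.median(masked.reshape(3, 1), axis=0)  # per-axis reduction

    print(isinstance(full, np.ma.MaskedArray))             # False: plain scalar
    print(isinstance(per_axis, np.ma.MaskedArray))         # True: .data is safe here
    print(full)                                            # 1.5 (100.0 is masked out)
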
diff --git a/nncf/quantization/algorithms/bias_correction/backend.py b/nncf/quantization/algorithms/bias_correction/backend.py
index a85f2fffb0a..08e9a8fbd27 100644
--- a/nncf/quantization/algorithms/bias_correction/backend.py
+++ b/nncf/quantization/algorithms/bias_correction/backend.py
@@ -21,7 +21,7 @@
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.common.graph.transformations.commands import TransformationCommand
 from nncf.common.tensor import NNCFTensor
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.common.utils.registry import Registry
 
@@ -87,7 +87,7 @@ def output_insertion_command(nncf_graph: NNCFGraph, target_point: TargetPoint) -
     @staticmethod
     @abstractmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
diff --git a/nncf/quantization/algorithms/bias_correction/onnx_backend.py b/nncf/quantization/algorithms/bias_correction/onnx_backend.py
index 0e9ad720a10..0b45a16309e 100644
--- a/nncf/quantization/algorithms/bias_correction/onnx_backend.py
+++ b/nncf/quantization/algorithms/bias_correction/onnx_backend.py
@@ -17,7 +17,7 @@
 from nncf.common.graph import NNCFGraph
 from nncf.common.graph import NNCFNode
 from nncf.common.graph.transformations.commands import TargetType
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.utils.backend import BackendType
 from nncf.onnx.graph.model_utils import remove_fq_from_inputs
 from nncf.onnx.graph.node_utils import get_bias_value
@@ -43,7 +43,7 @@
 class ONNXBiasCorrectionAlgoBackend(BiasCorrectionAlgoBackend):
     @property
     def tensor_processor(self) -> ONNXNNCFCollectorTensorProcessor:
-        return ONNXNNCFCollectorTensorProcessor()
+        return ONNXNNCFCollectorTensorProcessor
 
     @property
     def types_to_insert_bias(self):
@@ -77,7 +77,7 @@ def output_insertion_command(nncf_graph: NNCFGraph, target_point: ONNXTargetPoin
 
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
diff --git a/nncf/quantization/algorithms/bias_correction/openvino_backend.py b/nncf/quantization/algorithms/bias_correction/openvino_backend.py
index c57f9df6a20..29d33131f8e 100644
--- a/nncf/quantization/algorithms/bias_correction/openvino_backend.py
+++ b/nncf/quantization/algorithms/bias_correction/openvino_backend.py
@@ -17,7 +17,7 @@
 from nncf.common.graph import NNCFGraph
 from nncf.common.graph import NNCFNode
 from nncf.common.graph.transformations.commands import TargetType
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.utils.backend import BackendType
 from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
 from nncf.openvino.graph.metatypes.groups import FAKE_QUANTIZE_OPERATIONS
@@ -65,7 +65,7 @@ def output_insertion_command(nncf_graph: NNCFGraph, target_point: OVTargetPoint)
 
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
diff --git a/nncf/quantization/algorithms/fast_bias_correction/backend.py b/nncf/quantization/algorithms/fast_bias_correction/backend.py
index ca25adc2fb2..e95c25c9cb7 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/backend.py
@@ -21,7 +21,7 @@
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.common.graph.transformations.commands import TransformationCommand
 from nncf.common.tensor import NNCFTensor
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.common.utils.registry import Registry
 
@@ -79,7 +79,7 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> Transform
     @staticmethod
     @abstractmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
diff --git a/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py b/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py
index 733018e9bd2..96f57e77e05 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py
@@ -17,7 +17,7 @@
 from nncf.common.graph import NNCFGraph
 from nncf.common.graph import NNCFNode
 from nncf.common.graph.transformations.commands import TargetType
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.utils.backend import BackendType
 from nncf.onnx.graph.node_utils import get_bias_value
 from nncf.onnx.graph.node_utils import is_any_weight_quantized
@@ -42,7 +42,7 @@ def types_to_insert_bias(self):
 
     @property
     def tensor_processor(self) -> ONNXNNCFCollectorTensorProcessor:
-        return ONNXNNCFCollectorTensorProcessor()
+        return ONNXNNCFCollectorTensorProcessor
 
     @staticmethod
     def target_point(target_type: TargetType, target_node_name: str, port_id: int) -> ONNXTargetPoint:
@@ -64,7 +64,7 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> ONNXModel
 
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
diff --git a/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py b/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py
index 9436a7544f1..e4c77f56570 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py
@@ -17,7 +17,7 @@
 from nncf.common.graph import NNCFGraph
 from nncf.common.graph import NNCFNode
 from nncf.common.graph.transformations.commands import TargetType
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.utils.backend import BackendType
 from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
 from nncf.openvino.graph.metatypes.groups import FAKE_QUANTIZE_OPERATIONS
@@ -56,7 +56,7 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> OVModelEx
 
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
diff --git a/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py b/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py
index b7316724db0..e6c3bf0dd25 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py
@@ -18,8 +18,9 @@
 from nncf.common.graph import NNCFNode
 from nncf.common.graph.definitions import NNCFGraphNodeType
 from nncf.common.graph.transformations.commands import TargetType
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.utils.backend import BackendType
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
 from nncf.quantization.algorithms.fast_bias_correction.backend import ALGO_BACKENDS
 from nncf.quantization.algorithms.fast_bias_correction.backend import FastBiasCorrectionAlgoBackend
 from nncf.torch.graph.transformations.command_creation import create_bias_correction_command
@@ -32,8 +33,8 @@
 from nncf.torch.model_analyzer import is_quantized_weights
 from nncf.torch.nncf_network import NNCFNetwork
 from nncf.torch.tensor import PTNNCFTensor
-from nncf.torch.tensor_statistics.collectors import PTMeanStatisticCollector
 from nncf.torch.tensor_statistics.collectors import PTNNCFCollectorTensorProcessor
+from nncf.torch.tensor_statistics.collectors import get_mean_statisitic_collector
 
 
 @ALGO_BACKENDS.register(BackendType.TORCH)
@@ -45,7 +46,7 @@ class PTFastBiasCorrectionAlgoBackend(FastBiasCorrectionAlgoBackend):
 
     @property
     def tensor_processor(self) -> PTNNCFCollectorTensorProcessor:
-        return PTNNCFCollectorTensorProcessor()
+        return PTNNCFCollectorTensorProcessor
 
     @staticmethod
     def target_point(target_type: TargetType, target_node_name: str, port_id: int) -> PTTargetPoint:
@@ -67,12 +68,12 @@ def model_extraction_command(inputs: List[str], outputs: List[str]) -> PTModelEx
 
     @staticmethod
     def mean_statistic_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         inplace: bool,
         num_samples: Optional[int] = None,
         window_size: Optional[int] = None,
-    ) -> PTMeanStatisticCollector:
-        return PTMeanStatisticCollector(reduction_shape, num_samples, window_size)
+    ) -> TensorCollector:
+        return get_mean_statisitic_collector(num_samples, reduction_shape, window_size)
 
     @staticmethod
     def get_sub_input_output_names(subgraph: NNCFNetwork) -> Tuple[str, str]:
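
The backends now return the processor class itself from `tensor_processor` instead of constructing an instance. A minimal sketch of why that works, with illustrative names rather than the patch's classes: the processor is a stateless namespace of static and class methods, so one shared class object suffices.

    class StatelessProcessor:
        @staticmethod
        def reduce_min(values):
            return min(values)

    def tensor_processor():
        return StatelessProcessor  # the class object, not StatelessProcessor()

    print(tensor_processor().reduce_min([3, 1, 2]))  # 1
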
diff --git a/nncf/quantization/algorithms/min_max/openvino_backend.py b/nncf/quantization/algorithms/min_max/openvino_backend.py
index 4394c1870fa..89450446ea9 100644
--- a/nncf/quantization/algorithms/min_max/openvino_backend.py
+++ b/nncf/quantization/algorithms/min_max/openvino_backend.py
@@ -20,7 +20,7 @@
 from nncf.common.hardware.config import HWConfig
 from nncf.common.quantization.structs import QuantizationMode
 from nncf.common.quantization.structs import QuantizerConfig
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.utils.backend import BackendType
 from nncf.experimental.common.tensor_statistics.collectors import AGGREGATORS_MAP
 from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
@@ -124,7 +124,7 @@ def unify_statistics(statistics: List[OVMinMaxTensorStatistic]) -> OVMinMaxTenso
     @staticmethod
     def _get_reduction_shape_and_use_abs_max(
         nncf_graph: NNCFGraph, target_point: OVTargetPoint, quantizer_config: QuantizerConfig
-    ) -> Tuple[ReductionShape, bool]:
+    ) -> Tuple[ReductionAxes, bool]:
         use_abs_max = quantizer_config.mode == QuantizationMode.SYMMETRIC
         if not quantizer_config.per_channel:
             return None, use_abs_max
@@ -181,7 +181,7 @@ def get_statistic_collector(
                     f"Aggregator type: {params.aggregator_type} is not supported for OpenVino PTQ backend yet."
                 )
 
-            kwargs = {"reduction_shape": reduction_shape, "inplace": inplace}
+            kwargs = {"reduction_axes": reduction_shape, "inplace": inplace}
             if params.statistics_type in [StatisticsType.QUANTILE, StatisticsType.ABS_QUANTILE]:
                 if container_key == OVMinMaxTensorStatistic.MIN_STAT:
                     quantile = params.quantile_outlier_prob
diff --git a/nncf/quantization/algorithms/min_max/torch_backend.py b/nncf/quantization/algorithms/min_max/torch_backend.py
index 620a276eaf1..8b9d0f57d98 100644
--- a/nncf/quantization/algorithms/min_max/torch_backend.py
+++ b/nncf/quantization/algorithms/min_max/torch_backend.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, List, Optional, Set, Tuple, Union
+from typing import Dict, List, Optional, Set, Tuple
 
 import numpy as np
 import torch
@@ -24,13 +24,13 @@
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.common.graph.transformations.commands import TransformationPriority
 from nncf.common.hardware.config import HWConfig
-from nncf.common.quantization.initialization.range import RangeInitConfig
 from nncf.common.quantization.structs import QuantizationMode
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.utils.backend import BackendType
+from nncf.experimental.common.tensor_statistics.collectors import AGGREGATORS_MAP
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
 from nncf.parameters import ModelType
 from nncf.parameters import TargetDevice
-from nncf.quantization.advanced_parameters import AggregatorType
 from nncf.quantization.advanced_parameters import StatisticsType
 from nncf.quantization.algorithms.min_max.backend import ALGO_BACKENDS
 from nncf.quantization.algorithms.min_max.backend import MinMaxAlgoBackend
@@ -43,14 +43,13 @@
 from nncf.torch.nncf_network import NNCFNetwork
 from nncf.torch.quantization.default_quantization import DEFAULT_PT_QUANT_TRAIT_TO_OP_DICT
 from nncf.torch.quantization.init_range import PTRangeInitCollectorParams
-from nncf.torch.quantization.init_range import StatCollectorGenerator
 from nncf.torch.quantization.layers import QUANTIZATION_MODULES
 from nncf.torch.quantization.layers import AsymmetricQuantizer
 from nncf.torch.quantization.layers import BaseQuantizer
 from nncf.torch.quantization.layers import PTQuantizerSpec
 from nncf.torch.quantization.layers import get_scale_shape
-from nncf.torch.tensor_statistics.collectors import PTMeanMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMinMaxStatisticCollector
+from nncf.torch.tensor_statistics.collectors import PT_REDUCERS_MAP
+from nncf.torch.tensor_statistics.collectors import PTNNCFCollectorTensorProcessor
 from nncf.torch.tensor_statistics.statistics import PTMinMaxTensorStatistic
 
 
@@ -155,32 +154,47 @@ def get_statistic_collector(
         quantizer_config: QuantizerConfig,
         inplace: bool,
         num_samples: int = None,
-    ) -> Union[PTMinMaxStatisticCollector, PTMeanMinMaxStatisticCollector]:
-        if (
-            range_estimator_params.min.statistics_type == StatisticsType.MIN
-            and range_estimator_params.min.aggregator_type == AggregatorType.MIN
-            and range_estimator_params.max.statistics_type == StatisticsType.MAX
-            and range_estimator_params.max.aggregator_type == AggregatorType.MAX
+    ) -> TensorCollector:
+        collector_params = PTMinMaxAlgoBackend._default_collector_params(nncf_graph, target_point, quantizer_config)
+        reduction_axes = collector_params.get_reduction_axes(per_sample_stats=False)
+        aggregation_axes = collector_params.get_aggregation_axes(per_sample_stats=False)
+
+        collector = TensorCollector(PTMinMaxTensorStatistic)
+        for params, container_key in zip(
+            [range_estimator_params.min, range_estimator_params.max],
+            [PTMinMaxTensorStatistic.MIN_STAT, PTMinMaxTensorStatistic.MAX_STAT],
         ):
-            collector_name = "min_max"
-
-        elif (
-            range_estimator_params.min.statistics_type == StatisticsType.MIN
-            and range_estimator_params.min.aggregator_type == AggregatorType.MEAN
-            and range_estimator_params.max.statistics_type == StatisticsType.MAX
-            and range_estimator_params.max.aggregator_type == AggregatorType.MEAN
-        ):
-            collector_name = "mean_min_max"
-
-        else:
-            raise RuntimeError(
-                "The following range estimator parameters are not supported by PyTorch backend by now: "
-                f"{str(range_estimator_params)}"
+            if params.statistics_type not in PT_REDUCERS_MAP:
+                raise RuntimeError(
+                    f"Statistic type: {params.statistics_type} is not supported for Torch PTQ backend yet."
+                )
+
+            if params.aggregator_type not in AGGREGATORS_MAP:
+                raise RuntimeError(
+                    f"Aggregator type: {params.aggregator_type} is not supported for Torch PTQ backend yet."
+                )
+
+            statistic_type = params.statistics_type
+            if statistic_type in [StatisticsType.QUANTILE, StatisticsType.ABS_QUANTILE]:
+                # TODO(dlyakhov): merge the two quantile aggregators into one
+                if container_key == PTMinMaxTensorStatistic.MIN_STAT:
+                    quantile = params.quantile_outlier_prob
+                else:
+                    quantile = 1 - params.quantile_outlier_prob
+                reducer = PT_REDUCERS_MAP[statistic_type](reduction_axes=reduction_axes, quantile=[quantile])
+            else:
+                if collector_params.use_abs_max and statistic_type == StatisticsType.MAX:
+                    statistic_type = StatisticsType.ABS_MAX
+                reducer = PT_REDUCERS_MAP[statistic_type](reduction_axes=reduction_axes)
+
+            aggregator = AGGREGATORS_MAP[params.aggregator_type](
+                aggregation_axes=aggregation_axes,
+                num_samples=num_samples,
+                tensor_processor=PTNNCFCollectorTensorProcessor,
             )
 
-        return PTMinMaxAlgoBackend._statistic_collector_builder(
-            collector_name, nncf_graph, target_point, quantizer_config, num_samples
-        )
+            collector.register_statistic_branch(container_key, reducer, aggregator)
+        return collector
 
     @staticmethod
     def get_weight_tensor_port_ids(node: NNCFNode) -> List[Optional[int]]:
@@ -223,37 +237,18 @@ def _get_input_scale_shape(
         return input_shape, scale_shape, channel_idx
 
     @staticmethod
-    def _default_collector_params_and_scale_shape(
+    def _default_collector_params(
         nncf_graph: NNCFGraph, target_point: PTTargetPoint, quantizer_config: QuantizerConfig
-    ) -> Tuple[PTRangeInitCollectorParams, Tuple[int, ...]]:
-        input_shape, scale_shape, channel_idx = PTMinMaxAlgoBackend._get_input_scale_shape(
-            nncf_graph, target_point, quantizer_config
-        )
-        return (
-            PTRangeInitCollectorParams(
-                is_weights=target_point.is_weight_target_point(),
-                mode=quantizer_config.mode,
-                per_channel=quantizer_config.per_channel,
-                input_shape=input_shape,
-                channel_idx=channel_idx,
-            ),
-            scale_shape,
-        )
-
-    @staticmethod
-    def _statistic_collector_builder(
-        collector_name: str,
-        nncf_graph: NNCFGraph,
-        target_point: PTTargetPoint,
-        quantizer_config: QuantizerConfig,
-        num_samples: int = None,
-    ) -> PTMeanMinMaxStatisticCollector:
-        collector_params, scale_shape = PTMinMaxAlgoBackend._default_collector_params_and_scale_shape(
+    ) -> PTRangeInitCollectorParams:
+        input_shape, _, channel_idx = PTMinMaxAlgoBackend._get_input_scale_shape(
             nncf_graph, target_point, quantizer_config
         )
-        init_config = RangeInitConfig(collector_name, num_samples)
-        return StatCollectorGenerator.generate_stat_collector_for_range_init_config(
-            init_config, scale_shape, collector_params, num_samples
+        return PTRangeInitCollectorParams(
+            is_weights=target_point.is_weight_target_point(),
+            mode=quantizer_config.mode,
+            per_channel=quantizer_config.per_channel,
+            input_shape=input_shape,
+            channel_idx=channel_idx,
         )
 
     @staticmethod
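
The rewritten `get_statistic_collector` composes a reducer (applied to each collected tensor) with an aggregator (applied across collected tensors) per statistic branch, instead of dispatching on a hard-coded collector name. A self-contained sketch of that composition pattern, in plain NumPy with illustrative names rather than the patch's `TensorCollector` API:

    from typing import Callable, List

    import numpy as np

    def make_branch(reduce_fn: Callable[[np.ndarray], np.ndarray],
                    aggregate_fn: Callable[[List[np.ndarray]], np.ndarray]):
        reduced: List[np.ndarray] = []

        def register(x: np.ndarray) -> None:
            reduced.append(reduce_fn(x))   # per-tensor reduction (the "reducer")

        def result() -> np.ndarray:
            return aggregate_fn(reduced)   # cross-tensor aggregation (the "aggregator")

        return register, result

    register, result = make_branch(
        reduce_fn=lambda x: np.amax(np.abs(x), axis=0),        # abs-max over the batch axis
        aggregate_fn=lambda rs: np.mean(np.stack(rs), axis=0)  # mean across samples
    )
    for sample in (np.array([[1.0, -4.0]]), np.array([[3.0, 2.0]])):
        register(sample)
    print(result())  # [2. 3.]
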
diff --git a/nncf/quantization/algorithms/smooth_quant/algorithm.py b/nncf/quantization/algorithms/smooth_quant/algorithm.py
index a9ccdef9e10..7e216ab9ef9 100644
--- a/nncf/quantization/algorithms/smooth_quant/algorithm.py
+++ b/nncf/quantization/algorithms/smooth_quant/algorithm.py
@@ -232,11 +232,11 @@ def get_statistic_points(self, model: TModel, graph: NNCFGraph) -> StatisticPoin
                 target_node_name=node_to_smooth.node_name,
                 port_id=node_data["input_act_port"],
             )
-            input_reduction_shape = self._calculate_input_reduction_shape(
+            input_reduction_axes = self._calculate_input_reduction_axes(
                 graph, node_to_smooth, node_data["input_act_port"]
             )
             stat_collector = self._backend_entity.get_abs_max_channel_collector(
-                self._subset_size, input_reduction_shape, self._inplace_statistics, STATISTIC_BRANCH_KEY
+                self._subset_size, input_reduction_axes, self._inplace_statistics, STATISTIC_BRANCH_KEY
             )
             statistic_container.add_statistic_point(
                 StatisticPoint(
@@ -316,14 +316,14 @@ def _calculate_weight_scale(self, scale_value: TTensor, node: NNCFNode) -> TTens
             return self._backend_entity.calculate_weight_scale(scale_value, weights_size, channel_axis)
         return scale_value
 
-    def _calculate_input_reduction_shape(self, nncf_graph: NNCFGraph, node: NNCFNode, input_port: int) -> Tuple[int]:
+    def _calculate_input_reduction_axes(self, nncf_graph: NNCFGraph, node: NNCFNode, input_port: int) -> Tuple[int, ...]:
         """
-        Returns reduction shape for specified input.
+        Returns reduction axes for specified input.
 
         :param nncf_graph: NNCFGraph instance.
         :param node: NNCFNode to check.
         :param input_port: Specified input port id.
-        :return: Calculated reduction shape.
+        :return: Calculated reduction axes.
         """
         shape = nncf_graph.get_input_edges(node)[input_port].tensor_shape
         reduction_shape = tuple([0])
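
`_calculate_input_reduction_axes` ultimately asks the backend for channel-agnostic reduction axes: every axis except the channel one. A hypothetical helper sketching that computation (the patch's actual implementation lives in the backend classes):

    from typing import Tuple

    def channel_agnostic_axes(channel_axis: int, shape: Tuple[int, ...]) -> Tuple[int, ...]:
        # Keep the channel axis, reduce over all the others.
        return tuple(i for i in range(len(shape)) if i != channel_axis % len(shape))

    print(channel_agnostic_axes(1, (8, 16, 32)))   # (0, 2)
    print(channel_agnostic_axes(-1, (8, 16, 32)))  # (0, 1)
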
diff --git a/nncf/quantization/algorithms/smooth_quant/backend.py b/nncf/quantization/algorithms/smooth_quant/backend.py
index dda7fc44d2d..fca8aa7a4e3 100644
--- a/nncf/quantization/algorithms/smooth_quant/backend.py
+++ b/nncf/quantization/algorithms/smooth_quant/backend.py
@@ -82,12 +82,12 @@ def get_channel_agnostic_reduction_shape(channel_axis: int, shape: Tuple[int]) -
     @staticmethod
     @abstractmethod
     def get_abs_max_channel_collector(
-        num_samples: int, stats_reduction_shape: Tuple[int], inplace: bool, branch_key: str
+        num_samples: int, stats_reduction_axes: Tuple[int], inplace: bool, branch_key: str
     ) -> TensorCollector:
         """
         Returns TensorCollector with MaxAggregator and AbsMaxReducer.
 
-        :param stats_reduction_shape: Calculated reduction shape.
+        :param stats_reduction_axes: Calculated reduction axes.
         :param inplace: Whether to calculate statistic inplace or not.
         :param branch_key: Specific string for branch key.
         :return: TensorCollector instance.
diff --git a/nncf/quantization/algorithms/smooth_quant/openvino_backend.py b/nncf/quantization/algorithms/smooth_quant/openvino_backend.py
index 415ea5a626a..80a04c4580d 100644
--- a/nncf/quantization/algorithms/smooth_quant/openvino_backend.py
+++ b/nncf/quantization/algorithms/smooth_quant/openvino_backend.py
@@ -66,11 +66,11 @@ def get_channel_agnostic_reduction_shape(channel_axis: int, shape: Tuple[int]) -
 
     @staticmethod
     def get_abs_max_channel_collector(
-        num_samples: int, stats_reduction_shape: Tuple[int], inplace: bool, branch_key: str
+        num_samples: int, stats_reduction_axes: Tuple[int], inplace: bool, branch_key: str
     ) -> TensorCollector:
         collector = TensorCollector()
-        reducer = OVAbsMaxReducer(stats_reduction_shape, inplace)
-        aggregator = MaxAggregator(OVNNCFCollectorTensorProcessor, num_samples)
+        reducer = OVAbsMaxReducer(reduction_axes=stats_reduction_axes, inplace=inplace)
+        aggregator = MaxAggregator(tensor_processor=OVNNCFCollectorTensorProcessor, num_samples=num_samples)
         collector.register_statistic_branch(branch_key, reducer, aggregator)
         return collector
 
diff --git a/nncf/scopes.py b/nncf/scopes.py
index 4b6adcd03cf..7145d33a4a3 100644
--- a/nncf/scopes.py
+++ b/nncf/scopes.py
@@ -121,7 +121,7 @@ def get_ignored_node_names_from_ignored_scope(
             raise RuntimeError(
                 f"Ignored nodes with name {list(skipped_names)} were not found in the NNCFGraph. " + error_msg
             )
-        nncf_logger.info(f"{len(matched_by_names)} ignored nodes was found by name in the NNCFGraph")
+        nncf_logger.info(f"{len(matched_by_names)} ignored nodes were found by name in the NNCFGraph")
 
     matched_by_patterns = []
     if ignored_scope.patterns:
@@ -134,7 +134,7 @@ def get_ignored_node_names_from_ignored_scope(
             matched_by_patterns.extend(matches)
         if strict and not_matched_patterns:
             raise RuntimeError(f"No matches for ignored patterns {not_matched_patterns} in the NNCFGraph. " + error_msg)
-        nncf_logger.info(f"{len(matched_by_patterns)} ignored nodes was found by patterns in the NNCFGraph")
+        nncf_logger.info(f"{len(matched_by_patterns)} ignored nodes were found by patterns in the NNCFGraph")
 
     matched_by_types = []
     if ignored_scope.types:
@@ -148,6 +148,6 @@ def get_ignored_node_names_from_ignored_scope(
             raise RuntimeError(
                 f"Nodes with ignored types {list(not_matched_types)} were not found in the NNCFGraph. " + error_msg
             )
-        nncf_logger.info(f"{len(matched_by_types)} ignored nodes was found by types in the NNCFGraph")
+        nncf_logger.info(f"{len(matched_by_types)} ignored nodes were found by types in the NNCFGraph")
 
     return set(matched_by_names + matched_by_types + matched_by_patterns)
diff --git a/nncf/tensorflow/quantization/init_range.py b/nncf/tensorflow/quantization/init_range.py
index 2d6bff0a405..900f3248134 100644
--- a/nncf/tensorflow/quantization/init_range.py
+++ b/nncf/tensorflow/quantization/init_range.py
@@ -23,7 +23,7 @@
 from nncf.common.quantization.initialization.range import RangeInitParams
 from nncf.common.quantization.structs import QuantizerGroup
 from nncf.common.scopes import should_consider_scope
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.config.schemata.defaults import MAX_PERCENTILE
 from nncf.config.schemata.defaults import MIN_PERCENTILE
@@ -99,7 +99,7 @@ def __init__(self, range_init_params: TFRangeInitParams):
 
     @staticmethod
     def generate_stat_collector(
-        reduction_shape: ReductionShape,
+        reduction_shape: ReductionAxes,
         collector_params: RangeInitCollectorParams,
         init_config: RangeInitConfig,
         num_samples_to_collect_override: int = None,
diff --git a/nncf/tensorflow/tensor_statistics/collectors.py b/nncf/tensorflow/tensor_statistics/collectors.py
index 0b47a605eff..d3dd952e9cf 100644
--- a/nncf/tensorflow/tensor_statistics/collectors.py
+++ b/nncf/tensorflow/tensor_statistics/collectors.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Callable, Deque, List, Optional, Union
+from typing import Deque, List, Optional, Tuple, Union
 
 import numpy as np
 import tensorflow as tf
@@ -64,12 +64,22 @@ def mean(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTe
     def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
         raise NotImplementedError()
 
+    @classmethod
+    def masked_mean(cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+        raise NotImplementedError()
+
+    @classmethod
+    def masked_median(
+        cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False
+    ) -> NNCFTensor:
+        raise NotImplementedError()
+
     @staticmethod
-    def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    def logical_or(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
         raise NotImplementedError()
 
     @staticmethod
-    def masked_median(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    def less(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
         raise NotImplementedError()
 
     @staticmethod
@@ -85,6 +95,10 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         tensor_list = tf.unstack(tensor, axis=axis)
         return [TFNNCFTensor(t) for t in tensor_list]
 
+    @staticmethod
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
+        raise NotImplementedError()
+
     @staticmethod
     def sum(tensor: NNCFTensor) -> TensorElementsType:
         return tf.reduce_sum(tensor.tensor).numpy()
@@ -95,14 +109,26 @@ def quantile(
     ) -> List[NNCFTensor]:
         raise NotImplementedError()
 
+    @classmethod
+    def percentile(
+        cls,
+        tensor: NNCFTensor,
+        percentile: Union[float, List[float]],
+        axis: Union[int, tuple, list],
+        keepdims: bool = False,
+    ) -> List[TensorElementsType]:
+        raise NotImplementedError()
+
     @staticmethod
     def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
         raise NotImplementedError()
 
-    @classmethod
-    def no_outliers_map(
-        cls, x: NNCFTensor, fn: Callable[[NNCFTensor, Optional[int]], Any], axis: int = 0, alpha: float = 0.01
-    ):
+    @staticmethod
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+        raise NotImplementedError()
+
+    @staticmethod
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
         raise NotImplementedError()
 
 
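
The new default `percentile` simply rescales percentiles into quantiles and delegates to `quantile` (backends that cannot support it, like the TF processor above, keep raising NotImplementedError). A quick NumPy check of the conversion, assuming the default linear interpolation:

    import numpy as np

    x = np.arange(101, dtype=np.float64)
    percentiles = [0.1, 99.9]
    quantiles = np.true_divide(percentiles, 100)  # same conversion as in the patch
    print(np.quantile(x, quantiles))              # [ 0.1 99.9]
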
diff --git a/nncf/tensorflow/tensor_statistics/reduction.py b/nncf/tensorflow/tensor_statistics/reduction.py
index d43afddf947..00229d88be5 100644
--- a/nncf/tensorflow/tensor_statistics/reduction.py
+++ b/nncf/tensorflow/tensor_statistics/reduction.py
@@ -13,7 +13,7 @@
 
 import tensorflow as tf
 
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.tensorflow.layers.data_layout import get_weight_shape
 
 
@@ -28,7 +28,7 @@ def get_axes(ndims: int, per_channel: bool, channel_axes: Union[int, list, tuple
 
 def get_reduction_shape_activations(
     layer: tf.keras.layers.Layer, channel_axes: Union[int, tuple, list], use_per_sample_stats: bool
-) -> ReductionShape:
+) -> ReductionAxes:
     ndims = len(layer.get_input_shape_at(0))
     channel_axes_ = channel_axes if isinstance(channel_axes, (list, tuple)) else [channel_axes]
     reduction_shape = get_axes(ndims, layer.per_channel, channel_axes_)
@@ -39,7 +39,7 @@ def get_reduction_shape_activations(
 
 def get_reduction_shape_weights(
     layer: tf.keras.layers.Layer, weight_attr: str, channel_axes: Union[int, tuple, list], per_channel: bool
-) -> ReductionShape:
+) -> ReductionAxes:
     weight_shape = get_weight_shape(layer, weight_attr)
     ndims = len(weight_shape)
     channel_axes_ = channel_axes if isinstance(channel_axes, (list, tuple)) else [channel_axes]
@@ -47,7 +47,7 @@ def get_reduction_shape_weights(
     return tuple(reduction_shape)
 
 
-def convert_rs_to_pt_type(input_shape: Tuple[int], reduction_shape: ReductionShape) -> ReductionShape:
+def convert_rs_to_pt_type(input_shape: Tuple[int], reduction_shape: ReductionAxes) -> ReductionAxes:
     if len(reduction_shape) == len(input_shape):
         pt_reduction_shape = [1]
     else:
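
The `ReductionShape` to `ReductionAxes` rename throughout reflects what the value actually is: the axes to reduce over, whose complement determines the shape of the resulting per-channel statistic. A small NumPy illustration of that relationship, assuming an NCHW layout:

    import numpy as np

    x = np.zeros((2, 16, 8, 8), dtype=np.float32)  # assumed NCHW activation
    reduction_axes = (0, 2, 3)                     # reduce everything but channels
    stat = np.amax(np.abs(x), axis=reduction_axes, keepdims=True)
    print(stat.shape)                              # (1, 16, 1, 1): one value per channel
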
diff --git a/nncf/torch/quantization/algo.py b/nncf/torch/quantization/algo.py
index df78e921bdc..7788f264146 100644
--- a/nncf/torch/quantization/algo.py
+++ b/nncf/torch/quantization/algo.py
@@ -65,6 +65,7 @@
 from nncf.common.quantization.structs import WeightQuantizerId
 from nncf.common.schedulers import BaseCompressionScheduler
 from nncf.common.statistics import NNCFStatistics
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.utils.api_marker import api
 from nncf.common.utils.backend import BackendType
 from nncf.common.utils.backend import copy_model
@@ -142,7 +143,6 @@
 from nncf.torch.structures import AutoQPrecisionInitArgs
 from nncf.torch.structures import QuantizationPrecisionInitArgs
 from nncf.torch.tensor_statistics.algo import TensorStatisticsCollectionBuilder
-from nncf.torch.tensor_statistics.collectors import ReductionShape
 from nncf.torch.tensor_statistics.statistics import MinMaxTensorStatistic
 from nncf.torch.tensor_statistics.statistics import TensorStatistic
 from nncf.torch.tensor_statistics.statistics import pt_convert_stat_to_min_max_tensor_stat
@@ -594,7 +594,7 @@ def _parse_precision_init_params(self, initializer_config: Dict) -> Tuple[str, B
     def _get_minmax_values_for_quantizer_locations(
         self,
         quantizer_setup: SingleConfigQuantizerSetup,
-        tensor_statistics: Dict[PTTargetPoint, Dict[ReductionShape, TensorStatistic]],
+        tensor_statistics: Dict[PTTargetPoint, Dict[ReductionAxes, TensorStatistic]],
         target_model_graph: PTNNCFGraph,
     ) -> Dict[QuantizationPointId, MinMaxTensorStatistic]:
         retval = {}
@@ -674,7 +674,7 @@ def _get_transformation_layout(self, target_model: NNCFNetwork) -> PTTransformat
     @staticmethod
     def get_statistics_for_quantizer_setup(
         target_model: NNCFNetwork, quantizer_setup: QuantizerSetupBase, range_init_params: PTRangeInitParams
-    ) -> Dict[PTTargetPoint, Dict[ReductionShape, TensorStatistic]]:
+    ) -> Dict[PTTargetPoint, Dict[ReductionAxes, TensorStatistic]]:
         if range_init_params is None:
             return {}
         observation_points_vs_collectors_dict = (
@@ -700,7 +700,7 @@ def get_statistics_for_quantizer_setup(
 
     def _get_statistics_for_final_range_init(
         self, target_model: NNCFNetwork, quantizer_setup: QuantizerSetupBase, range_init_params: PTRangeInitParams
-    ) -> Dict[PTTargetPoint, Dict[ReductionShape, TensorStatistic]]:
+    ) -> Dict[PTTargetPoint, Dict[ReductionAxes, TensorStatistic]]:
         return self.get_statistics_for_quantizer_setup(target_model, quantizer_setup, range_init_params)
 
     def _get_single_config_quantizer_setup(self, target_model) -> SingleConfigQuantizerSetup:
@@ -1689,7 +1689,7 @@ def __init__(
         self,
         quantizer_setup: MultiConfigQuantizerSetup,
         initial_quantizer_setup: SingleConfigQuantizerSetup,
-        tensor_stats_for_all_setup_variations: Dict[PTTargetPoint, Dict[ReductionShape, TensorStatistic]],
+        tensor_stats_for_all_setup_variations: Dict[PTTargetPoint, Dict[ReductionAxes, TensorStatistic]],
         hw_config: HWConfig = None,
     ):
         should_init = bool(tensor_stats_for_all_setup_variations)
@@ -1708,7 +1708,7 @@ def _get_single_config_quantizer_setup(self, target_model) -> SingleConfigQuanti
 
     def _get_statistics_for_final_range_init(
         self, target_model: NNCFNetwork, quantizer_setup: QuantizerSetupBase, range_init_params: PTRangeInitParams
-    ) -> Dict[PTTargetPoint, Dict[ReductionShape, TensorStatistic]]:
+    ) -> Dict[PTTargetPoint, Dict[ReductionAxes, TensorStatistic]]:
         return self._tensor_stats
 
     def _build_controller(self, model: NNCFNetwork) -> "ExperimentalQuantizationController":
@@ -1764,7 +1764,7 @@ def __init__(
         quantizer_setup: MultiConfigQuantizerSetup,
         initial_quantizer_setup: SingleConfigQuantizerSetup,
         setup_to_module_id_translation_dict: Dict[QuantizationPointId, QuantizerId],
-        tensor_stats: Dict[PTTargetPoint, Dict[ReductionShape, TensorStatistic]],
+        tensor_stats: Dict[PTTargetPoint, Dict[ReductionAxes, TensorStatistic]],
         build_time_metric_info: QuantizationShareBuildTimeInfo,
         should_setup_adjust_pad_ops=False,
         hw_config: HWConfig = None,
diff --git a/nncf/torch/quantization/init_range.py b/nncf/torch/quantization/init_range.py
index 259e7ec1ada..03e5904a1ef 100644
--- a/nncf/torch/quantization/init_range.py
+++ b/nncf/torch/quantization/init_range.py
@@ -14,6 +14,7 @@
 from typing import Callable, Dict, List, Tuple
 
 import numpy as np
+import torch
 
 from nncf.common.graph.layer_attributes import WeightedLayerAttributes
 from nncf.common.quantization.initialization.range import RangeInitCollectorParams
@@ -27,7 +28,7 @@
 from nncf.common.quantization.structs import QuantizerId
 from nncf.common.quantization.structs import WeightQuantizerId
 from nncf.common.scopes import should_consider_scope
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.config.schemata.algo.quantization import RANGE_INIT_TYPES_VS_DESCRIPTIONS
 from nncf.torch.graph.graph import PTNNCFGraph
@@ -38,12 +39,12 @@
 from nncf.torch.quantization.layers import get_scale_shape
 from nncf.torch.quantization.translator import PTTargetPointTranslator
 from nncf.torch.tensor_statistics.algo import TensorStatisticObservationPoint
-from nncf.torch.tensor_statistics.collectors import PTMeanMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMeanPercentileStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMedianMADStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMixedMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTPercentileStatisticCollector
+from nncf.torch.tensor_statistics.algo import create_register_input_hook
+from nncf.torch.tensor_statistics.collectors import get_mean_percentile_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_median_mad_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_min_max_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_mixed_min_max_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_percentile_tensor_collector
 from nncf.torch.tensor_statistics.statistics import pt_convert_stat_to_min_max_tensor_stat
 
 
@@ -103,9 +104,9 @@ def __init__(
         self._input_shape = input_shape
         self._channel_idx = channel_idx
 
-    def convert_reduction_shape(self, per_sample_stats) -> ReductionShape:
+    def get_reduction_axes(self, per_sample_stats) -> ReductionAxes:
         """
-        Calculates the reduction shape of the tensor.
+        Calculates the reduction axes of the tensor.
 
-        :param per_sample_stats: Boolean flag that indicated whether statistics are collected per-sample or per-batch.
-        :return: Shape to reduce to.
+        :param per_sample_stats: Boolean flag that indicates whether statistics are collected per-sample or per-batch.
+        :return: Axes to reduce over.
@@ -115,16 +116,27 @@ def convert_reduction_shape(self, per_sample_stats) -> ReductionShape:
         if self._per_channel:
             val = (ndims + self._channel_idx) % ndims
             reduction_shape.remove(val)
+            if not val and self.use_per_sample_stats(per_sample_stats):
+                raise RuntimeError("Per-sample statistics are not supported when the channel axis is the batch dimension (axis 0)")
         if self.use_per_sample_stats(per_sample_stats):
             reduction_shape = reduction_shape[1:]  # Assumes batch is the first dimension
         return tuple(reduction_shape)
 
+    def get_aggregation_axes(self, per_sample_stats) -> Tuple[int, ...]:
+        """
+        Calculates the aggregation axes of the tensor.
+
+        :param per_sample_stats: Boolean flag that indicates whether statistics are collected per-sample or per-batch.
+        :return: Axes along which the collected statistics are aggregated.
+        """
+        return (0, 1) if self.use_per_sample_stats(per_sample_stats) else (0,)
+
 
 class StatCollectorGenerator:
     @staticmethod
     def generate_collectors_for_range_init_statistics_collection(
         target_model_graph: PTNNCFGraph, quantizer_setup: QuantizerSetupBase, range_init_params: PTRangeInitParams
-    ) -> Dict[TensorStatisticObservationPoint, Dict[ReductionShape, TensorStatisticCollectorBase]]:
+    ) -> Dict[TensorStatisticObservationPoint, Dict[ReductionAxes, TensorStatisticCollectorBase]]:
         retval = {}
         for qp in quantizer_setup.quantization_points.values():
             init_config = range_init_params.get_init_config_for_quantization_point(qp)
@@ -154,8 +166,8 @@ def generate_collectors_for_range_init_statistics_collection(
     @staticmethod
     def generate_stat_collector_for_range_init_config(
         init_config: RangeInitConfig,
-        reduction_shape: ReductionShape = None,
-        collector_params=None,
+        scale_shape: Tuple[int, ...] = None,
+        collector_params: PTRangeInitCollectorParams = None,
         num_samples_to_collect_override: int = None,
     ) -> TensorStatisticCollectorBase:
         num_samples = init_config.num_init_samples
@@ -163,47 +175,73 @@ def generate_stat_collector_for_range_init_config(
             num_samples = num_samples_to_collect_override
         if init_config.init_type not in RANGE_INIT_TYPES_VS_DESCRIPTIONS:
             raise RuntimeError("Unknown range init type: {}".format(init_config.init_type))
+
+        use_per_sample_stats = collector_params.use_per_sample_stats(init_config.init_type == "mixed_min_max")
+        reduction_axes = collector_params.get_reduction_axes(use_per_sample_stats)
+        aggregation_axes = collector_params.get_aggregation_axes(use_per_sample_stats)
+
         if init_config.init_type == "min_max":
-            reduction_shape_converted = collector_params.convert_reduction_shape(per_sample_stats=False)
-            return PTMinMaxStatisticCollector(
-                collector_params.use_abs_max, reduction_shape_converted, reduction_shape, num_samples
+            return get_min_max_statistic_collector(
+                use_abs_max=collector_params.use_abs_max,
+                reduction_axes=reduction_axes,
+                aggregation_axes=aggregation_axes,
+                scale_shape=scale_shape,
+                num_samples=num_samples,
             )
         if init_config.init_type == "mixed_min_max":
-            reduction_shape_converted = collector_params.convert_reduction_shape(per_sample_stats=True)
-            return PTMixedMinMaxStatisticCollector(
-                collector_params.use_per_sample_stats(per_sample_stats=True),
-                collector_params.use_abs_max,
-                collector_params.use_means_of_mins,
-                collector_params.use_means_of_maxs,
-                reduction_shape_converted,
-                reduction_shape,
-                num_samples,
+            return get_mixed_min_max_statistic_collector(
+                use_abs_max=collector_params.use_abs_max,
+                reduction_axes=reduction_axes,
+                aggregation_axes=aggregation_axes,
+                scale_shape=scale_shape,
+                use_means_of_mins=collector_params.use_means_of_mins,
+                use_means_of_maxs=collector_params.use_means_of_maxs,
+                num_samples=num_samples,
             )
         if init_config.init_type == "mean_min_max":
-            reduction_shape_converted = collector_params.convert_reduction_shape(per_sample_stats=False)
-            return PTMeanMinMaxStatisticCollector(
-                collector_params.use_per_sample_stats(per_sample_stats=False),
-                collector_params.use_abs_max,
-                reduction_shape_converted,
-                reduction_shape,
-                num_samples,
+            return get_mixed_min_max_statistic_collector(
+                use_abs_max=collector_params.use_abs_max,
+                reduction_axes=reduction_axes,
+                aggregation_axes=aggregation_axes,
+                scale_shape=scale_shape,
+                use_means_of_mins=True,
+                use_means_of_maxs=True,
+                num_samples=num_samples,
             )
         if init_config.init_type == "threesigma":
-            return PTMedianMADStatisticCollector(reduction_shape, num_samples)
+            return get_median_mad_statistic_collector(
+                reduction_axes=reduction_axes,
+                aggregation_axes=aggregation_axes,
+                scale_shape=scale_shape,
+                num_samples=num_samples,
+            )
         if init_config.init_type == "percentile":
             min_percentile = init_config.init_type_specific_params.get("min_percentile", 0.1)
             max_percentile = init_config.init_type_specific_params.get("max_percentile", 99.9)
-            return PTPercentileStatisticCollector([min_percentile, max_percentile], reduction_shape, num_samples)
+            return get_percentile_tensor_collector(
+                percentiles_to_collect=(min_percentile, max_percentile),
+                reduction_axes=reduction_axes,
+                aggregation_axes=aggregation_axes,
+                scale_shape=scale_shape,
+                num_samples=num_samples,
+            )
+
         if init_config.init_type == "mean_percentile":
             min_percentile = init_config.init_type_specific_params.get("min_percentile", 0.1)
             max_percentile = init_config.init_type_specific_params.get("max_percentile", 99.9)
-            return PTMeanPercentileStatisticCollector([min_percentile, max_percentile], reduction_shape, num_samples)
+            return get_mean_percentile_statistic_collector(
+                percentiles_to_collect=(min_percentile, max_percentile),
+                reduction_axes=reduction_axes,
+                aggregation_axes=aggregation_axes,
+                scale_shape=scale_shape,
+                num_samples=num_samples,
+            )
         raise ValueError("Range init type not handled!")
 
     @classmethod
     def get_all_scale_shapes_with_params(
         cls, qp: QuantizationPointBase, target_nncf_graph: PTNNCFGraph
-    ) -> Dict[ReductionShape, PTRangeInitCollectorParams]:
+    ) -> Dict[ReductionAxes, PTRangeInitCollectorParams]:
         qconfigs = qp.get_all_configs_list()
         if qp.is_weight_quantization_point():
             module_node = target_nncf_graph.get_node_by_name(qp.insertion_point.target_node_name)
@@ -249,9 +287,13 @@ def __init__(
         self.hook_handles = []
         self.batch_size = batch_size
 
-    def _get_fwd_hook(self, collector: TensorStatisticCollectorBase) -> Callable:
+    def _get_fwd_hook(
+        self, collector: TensorStatisticCollectorBase
+    ) -> Callable[[torch.nn.Module, torch.Tensor, torch.Tensor], None]:
+        hook = create_register_input_hook(collector=collector)
+
         def fwd_hook(module, input_, output):
-            collector.register_input(input_[0])
+            hook(input_[0])
 
         return fwd_hook
 
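
After the refactor, `mean_min_max` is no longer a dedicated collector: it is the mixed-min-max collector with `use_means_of_mins` and `use_means_of_maxs` both set to True, i.e. a per-sample min/max reduction followed by a mean aggregation. A plain-torch sketch of that math, with made-up sample data:

    import torch

    samples = [torch.tensor([[0.0, 2.0]]), torch.tensor([[-1.0, 4.0]])]
    mins = torch.stack([s.amin(dim=1) for s in samples]).mean(dim=0)  # mean of per-sample mins
    maxs = torch.stack([s.amax(dim=1) for s in samples]).mean(dim=0)  # mean of per-sample maxs
    print(mins.item(), maxs.item())  # -0.5 3.0
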
diff --git a/nncf/torch/statistics/aggregator.py b/nncf/torch/statistics/aggregator.py
index 6c2c48256c6..41fdc20c4fa 100644
--- a/nncf/torch/statistics/aggregator.py
+++ b/nncf/torch/statistics/aggregator.py
@@ -23,6 +23,7 @@
 from nncf.torch.graph.transformations.commands import PTInsertionCommand
 from nncf.torch.nncf_network import NNCFNetwork
 from nncf.torch.tensor import PTNNCFTensor
+from nncf.torch.tensor_statistics.algo import create_register_input_hook
 
 
 class PTStatisticsAggregator(StatisticsAggregator):
@@ -41,6 +42,7 @@ def _get_transformation_layout_extra_outputs(
     ) -> TransformationLayout:
         transformation_layout = TransformationLayout()
         transformation_commands = []
+
         for _statistic_points in statistic_points.values():
             for _statistic_point in _statistic_points:
                 for collectors in _statistic_point.algorithm_to_tensor_collectors.values():
@@ -48,7 +50,7 @@ def _get_transformation_layout_extra_outputs(
                         transformation_commands.append(
                             PTInsertionCommand(
                                 _statistic_point.target_point,
-                                collector.register_input,
+                                create_register_input_hook(collector=collector),
                                 TransformationPriority.FP32_TENSOR_STATISTICS_OBSERVATION,
                             )
                         )
diff --git a/nncf/torch/tensor.py b/nncf/torch/tensor.py
index 6dd4e88b68a..986adb46aa4 100644
--- a/nncf/torch/tensor.py
+++ b/nncf/torch/tensor.py
@@ -30,3 +30,6 @@ def __init__(self, tensor: torch.tensor):
     @property
     def device(self) -> torch.device:
         return self._tensor.device
+
+    def is_empty(self) -> bool:
+        return self.tensor.numel() == 0
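
The `is_empty` body uses `numel()` because, unlike NumPy's `ndarray.size` attribute, `torch.Tensor.size` is a method; comparing the bound method to 0 would always be False. A two-line check:

    import torch

    t = torch.empty(0)
    print(t.numel() == 0)               # True: no elements
    print(t.size() == torch.Size([0]))  # True: size() must be called, it is not an attribute
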
diff --git a/nncf/torch/tensor_statistics/algo.py b/nncf/torch/tensor_statistics/algo.py
index 0f9c56f14e5..55cf2ee4b1b 100644
--- a/nncf/torch/tensor_statistics/algo.py
+++ b/nncf/torch/tensor_statistics/algo.py
@@ -8,26 +8,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Dict, Set
+from typing import Callable, Dict, Set
+
+import torch
 
 from nncf.api.compression import CompressionStage
 from nncf.common.schedulers import StubCompressionScheduler
 from nncf.common.statistics import NNCFStatistics
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.config import NNCFConfig
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
 from nncf.torch.algo_selector import ZeroCompressionLoss
 from nncf.torch.compression_method_api import PTCompressionAlgorithmBuilder
 from nncf.torch.compression_method_api import PTCompressionAlgorithmController
+from nncf.torch.dynamic_graph.context import no_nncf_trace
 from nncf.torch.graph.transformations.commands import PTInsertionCommand
 from nncf.torch.graph.transformations.commands import PTTargetPoint
 from nncf.torch.graph.transformations.commands import TransformationPriority
 from nncf.torch.graph.transformations.layout import PTTransformationLayout
 from nncf.torch.nncf_network import NNCFNetwork
+from nncf.torch.tensor import PTNNCFTensor
 
 
 class TensorStatisticObservationPoint:
-    def __init__(self, target_point: PTTargetPoint, reduction_shapes: Set[ReductionShape] = None):
+    def __init__(self, target_point: PTTargetPoint, reduction_shapes: Set[ReductionAxes] = None):
         self.target_point = target_point
         self.reduction_shapes = reduction_shapes
 
@@ -38,6 +43,28 @@ def __eq__(self, other: "TensorStatisticObservationPoint"):
         return self.target_point == other.target_point
 
 
+def create_register_input_hook(collector: TensorCollector) -> Callable[[torch.Tensor], torch.Tensor]:
+    """
+    Creates a hook function that registers its input tensor in the given collector.
+
+    :param collector: Collector to use in the resulting hook.
+    :return: Hook function that registers its input and returns it unchanged.
+    """
+
+    def register_inputs_hook(x: torch.Tensor) -> torch.Tensor:
+        """
+        Registers the input tensor in the collector and passes it through unchanged.
+
+        :param x: Tensor to register.
+        :return: The same tensor, unchanged.
+        """
+        with no_nncf_trace():
+            collector.register_input_for_all_reducers(PTNNCFTensor(x))
+        return x
+
+    return register_inputs_hook
+
+
 class TensorStatisticsCollectionBuilder(PTCompressionAlgorithmBuilder):
     def __init__(
         self,
@@ -54,9 +81,10 @@ def _get_transformation_layout(self, target_model: NNCFNetwork) -> PTTransformat
         layout = PTTransformationLayout()
         for op, rs_vs_collector in self._observation_points_vs_collectors.items():
             for collector in rs_vs_collector.values():
-                hook_obj = collector.register_input
                 command = PTInsertionCommand(
-                    op.target_point, hook_obj, TransformationPriority.FP32_TENSOR_STATISTICS_OBSERVATION
+                    op.target_point,
+                    create_register_input_hook(collector=collector),
+                    TransformationPriority.FP32_TENSOR_STATISTICS_OBSERVATION,
                 )
                 layout.register(command)
         return layout
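
`create_register_input_hook` returns an identity-like hook that feeds every tensor it sees into a collector under `no_nncf_trace`. A minimal usage sketch with a stand-in collector (`FakeCollector` is illustrative, not a patch class; the real hook takes a `TensorCollector`):

    import torch

    class FakeCollector:
        def __init__(self):
            self.seen = []

        def register_input_for_all_reducers(self, x):
            self.seen.append(x)

    def create_register_input_hook(collector):
        # Same shape as the patch's hook: record the tensor, return it unchanged.
        def register_inputs_hook(x: torch.Tensor) -> torch.Tensor:
            collector.register_input_for_all_reducers(x)
            return x
        return register_inputs_hook

    collector = FakeCollector()
    out = create_register_input_hook(collector)(torch.ones(2))
    print(len(collector.seen), torch.equal(out, torch.ones(2)))  # 1 True
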
diff --git a/nncf/torch/tensor_statistics/collectors.py b/nncf/torch/tensor_statistics/collectors.py
index 186bbb18393..ce6f87eaf39 100644
--- a/nncf/torch/tensor_statistics/collectors.py
+++ b/nncf/torch/tensor_statistics/collectors.py
@@ -9,31 +9,41 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Callable, Deque, List, Optional, Union
+from functools import partial
+from typing import Deque, List, Optional, Tuple, Type, Union
 
+import numpy as np
 import torch
 
-from nncf.common.tensor import NNCFTensor
 from nncf.common.tensor import TensorElementsType
-from nncf.common.tensor_statistics.collectors import MeanMinMaxStatisticCollector
-from nncf.common.tensor_statistics.collectors import MeanPercentileStatisticCollector
-from nncf.common.tensor_statistics.collectors import MeanStatisticCollector
-from nncf.common.tensor_statistics.collectors import MedianMADStatisticCollector
-from nncf.common.tensor_statistics.collectors import MinMaxStatisticCollector
-from nncf.common.tensor_statistics.collectors import MixedMinMaxStatisticCollector
 from nncf.common.tensor_statistics.collectors import NNCFCollectorTensorProcessor
-from nncf.common.tensor_statistics.collectors import PercentileStatisticCollector
-from nncf.common.tensor_statistics.collectors import ReductionShape
-from nncf.common.tensor_statistics.reduction import np_percentile_reduce_like
-from nncf.torch.dynamic_graph.context import no_nncf_trace
+from nncf.common.tensor_statistics.collectors import NNCFTensor
+from nncf.common.tensor_statistics.statistics import TensorStatistic
+from nncf.experimental.common.tensor_statistics.collectors import AbsMaxReducer
+from nncf.experimental.common.tensor_statistics.collectors import AbsQuantileReducer
+from nncf.experimental.common.tensor_statistics.collectors import BatchMeanReducer
+from nncf.experimental.common.tensor_statistics.collectors import MaxAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MaxReducer
+from nncf.experimental.common.tensor_statistics.collectors import MeanAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MeanPerChReducer
+from nncf.experimental.common.tensor_statistics.collectors import MeanReducer
+from nncf.experimental.common.tensor_statistics.collectors import MedianAbsoluteDeviationAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MinAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MinReducer
+from nncf.experimental.common.tensor_statistics.collectors import NoopReducer
+from nncf.experimental.common.tensor_statistics.collectors import PercentileAggregator
+from nncf.experimental.common.tensor_statistics.collectors import QuantileReducer
+from nncf.experimental.common.tensor_statistics.collectors import ShapeAggregator
+from nncf.experimental.common.tensor_statistics.collectors import TensorAggregatorBase
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
+from nncf.quantization.advanced_parameters import StatisticsType
 from nncf.torch.tensor import PTNNCFTensor
-from nncf.torch.tensor_statistics.reduction import expand_like
 from nncf.torch.tensor_statistics.statistics import PTMeanTensorStatistic
 from nncf.torch.tensor_statistics.statistics import PTMedianMADTensorStatistic
 from nncf.torch.tensor_statistics.statistics import PTMinMaxTensorStatistic
 from nncf.torch.tensor_statistics.statistics import PTPercentileTensorStatistic
 
 
+# pylint: disable=too-many-public-methods
 class PTNNCFCollectorTensorProcessor(NNCFCollectorTensorProcessor):
     """
     A realization of the processing methods for PTNNCFTensors.
@@ -51,13 +61,15 @@ def reduce_max(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = Fa
     def abs(x: NNCFTensor) -> NNCFTensor:
         return PTNNCFTensor(torch.abs(x.tensor))
 
-    @staticmethod
-    def min(x1: NNCFTensor, x2: NNCFTensor) -> NNCFTensor:
-        return PTNNCFTensor(torch.min(x1.tensor, x2.tensor))
+    @classmethod
+    def min(cls, *args) -> NNCFTensor:
+        stacked = cls.stack(args)
+        return cls.reduce_min(stacked, axis=0, keepdims=False)
 
-    @staticmethod
-    def max(x1: NNCFTensor, x2: NNCFTensor) -> NNCFTensor:
-        return PTNNCFTensor(torch.max(x1.tensor, x2.tensor))
+    @classmethod
+    def max(cls, *args) -> NNCFTensor:
+        stacked = cls.stack(args)
+        return cls.reduce_max(stacked, axis=0, keepdims=False)
 
     @staticmethod
     def mean(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
@@ -65,15 +77,33 @@ def mean(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTe
 
     @staticmethod
     def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
-        return PTNNCFTensor(x.tensor.median(dim=axis, keepdim=keepdims))
+        # See https://github.com/pytorch/pytorch/issues/61582
+        if not isinstance(axis, int):
+            return PTNNCFTensor(torch.tensor(np.median(x.tensor.detach().cpu().numpy(), axis=axis, keepdims=keepdims)))
+        return PTNNCFTensor(torch.quantile(x.tensor, q=0.5, dim=axis, keepdim=keepdims))
 
-    @staticmethod
-    def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
-        raise NotImplementedError()
+    @classmethod
+    def masked_mean(cls, x: NNCFTensor, axis: Union[int, tuple], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+        if mask is None:
+            return cls.mean(x, axis=axis, keepdims=keepdims)
+        masked_x = np.ma.array(x.tensor.detach().cpu().numpy(), mask=mask.tensor.detach().cpu().numpy())
+        result = np.ma.mean(masked_x, axis=axis, keepdims=keepdims).astype(masked_x.dtype)
+        if isinstance(result, np.ma.MaskedArray):
+            return PTNNCFTensor(torch.tensor(result.data))
+        return PTNNCFTensor(torch.tensor(result))
 
-    @staticmethod
-    def masked_median(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
-        raise NotImplementedError()
+    @classmethod
+    def masked_median(
+        cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False
+    ) -> NNCFTensor:
+        # Implemented via numpy since torch.masked.median is not implemented yet
+        if mask is None:
+            return cls.median(x, axis=axis, keepdims=keepdims)
+        masked_x = np.ma.array(x.tensor.detach().cpu().numpy(), mask=mask.tensor.detach().cpu().numpy())
+        result = np.ma.median(masked_x, axis=axis, keepdims=keepdims).astype(masked_x.dtype)
+        if isinstance(result, np.ma.MaskedArray):
+            return PTNNCFTensor(torch.tensor(result.data))
+        return PTNNCFTensor(torch.tensor(result))
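
The masked reductions above rely on numpy's masked-array semantics, where entries with a True mask are excluded from the statistic; a small standalone sketch with illustrative values:

    import numpy as np

    x = np.array([1.0, 2.0, 100.0, 3.0])
    mask = np.array([False, False, True, False])  # exclude the outlier 100.0
    assert np.ma.median(np.ma.array(x, mask=mask)) == 2.0  # median of [1, 2, 3]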
 
     @staticmethod
     def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
@@ -87,6 +117,14 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
     def batch_mean(x: NNCFTensor) -> NNCFTensor:
         return PTNNCFTensor(torch.mean(x.tensor, axis=0, keepdims=True))
 
+    @staticmethod
+    def logical_or(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        return PTNNCFTensor(torch.logical_or(input_.tensor, other.tensor))
+
+    @staticmethod
+    def less(input_: NNCFTensor, other: NNCFTensor) -> NNCFTensor:
+        return PTNNCFTensor(input_.tensor < other.tensor)
+
     @staticmethod
     def stack(x: Union[List[NNCFTensor], Deque[NNCFTensor]], axis: int = 0) -> NNCFTensor:
         x = [t.tensor for t in x]
@@ -100,6 +138,10 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         tensor_list = torch.unbind(tensor, dim=axis)
         return [PTNNCFTensor(t) for t in tensor_list]
 
+    @staticmethod
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
+        return PTNNCFTensor(torch.squeeze(x.tensor, dim=dim))
+
     @staticmethod
     def sum(tensor: NNCFTensor) -> TensorElementsType:
         return torch.sum(tensor.tensor).item()
@@ -108,153 +150,379 @@ def sum(tensor: NNCFTensor) -> TensorElementsType:
     def quantile(
         tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
     ) -> List[NNCFTensor]:
-        raise NotImplementedError()
+        # See https://github.com/pytorch/pytorch/issues/61582
+        if not isinstance(axis, int):
+            result = torch.tensor(
+                np.quantile(tensor.tensor.detach().cpu().numpy(), q=quantile, axis=axis, keepdims=keepdims)
+            )
+        else:
+            result = torch.quantile(tensor.tensor, torch.tensor(quantile).type(tensor.tensor.dtype), axis, keepdims)
+        result = result.type(tensor.tensor.dtype)
+        return [PTNNCFTensor(x) for x in result]
 
     @classmethod
-    def no_outliers_map(
-        cls, x: NNCFTensor, fn: Callable[[NNCFTensor, Optional[int]], Any], axis: int = 0, alpha: float = 0.01
-    ):
-        raise NotImplementedError()
-
-
-class PTMinMaxStatisticCollector(MinMaxStatisticCollector):
-    def __init__(
-        self, use_abs_max: bool, reduction_shape: ReductionShape, output_shape: ReductionShape, num_samples: int = None
-    ):
-        super().__init__(use_abs_max, reduction_shape, num_samples)
-        self._output_shape = output_shape
+    def percentile(
+        cls,
+        tensor: NNCFTensor,
+        percentile: Union[float, List[float]],
+        axis: Union[int, tuple, list],
+        keepdims: bool = False,
+    ) -> List[NNCFTensor]:
+        quantile = np.true_divide(percentile, 100)
+        return cls.quantile(tensor, quantile=quantile, axis=axis, keepdims=keepdims)
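
A quick sketch of the quantile helper on a toy tensor (values follow torch's linear interpolation; illustrative only):

    import torch

    t = PTNNCFTensor(torch.arange(10, dtype=torch.float32))
    low, high = PTNNCFCollectorTensorProcessor.quantile(t, quantile=[0.1, 0.9], axis=0)
    # low.tensor is approximately 0.9 and high.tensor is approximately 8.1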
 
     @staticmethod
-    def _get_processor() -> NNCFCollectorTensorProcessor:
-        return PTNNCFCollectorTensorProcessor()
-
-    def _register_input(self, x: torch.Tensor):
-        with no_nncf_trace():
-            self._register_input_common(PTNNCFTensor(x))
-
-    def _get_statistics(self) -> PTMinMaxTensorStatistic:
-        min_values = self._min_values.tensor.view(self._output_shape)
-        max_values = self._max_values.tensor.view(self._output_shape)
-        return PTMinMaxTensorStatistic(min_values, max_values)
-
-
-class PTMixedMinMaxStatisticCollector(MixedMinMaxStatisticCollector):
-    def __init__(
-        self,
-        use_per_sample_stats: bool,
-        use_abs_max: bool,
-        use_means_of_mins: bool,
-        use_means_of_maxs: bool,
-        reduction_shape: ReductionShape,
-        output_shape: ReductionShape,
-        num_samples: int = None,
-        window_size: int = None,
-    ):
-        super().__init__(
-            use_per_sample_stats,
-            use_abs_max,
-            use_means_of_mins,
-            use_means_of_maxs,
-            reduction_shape,
-            num_samples,
-            window_size,
-        )
-        self._output_shape = output_shape
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+        return PTNNCFTensor(a.tensor - b.tensor)
 
     @staticmethod
-    def _get_processor() -> NNCFCollectorTensorProcessor:
-        return PTNNCFCollectorTensorProcessor()
-
-    def _register_input(self, x: torch.Tensor):
-        with no_nncf_trace():
-            self._register_input_common(PTNNCFTensor(x))
-
-    def _get_statistics(self) -> PTMinMaxTensorStatistic:
-        min_values = self._min_aggregate().tensor.view(self._output_shape)
-        max_values = self._max_aggregate().tensor.view(self._output_shape)
-        return PTMinMaxTensorStatistic(min_values, max_values)
-
-
-class PTMeanMinMaxStatisticCollector(MeanMinMaxStatisticCollector):
-    def __init__(
-        self,
-        use_per_sample_stats: bool,
-        use_abs_max: bool,
-        reduction_shape: ReductionShape,
-        output_shape: ReductionShape,
-        num_samples: int = None,
-        window_size: int = None,
-    ):
-        super().__init__(use_per_sample_stats, use_abs_max, reduction_shape, num_samples, window_size)
-        self._output_shape = output_shape
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
+        pt_tensor = x.tensor
+        eps = torch.finfo(pt_tensor.dtype).eps
+        return PTNNCFTensor(pt_tensor.abs() < eps)
 
-    @staticmethod
-    def _get_processor() -> NNCFCollectorTensorProcessor:
-        return PTNNCFCollectorTensorProcessor()
 
-    def _register_input(self, x: torch.Tensor):
-        with no_nncf_trace():
-            self._register_input_common(PTNNCFTensor(x))
+class PTReducerMixIn:
+    def _get_processor(self):
+        return PTNNCFCollectorTensorProcessor
 
-    def _get_statistics(self) -> PTMinMaxTensorStatistic:
-        min_values = self._min_aggregate().tensor.view(self._output_shape)
-        max_values = self._max_aggregate().tensor.view(self._output_shape)
-        return PTMinMaxTensorStatistic(min_values, max_values)
+    def get_inplace_fn(self):
+        return None
 
+    def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
+        return []
 
-class PTMedianMADStatisticCollector(MedianMADStatisticCollector):
-    def _register_input(self, x: torch.Tensor):
-        with no_nncf_trace():
-            self._samples.append(x.detach().cpu().numpy())
 
-    def _get_statistics(self) -> PTMedianMADTensorStatistic:
-        numpy_median, numpy_mad = self._prepare_statistics()
-        median_tensor = torch.from_numpy(numpy_median).to(dtype=torch.float)
-        mad_tensor = torch.from_numpy(numpy_mad).to(dtype=torch.float)
+class PTNoopReducer(PTReducerMixIn, NoopReducer):
+    pass
 
-        median_tensor = expand_like(median_tensor, list(self._reduction_shape))
-        mad_tensor = expand_like(mad_tensor, list(self._reduction_shape))
 
-        return PTMedianMADTensorStatistic(median_tensor, mad_tensor)
+class PTMinReducer(PTReducerMixIn, MinReducer):
+    pass
 
 
-class PTPercentileStatisticCollector(PercentileStatisticCollector):
-    def _register_input(self, x: torch.Tensor):
-        with no_nncf_trace():
-            self._samples.append(x.detach().cpu().numpy())
+class PTMaxReducer(PTReducerMixIn, MaxReducer):
+    pass
 
-    def _get_statistics(self) -> PTPercentileTensorStatistic:
-        percentile_vs_values_dict = self._prepare_statistics()
-        for key, val in percentile_vs_values_dict.items():
-            torch_percentiles = torch.from_numpy(val).to(dtype=torch.float)
-            percentile_vs_values_dict[key] = expand_like(torch_percentiles, list(self._reduction_shape))
-        return PTPercentileTensorStatistic(percentile_vs_values_dict)
 
+class PTAbsMaxReducer(PTReducerMixIn, AbsMaxReducer):
+    pass
 
-class PTMeanPercentileStatisticCollector(MeanPercentileStatisticCollector):
-    def _register_input(self, x: torch.Tensor):
-        with no_nncf_trace():
-            for pct, val in self._all_pct_values.items():
-                np_vals = np_percentile_reduce_like(x.cpu().numpy(), self._reduction_shape, pct)
-                torch_vals = torch.from_numpy(np_vals).to(dtype=torch.float)
-                val.append(torch_vals)
 
-    def _get_statistics(self) -> PTPercentileTensorStatistic:
-        mean_percentile_values = {}
-        for pct, val in self._all_pct_values.items():
-            stacked_pct_vals = torch.stack(list(val))
-            mean_percentile_values[pct] = stacked_pct_vals.mean(dim=0).view(self._reduction_shape)
-        return PTPercentileTensorStatistic(mean_percentile_values)
+class PTMeanReducer(PTReducerMixIn, MeanReducer):
+    pass
 
 
-class PTMeanStatisticCollector(MeanStatisticCollector):
-    @staticmethod
-    def _get_processor() -> NNCFCollectorTensorProcessor:
-        return PTNNCFCollectorTensorProcessor()
+class PTQuantileReducer(PTReducerMixIn, QuantileReducer):
+    pass
+
+
+class PTAbsQuantileReducer(PTReducerMixIn, AbsQuantileReducer):
+    pass
+
+
+class PTBatchMeanReducer(PTReducerMixIn, BatchMeanReducer):
+    pass
+
+
+class PTMeanPerChanelReducer(PTReducerMixIn, MeanPerChReducer):
+    pass
+
+
+def _reshape_all(targets: Tuple[torch.Tensor, ...], target_shape: Tuple[int, ...]):
+    return map(lambda stat: torch.reshape(stat, target_shape), targets)
+
+
+def _get_wrapped_min_max_tensor_statistic(target_shape: Tuple[int, ...]) -> Type[PTMinMaxTensorStatistic]:
+    """
+    Returns a PTMinMaxTensorStatistic subclass whose statistics are reshaped to target_shape.
+
+    :param target_shape: Target shape of the tensor statistic.
+    :return: PTMinMaxTensorStatistic subclass whose statistics are reshaped to target_shape.
+    """
 
-    def _register_input(self, x: torch.Tensor):
-        with no_nncf_trace():
-            self._register_input_common(PTNNCFTensor(x))
+    class WrappedPTMinMaxTensorStatistic(PTMinMaxTensorStatistic):
+        def __init__(self, min_values, max_values):
+            min_values, max_values = _reshape_all((min_values, max_values), target_shape)
+            super().__init__(min_values, max_values)
 
-    def _get_statistics(self) -> PTMeanTensorStatistic:
-        return PTMeanTensorStatistic(self._mean_aggregate().tensor, self._shape())
+    return WrappedPTMinMaxTensorStatistic
+
+
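A short sketch of the wrapping behavior (shapes are illustrative):

    import torch

    stat_cls = _get_wrapped_min_max_tensor_statistic(target_shape=(1, 3, 1, 1))
    stat = stat_cls(min_values=torch.zeros(3), max_values=torch.ones(3))
    assert tuple(stat.min_values.shape) == (1, 3, 1, 1)
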
+def _get_wrapped_percentile_tensor_statistic(target_shape: Tuple[int, ...]) -> Type[PTPercentileTensorStatistic]:
+    """
+    Returns a PTPercentileTensorStatistic subclass whose statistics are reshaped to target_shape.
+
+    :param target_shape: Target shape of the tensor statistic.
+    :return: PTPercentileTensorStatistic subclass whose statistics are reshaped to target_shape.
+    """
+
+    class WrappedPTPercentileTensorStatistic(PTPercentileTensorStatistic):
+        def __init__(self, percentile_vs_values_dict):
+            reshaped_percentiles = {}
+            for k, v in percentile_vs_values_dict.items():
+                reshaped_percentiles[k] = torch.reshape(v, target_shape)
+            super().__init__(reshaped_percentiles)
+
+    return WrappedPTPercentileTensorStatistic
+
+
+def get_min_max_statistic_collector(
+    use_abs_max: bool,
+    reduction_axes: Tuple[int, ...],
+    aggregation_axes: Tuple[int, ...],
+    scale_shape: Tuple[int, ...],
+    num_samples: int,
+) -> TensorCollector:
+    """
+    Min max statistic collector builder.
+
+    :param use_abs_max: Whether to use abs max reducer or max reducer.
+    :param reduction_axes: Axes to use in reduction functions.
+    :param aggregation_axes: Axes to use in aggregation functions.
+    :param scale_shape: Target shape for collected statistics.
+    :param num_samples: Maximum number of samples to collect.
+    :return: Min max statistic collector.
+    """
+
+    tensor_collector = TensorCollector(_get_wrapped_min_max_tensor_statistic(target_shape=scale_shape))
+
+    aggregator_kwargs = {
+        "tensor_processor": PTNNCFCollectorTensorProcessor,
+        "num_samples": num_samples,
+        "aggregation_axes": aggregation_axes,
+    }
+    min_reducer = PTMinReducer(reduction_axes)
+    min_aggregator = MinAggregator(**aggregator_kwargs)
+    tensor_collector.register_statistic_branch(PTMinMaxTensorStatistic.MIN_STAT, min_reducer, min_aggregator)
+
+    max_reducer_cls = PTAbsMaxReducer if use_abs_max else PTMaxReducer
+    max_reducer = max_reducer_cls(reduction_axes)
+    max_aggregator = MaxAggregator(**aggregator_kwargs)
+    tensor_collector.register_statistic_branch(PTMinMaxTensorStatistic.MAX_STAT, max_reducer, max_aggregator)
+    return tensor_collector
+
+
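A hypothetical per-channel setup for NCHW activations (axes, shapes, and sample counts are illustrative, not prescribed by this patch):

    import torch

    collector = get_min_max_statistic_collector(
        use_abs_max=True,
        reduction_axes=(0, 2, 3),  # reduce over batch and spatial dims
        aggregation_axes=(0,),  # aggregate over the collected samples
        scale_shape=(1, 3, 1, 1),
        num_samples=128,
    )
    for _ in range(8):
        collector.register_input_for_all_reducers(PTNNCFTensor(torch.randn(2, 3, 8, 8)))
    statistic = collector.get_statistics()  # PTMinMaxTensorStatistic reshaped to scale_shape
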
+def get_mixed_min_max_statistic_collector(
+    use_abs_max: bool,
+    reduction_axes: Tuple[int, ...],
+    aggregation_axes: Tuple[int, ...],
+    scale_shape: Tuple[int, ...],
+    use_means_of_mins: bool,
+    use_means_of_maxs: bool,
+    num_samples: int = None,
+    window_size: Optional[int] = None,
+) -> TensorCollector:
+    """
+    Mixed min max statistic collector builder.
+
+    :param use_abs_max: Whether to use abs max reducer or max reducer.
+    :param reduction_axes: Axes to use in reduction functions.
+    :param aggregation_axes: Axes to use in aggregation functions.
+    :param scale_shape: Target shape for collected statistics.
+    :param use_means_of_mins: Whether to use mean or min aggregator for minimum statistic branch.
+    :param use_means_of_maxs: Whether to use mean or max aggregator for maximum statistic branch.
+    :param num_samples: Maximum number of samples to collect.
+    :param window_size: Number of samples from the end of the list of collected samples to aggregate.
+        Aggregates all available collected statistics if the parameter is None.
+    :return: Mixed min max statistic collector.
+    """
+    tensor_collector = TensorCollector(_get_wrapped_min_max_tensor_statistic(target_shape=scale_shape))
+    min_reducer = PTMinReducer(reduction_axes)
+
+    kwargs = {
+        "tensor_processor": PTNNCFCollectorTensorProcessor,
+        "num_samples": num_samples,
+        "aggregation_axes": aggregation_axes,
+        "window_size": window_size,
+    }
+    min_aggregator_cls = MeanAggregator if use_means_of_mins else MinAggregator
+    min_aggregator = min_aggregator_cls(**kwargs)
+    tensor_collector.register_statistic_branch(PTMinMaxTensorStatistic.MIN_STAT, min_reducer, min_aggregator)
+
+    max_reducer_cls = PTAbsMaxReducer if use_abs_max else PTMaxReducer
+    max_reducer = max_reducer_cls(reduction_axes)
+    max_aggregator_cls = MeanAggregator if use_means_of_maxs else MaxAggregator
+    max_aggregator = max_aggregator_cls(**kwargs)
+    tensor_collector.register_statistic_branch(PTMinMaxTensorStatistic.MAX_STAT, max_reducer, max_aggregator)
+
+    return tensor_collector
+
+
+def get_median_mad_statistic_collector(
+    reduction_axes: Tuple[int, ...],
+    aggregation_axes: Tuple[int, ...],
+    scale_shape: Tuple[int, ...],
+    num_samples: int,
+    window_size: Optional[int] = None,
+) -> TensorCollector:
+    """
+    Median Absolute Deviation statistic collector builder.
+
+    :param reduction_axes: Axes to use in reduction functions.
+    :param aggregation_axes: Axes to use in aggregation functions.
+    :param scale_shape: Target shape for collected statistics.
+    :param num_samples: Maximum number of samples to collect.
+    :param window_size: Number of samples from the end of the list of collected samples to aggregate.
+        Aggregates all available collected statistics if the parameter is None.
+    :return: Median Absolute Deviation statistic collector.
+    """
+
+    class WrappedPTMedianMADTensorStatistic(PTMedianMADTensorStatistic):
+        def __init__(self, median_values, mad_values):
+            median_values, mad_values = _reshape_all((median_values, mad_values), scale_shape)
+            super().__init__(median_values, mad_values)
+
+    return _get_collection_without_reduction(
+        MedianAbsoluteDeviationAggregator,
+        WrappedPTMedianMADTensorStatistic,
+        reduction_axes=reduction_axes,
+        aggregation_axes=aggregation_axes,
+        num_samples=num_samples,
+        window_size=window_size,
+    )
+
+
+def get_percentile_tensor_collector(
+    percentiles_to_collect: Tuple[int, ...],
+    reduction_axes: Tuple[int, ...],
+    aggregation_axes: Tuple[int, ...],
+    scale_shape: Tuple[int, ...],
+    num_samples: int,
+    window_size: Optional[int] = None,
+) -> TensorCollector:
+    """
+    Percentile statistic collector builder.
+
+    :param percentiles_to_collect: Percentiles to use during the aggregation phase.
+    :param reduction_axes: Axes to use in reduction functions.
+    :param aggregation_axes: Axes to use in aggregation functions.
+    :param scale_shape: Target shape for collected statistics.
+    :param num_samples: Maximum number of samples to collect.
+    :param window_size: Number of samples from the end of the list of collected samples to aggregate.
+        Aggregates all available collected statistics if the parameter is None.
+    :return: Percentile statistic collector.
+    """
+    return _get_collection_without_reduction(
+        partial(PercentileAggregator, percentiles_to_collect=percentiles_to_collect),
+        _get_wrapped_percentile_tensor_statistic(target_shape=scale_shape),
+        reduction_axes=reduction_axes,
+        aggregation_axes=aggregation_axes,
+        num_samples=num_samples,
+        window_size=window_size,
+    )
+
+
+def _get_collection_without_reduction(
+    aggregator_cls: Type[TensorAggregatorBase],
+    statistic_cls: Type[TensorStatistic],
+    reduction_axes: Tuple[int, ...],
+    aggregation_axes: Tuple[int, ...],
+    num_samples: int,
+    window_size: Optional[int] = None,
+) -> TensorCollector:
+    """
+    Helper function to build a tensor collector that reduces statistics exclusively during the aggregation phase.
+
+    :param aggregator_cls: Aggregator class to build the tensor collector.
+    :param statistic_cls: Statistic class to build the tensor collector.
+    :param reduction_axes: Axes to use in reduction functions.
+    :param aggregation_axes: Axes to use in aggregation functions.
+    :param num_samples: Maximum number of samples to collect.
+    :param window_size: Number of samples from the end of the list of collected samples to aggregate.
+        Aggregates all available collected statistics if the parameter is None.
+    :return: Target statistic collector.
+    """
+    tensor_collector = TensorCollector(statistic_cls)
+    reducer = PTNoopReducer()
+    aggregation_axes = list(set(list(aggregation_axes) + [dim + 1 for dim in reduction_axes]))
+    aggregator = aggregator_cls(
+        PTNNCFCollectorTensorProcessor,
+        aggregation_axes=aggregation_axes,
+        window_size=window_size,
+        num_samples=num_samples,
+    )
+
+    tensor_collector.register_statistic_branch(
+        PTMedianMADTensorStatistic.TENSOR_STATISTIC_OUTPUT_KEY, reducer, aggregator
+    )
+    return tensor_collector
+
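The aggregation-axes bookkeeping above compensates for the extra sample dimension that stacking prepends to each collected tensor; for example:

    reduction_axes = (1, 2)
    aggregation_axes = (0,)
    # Stacked samples have shape (num_samples, *tensor_shape),
    # so per-tensor reduction axes shift by one.
    combined = set(list(aggregation_axes) + [dim + 1 for dim in reduction_axes])
    assert combined == {0, 2, 3}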
+
+def get_mean_percentile_statistic_collector(
+    percentiles_to_collect: Tuple[int, ...],
+    reduction_axes: Tuple[int, ...],
+    aggregation_axes: Tuple[int, ...],
+    scale_shape: Tuple[int, ...],
+    num_samples: int,
+    window_size: Optional[int] = None,
+) -> TensorCollector:
+    """
+    Mean percentile statistic collector builder.
+
+    :param percentiles_to_collect: Percentiles to use during the reduction phase.
+    :param reduction_axes: Axes to use in reduction functions.
+    :param aggregation_axes: Axes to use in aggregation functions.
+    :param scale_shape: Target shape for collected statistics.
+    :param num_samples: Maximum number of samples to collect.
+    :param window_size: Number of samples from the end of the list of collected samples to aggregate.
+        Aggregates all available collected statistics if the parameter is None.
+    :return: Mean percentile statistic collector.
+    """
+    tensor_collector = TensorCollector(_get_wrapped_percentile_tensor_statistic(target_shape=scale_shape))
+    quantiles_to_collect = np.true_divide(percentiles_to_collect, 100)
+    reducer = PTQuantileReducer(reduction_axes=reduction_axes, quantile=quantiles_to_collect)
+    for output_port_id, p in enumerate(percentiles_to_collect):
+        aggregator = MeanAggregator(
+            PTNNCFCollectorTensorProcessor,
+            aggregation_axes=aggregation_axes,
+            num_samples=num_samples,
+            window_size=window_size,
+        )
+        tensor_collector.register_statistic_branch(
+            (PTPercentileTensorStatistic.PERCENTILE_VS_VALUE_DICT, p), reducer, aggregator, output_port_id
+        )
+    return tensor_collector
+
+
+def get_mean_statisitic_collector(
+    num_samples: int, channel_axis: int, window_size: Optional[int] = None
+) -> TensorCollector:
+    """
+    Mean statistic collector builder.
+
+    :param num_samples: Maximum number of samples to collect.
+    :param channel_axis: Channel axis to use during reduction phase.
+    :param window_size: Number of samples from the end of the list of collected samples to aggregate.
+        Aggregates all available collected statistics if the parameter is None.
+    :return: Mean statistic collector.
+    """
+    if channel_axis == 0:
+        reducer = PTBatchMeanReducer()
+    else:
+        reducer = PTMeanPerChanelReducer(channel_axis)
+    noop_reducer = PTNoopReducer()
+
+    kwargs = {
+        "tensor_processor": PTNNCFCollectorTensorProcessor,
+        "num_samples": num_samples,
+        "window_size": window_size,
+    }
+    aggregate_mean = MeanAggregator(**kwargs)
+    aggregate_shape = ShapeAggregator()
+
+    collector = TensorCollector(PTMeanTensorStatistic)
+    collector.register_statistic_branch(PTMeanTensorStatistic.MEAN_STAT, reducer, aggregate_mean)
+    collector.register_statistic_branch(PTMeanTensorStatistic.SHAPE_STAT, noop_reducer, aggregate_shape)
+    return collector
+
+
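A minimal usage sketch of get_mean_statisitic_collector (shapes illustrative):

    import torch

    collector = get_mean_statisitic_collector(num_samples=32, channel_axis=1)
    for _ in range(4):
        collector.register_input_for_all_reducers(PTNNCFTensor(torch.randn(2, 3, 8, 8)))
    stat = collector.get_statistics()  # PTMeanTensorStatistic with mean_values and shape
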
+PT_REDUCERS_MAP = {
+    StatisticsType.MIN: PTMinReducer,
+    StatisticsType.MAX: PTMaxReducer,
+    StatisticsType.ABS_MAX: PTAbsMaxReducer,
+    StatisticsType.MEAN: PTMeanReducer,
+    StatisticsType.QUANTILE: PTQuantileReducer,
+    StatisticsType.ABS_QUANTILE: PTAbsQuantileReducer,
+}
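
The map lets backend-agnostic code pick a Torch reducer by statistic type; a hedged sketch (constructor arguments follow the tests in this patch):

    reducer_cls = PT_REDUCERS_MAP[StatisticsType.QUANTILE]
    reducer = reducer_cls(reduction_axes=(0,), quantile=[0.01, 0.99])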
diff --git a/nncf/torch/tensor_statistics/statistics.py b/nncf/torch/tensor_statistics/statistics.py
index 7a251b19207..ba51df16ce9 100644
--- a/nncf/torch/tensor_statistics/statistics.py
+++ b/nncf/torch/tensor_statistics/statistics.py
@@ -9,6 +9,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Optional, Tuple
+
 import torch
 
 from nncf.common.tensor_statistics.statistics import MeanTensorStatistic
@@ -18,6 +20,10 @@
 from nncf.common.tensor_statistics.statistics import TensorStatistic
 
 
+def _reshape_all(targets: Tuple[torch.Tensor, ...], target_shape: Tuple[int, ...]):
+    return map(lambda stat: torch.reshape(stat, target_shape), targets)
+
+
 class PTMinMaxTensorStatistic(MinMaxTensorStatistic):
     @staticmethod
     def tensor_eq(tensor1: torch.Tensor, tensor2: torch.Tensor, rtol=1e-6) -> bool:
@@ -49,8 +55,8 @@ def pt_convert_stat_to_min_max_tensor_stat(statistic: TensorStatistic) -> PTMinM
         # Using three-sigma approach to estimate min and max
         # Constant factor depends on the distribution form - assuming normal and the factor is 1.4826
         return PTMinMaxTensorStatistic(
-            statistic.median_values - 3 * 1.4826230 * statistic.mad_values,
-            statistic.median_values + 3 * 1.4826230 * statistic.mad_values,
+            min_values=statistic.median_values - 3 * 1.4826230 * statistic.mad_values,
+            max_values=statistic.median_values + 3 * 1.4826230 * statistic.mad_values,
         )
     if isinstance(statistic, PTPercentileTensorStatistic):
         if len(statistic.percentile_vs_values_dict.keys()) < 2:
@@ -58,6 +64,7 @@ def pt_convert_stat_to_min_max_tensor_stat(statistic: TensorStatistic) -> PTMinM
         min_pct = min(statistic.percentile_vs_values_dict.keys())
         max_pct = max(statistic.percentile_vs_values_dict.keys())
         return PTMinMaxTensorStatistic(
-            statistic.percentile_vs_values_dict[min_pct], statistic.percentile_vs_values_dict[max_pct]
+            min_values=statistic.percentile_vs_values_dict[min_pct],
+            max_values=statistic.percentile_vs_values_dict[max_pct],
         )
     raise ValueError("Unknown TensorStatistic to generate min-max stat from!")
diff --git a/tests/common/experimental/test_reducers_and_aggregators.py b/tests/common/experimental/test_reducers_and_aggregators.py
index cc54fe987ac..30634421c9a 100644
--- a/tests/common/experimental/test_reducers_and_aggregators.py
+++ b/tests/common/experimental/test_reducers_and_aggregators.py
@@ -10,18 +10,26 @@
 # limitations under the License.
 
 from abc import abstractmethod
+from dataclasses import dataclass
+from functools import partial
 from itertools import product
+from typing import Any, List, Optional, Tuple
 
 import numpy as np
 import pytest
 
+from nncf.common.graph.layer_attributes import Dtype
+from nncf.common.tensor_statistics.collectors import NNCFCollectorTensorProcessor
+from nncf.experimental.common.tensor_statistics.collectors import AggregationAxes
 from nncf.experimental.common.tensor_statistics.collectors import MaxAggregator
 from nncf.experimental.common.tensor_statistics.collectors import MeanAggregator
 from nncf.experimental.common.tensor_statistics.collectors import MeanNoOutliersAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MedianAbsoluteDeviationAggregator
 from nncf.experimental.common.tensor_statistics.collectors import MedianAggregator
 from nncf.experimental.common.tensor_statistics.collectors import MedianNoOutliersAggregator
 from nncf.experimental.common.tensor_statistics.collectors import MinAggregator
 from nncf.experimental.common.tensor_statistics.collectors import NoopAggregator
+from nncf.experimental.common.tensor_statistics.collectors import PercentileAggregator
 from nncf.experimental.common.tensor_statistics.collectors import ShapeAggregator
 
 DEFALUT_3D_MEAN_VALUE = [[2503.125, -2493.75, 5009.375], [-4987.5, 7515.625, -7481.25], [10021.875, -9975.0, 12528.125]]
@@ -43,17 +51,82 @@
 default_test_quantile = 0.1
 
 
-def default_test_mean_no_outlier(tp, ps):
-    return MeanNoOutliersAggregator(tp, ps, quantile=default_test_quantile)
+@dataclass
+class OfflineAggregatorTestCase:
+    aggregation_axes: Optional[AggregationAxes]
+    min_ref: np.ndarray
+    max_ref: np.ndarray
+
+
+OFFLINE_AGGREGATORS_TEST_CASES = [
+    OfflineAggregatorTestCase(
+        aggregation_axes=None,
+        min_ref=np.array([[[-50000, -4, -8], [-12, -16, -20], [-24, -28, -32]]]),
+        max_ref=np.array([[[50000, 4, 8], [12, 16, 20], [24, 28, 32]]]),
+    ),
+    OfflineAggregatorTestCase(
+        aggregation_axes=(0,),
+        min_ref=np.array([[[-50000, -4, -8], [-12, -16, -20], [-24, -28, -32]]]),
+        max_ref=np.array([[[50000, 4, 8], [12, 16, 20], [24, 28, 32]]]),
+    ),
+    OfflineAggregatorTestCase(
+        aggregation_axes=(0, 2),
+        min_ref=np.array([[-50000, -28, -32]]),
+        max_ref=np.array([[50000, 28, 32]]),
+    ),
+    OfflineAggregatorTestCase(
+        aggregation_axes=(2,),
+        min_ref=np.array(
+            [
+                [[-50000, 5, 10]],
+                [[-40000, 4, 8]],
+                [[-30000, 3, 6]],
+                [[-20000, 2, 4]],
+                [[-10000, 1, 2]],
+                [[0, 0, 0]],
+                [[-6, -7, -8]],
+                [[-12, -14, -16]],
+                [[-18, -21, -24]],
+                [[-24, -28, -32]],
+            ]
+        ),
+        max_ref=np.array(
+            [
+                [[50000, -5, -10]],
+                [[40000, -4, -8]],
+                [[30000, -3, -6]],
+                [[20000, -2, -4]],
+                [[10000, -1, -2]],
+                [[0, 0, 0]],
+                [[6, 7, 8]],
+                [[12, 14, 16]],
+                [[18, 21, 24]],
+                [[24, 28, 32]],
+            ]
+        ),
+    ),
+]
 
 
-def default_test_median_no_outlier(tp, ps):
-    return MedianNoOutliersAggregator(tp, ps, quantile=default_test_quantile)
+def default_test_mean_no_outlier(tensor_processor, aggregation_axes):
+    return MeanNoOutliersAggregator(
+        tensor_processor=tensor_processor,
+        aggregation_axes=aggregation_axes,
+        quantile=default_test_quantile,
+    )
+
+
+def default_test_median_no_outlier(tensor_processor, aggregation_axes):
+    return MedianNoOutliersAggregator(
+        tensor_processor=tensor_processor,
+        aggregation_axes=aggregation_axes,
+        quantile=default_test_quantile,
+    )
 
 
 class TemplateTestReducersAggreagtors:
     @abstractmethod
-    def get_nncf_tensor(self, x: np.array):
+    def get_nncf_tensor(self, x: np.array, dtype: Optional[Dtype] = None):
         pass
 
     @pytest.fixture
@@ -70,6 +143,18 @@ def reducers(self):
     def all_close(self, val, ref) -> bool:
         pass
 
+    @abstractmethod
+    def squeeze_tensor(self, ref_tensor: List[Any], axes: Optional[Tuple[int]] = None):
+        pass
+
+    @abstractmethod
+    def cast_tensor(self, tensor, dtype: Dtype):
+        pass
+
+    @abstractmethod
+    def expand_dims(self, tensor, dims: Tuple[int, ...]):
+        pass
+
     def test_noop_reducer(self, reducers):
         reducer = reducers["noop"]()
         input_ = np.arange(24).reshape((1, 2, 3, 4))
@@ -87,27 +172,27 @@ def test_noop_reducer(self, reducers):
         ],
     )
     def test_min_max_mean_reducers(self, reducer_name, ref, reducers):
-        reduction_shape = (1, 2)
+        reduction_axes = (1, 2)
         input_ = np.arange(-26, 10).reshape((4, 3, 3))
-        for i, red_shape in enumerate([reduction_shape, None]):
-            reducer = reducers[reducer_name](red_shape, False)
-            val = reducer([self.get_nncf_tensor(input_)])
+        for i, reduction_axes_ in enumerate([reduction_axes, None]):
+            reducer = reducers[reducer_name](reduction_axes=reduction_axes_, inplace=False)
+            val = reducer([self.get_nncf_tensor(input_, Dtype.FLOAT)])
             assert len(val) == 1
-            assert self.all_close(val[0].tensor, ref[i])
+            assert self.all_close(val[0].tensor, self.cast_tensor(ref[i], Dtype.FLOAT))
 
     @pytest.mark.parametrize(
         "reducer_name,ref", [("quantile", ([[[[-20000]]]], [[[[10000]]]])), ("abs_quantile", ([[[[20000]]]],))]
     )
     def test_quantile_reducers(self, reducer_name, ref, reducers):
-        reduction_shape = (1, 2, 3)
+        reduction_axes = (1, 2, 3)
         input_ = np.arange(-26, 10).reshape((1, 4, 3, 3))
         input_[0][0][0] = -20000
         input_[0][0][1] = 10000
-        reducer = reducers[reducer_name](reduction_shape, inplace=False)
-        val = reducer([self.get_nncf_tensor(input_)])
+        reducer = reducers[reducer_name](reduction_axes=reduction_axes, inplace=False)
+        val = reducer([self.get_nncf_tensor(input_, dtype=Dtype.FLOAT)])
         assert len(val) == len(ref)
         for i, ref_ in enumerate(ref):
-            assert self.all_close(val[i].tensor, ref_)
+            assert self.all_close(val[i].tensor, self.cast_tensor(ref_, Dtype.FLOAT))
 
     @pytest.mark.parametrize(
         "reducer_name,ref",
@@ -116,9 +201,9 @@ def test_quantile_reducers(self, reducer_name, ref, reducers):
     def test_batch_mean_mean_per_ch_reducers(self, reducer_name, ref, reducers):
         input_ = np.arange(-26, 10).reshape((4, 1, 3, 3))
         reducer = reducers[reducer_name](inplace=False)
-        val = reducer([self.get_nncf_tensor(input_)])
+        val = reducer([self.get_nncf_tensor(input_, Dtype.FLOAT)])
         assert len(val) == 1
-        assert self.all_close(val[0].tensor, ref)
+        assert self.all_close(val[0].tensor, self.cast_tensor(ref, Dtype.FLOAT))
 
     def test_noop_aggregator(self):
         aggregator = NoopAggregator(None)
@@ -146,24 +231,33 @@ def test_shape_aggregator(self):
         assert aggregator._collected_samples == 1
         assert ref_shape == aggregator.aggregate()
 
-    def test_min_max_aggregators(self, tensor_processor):
-        min_aggregator = MinAggregator(tensor_processor)
-        max_aggregator = MaxAggregator(tensor_processor)
+    @pytest.mark.parametrize(
+        "offline_aggregators_test_desc",
+        OFFLINE_AGGREGATORS_TEST_CASES,
+    )
+    def test_min_max_aggregators(
+        self, offline_aggregators_test_desc: OfflineAggregatorTestCase, tensor_processor: NNCFCollectorTensorProcessor
+    ):
+        aggregation_axes = offline_aggregators_test_desc.aggregation_axes
+        min_aggregator = MinAggregator(tensor_processor=tensor_processor, aggregation_axes=aggregation_axes)
+        max_aggregator = MaxAggregator(tensor_processor=tensor_processor, aggregation_axes=aggregation_axes)
         input_ = np.arange(3 * 3).reshape((1, 3, 3))
         input_[0, 0, 0] = -10000
         for i in range(-5, 5):
             min_aggregator.register_reduced_input(self.get_nncf_tensor(input_ * (-i)))
             max_aggregator.register_reduced_input(self.get_nncf_tensor(input_ * i))
 
-        min_ref = [[[-50000, -4, -8], [-12, -16, -20], [-24, -28, -32]]]
-        assert self.all_close(min_ref, min_aggregator.aggregate())
-
-        max_ref = [[[50000, 4, 8], [12, 16, 20], [24, 28, 32]]]
-        assert self.all_close(max_ref, max_aggregator.aggregate())
+        min_ref = offline_aggregators_test_desc.min_ref
+        max_ref = offline_aggregators_test_desc.max_ref
+        assert self.all_close(
+            min_aggregator.aggregate(),
+            min_ref,
+        )
+        assert self.all_close(max_aggregator.aggregate(), max_ref)
 
     NO_OUTLIERS_TEST_PARAMS = [
         (MeanAggregator, True, 1, 1404.5138888888905),
-        (MedianAggregator, True, 1, 15.5),
+        (MedianAggregator, True, 1, 24.0),
         (
             MeanAggregator,
             False,
@@ -179,8 +273,8 @@ def test_min_max_aggregators(self, tensor_processor):
         (MedianAggregator, True, 3, DEFALUT_3D_MEDIAN_VALUE),
         (MeanAggregator, False, 3, [DEFALUT_3D_MEAN_VALUE]),
         (MedianAggregator, False, 3, [DEFALUT_3D_MEDIAN_VALUE]),
-        (default_test_mean_no_outlier, True, 1, 1404.5138888888905),
-        (default_test_median_no_outlier, True, 1, 15.5),
+        (default_test_mean_no_outlier, True, 1, 20.0893),
+        (default_test_median_no_outlier, True, 1, 30.0),
         (
             default_test_mean_no_outlier,
             False,
@@ -211,19 +305,86 @@ def test_mean_median_agggregators(self, aggregator_cls, refs, tensor_processor,
             input_ = input_.reshape((1, 3, 3))
             input_with_outliers = input_with_outliers.reshape((1, 3, 3))
 
-        aggregator = aggregator_cls(tensor_processor, use_per_sample_stats)
+        aggregation_axes = (0, 1) if use_per_sample_stats else (0,)
+        aggregator = aggregator_cls(tensor_processor=tensor_processor, aggregation_axes=aggregation_axes)
         for i in range(1, 6):
-            aggregator.register_reduced_input(self.get_nncf_tensor(input_ * i))
+            aggregator.register_reduced_input(self.get_nncf_tensor(input_ * i, Dtype.FLOAT))
         # this registration is to make diff between mean and median bigger
-        aggregator.register_reduced_input(self.get_nncf_tensor(input_ * 10))
+        aggregator.register_reduced_input(self.get_nncf_tensor(input_ * 10, Dtype.FLOAT))
         is_median = isinstance(aggregator, (MedianAggregator, MedianNoOutliersAggregator))
         # Outliers registration
         for i in range(2):
             # mult is needed to make outlier and no outlier aggreagators differs
             mult = 2.2 * i - 1 if not is_median else 1
-            aggregator.register_reduced_input(self.get_nncf_tensor(input_with_outliers * mult))
+            aggregator.register_reduced_input(self.get_nncf_tensor(input_with_outliers * mult, Dtype.FLOAT))
+            if is_median and dims == 1 and use_per_sample_stats:
+                # Make the no-outliers and outliers versions return different outputs
+                aggregator.register_reduced_input(
+                    self.get_nncf_tensor(np.full(input_with_outliers.shape, input_with_outliers[-1]), Dtype.FLOAT)
+                )
+        ret_val = aggregator.aggregate()
+
+        assert self.all_close(ret_val, self.cast_tensor(refs, Dtype.FLOAT))
+
+    REF_MAD_PERCENTILE_REF_VALUES = {
+        MedianAbsoluteDeviationAggregator: {
+            None: {
+                "median_values": np.array([4.5, 9.0, 13.5, 18.0, 22.5, 27.0, 31.5, 36.0, 40.5]),
+                "mad_values": np.array([2.5, 5.0, 7.5, 10.0, 12.5, 15.0, 17.5, 20.0, 22.5]),
+            },
+            (0,): {
+                "median_values": np.array([4.5, 9.0, 13.5, 18.0, 22.5, 27.0, 31.5, 36.0, 40.5]),
+                "mad_values": np.array([2.5, 5.0, 7.5, 10.0, 12.5, 15.0, 17.5, 20.0, 22.5]),
+            },
+            (0, 1): {
+                "median_values": np.array(18.0),
+                "mad_values": np.array(12.0),
+            },
+        },
+        PercentileAggregator: {
+            None: {
+                5: np.array([0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6]),
+                10: np.array([0.8, 1.6, 2.4, 3.2, 4.0, 4.8, 5.6, 6.4, 7.2]),
+                90: np.array([7.2, 14.4, 21.6, 28.8, 36.0, 43.2, 50.4, 57.6, 64.8]),
+                95: np.array([7.6, 15.2, 22.8, 30.4, 38.0, 45.6, 53.2, 60.8, 68.4]),
+            },
+            (0,): {
+                5: np.array([0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6]),
+                10: np.array([0.8, 1.6, 2.4, 3.2, 4.0, 4.8, 5.6, 6.4, 7.2]),
+                90: np.array([7.2, 14.4, 21.6, 28.8, 36.0, 43.2, 50.4, 57.6, 64.8]),
+                95: np.array([7.6, 15.2, 22.8, 30.4, 38.0, 45.6, 53.2, 60.8, 68.4]),
+            },
+            (0, 1): {
+                5: np.array(0.0),
+                10: np.array(0.0),
+                90: np.array(48.0),
+                95: np.array(56.0),
+            },
+        },
+    }
+
+    @pytest.mark.parametrize(
+        "aggregator_cls",
+        [
+            MedianAbsoluteDeviationAggregator,
+            partial(
+                PercentileAggregator,
+                percentiles_to_collect=[5, 10, 90, 95],
+            ),
+        ],
+    )
+    @pytest.mark.parametrize("aggregation_axes", [None, (0,), (0, 1)])
+    def test_mad_percentile_aggregators(self, aggregator_cls, tensor_processor, aggregation_axes):
+        aggregator = aggregator_cls(tensor_processor=tensor_processor, aggregation_axes=aggregation_axes)
+        input_ = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32)
+        for i in range(9):
+            aggregator.register_reduced_input(self.get_nncf_tensor(input_ * i, Dtype.FLOAT))
+
         ret_val = aggregator.aggregate()
-        assert self.all_close(ret_val, refs)
+        ref_values = self.REF_MAD_PERCENTILE_REF_VALUES[aggregator.__class__][aggregation_axes]
+        assert len(ret_val) == len(ref_values)
+        for k, v in ref_values.items():
+            assert self.all_close(ret_val[k], self.cast_tensor(v, Dtype.FLOAT))
 
     @pytest.mark.parametrize(
         "reducer_name",
@@ -240,10 +401,10 @@ def test_reducers_name_hash_equal(self, reducer_name, reducers):
 
         params = {}
         if reducer_name in ["min", "max", "abs_max", "mean"]:
-            params["reduction_shape"] = [None, (0, 1, 3), (1, 2, 3)]
+            params["reduction_axes"] = [None, (0, 1, 3), (1, 2, 3)]
             params["inplace"] = [False, True]
         elif reducer_name in ["quantile", "abs_quantile"]:
-            params["reduction_shape"] = [None, (0, 1, 3), (1, 2, 3)]
+            params["reduction_axes"] = [None, (0, 1, 3), (1, 2, 3)]
             params["quantile"] = [[0.01, 0.99], [0.001, 0.999]]
         elif reducer_name == "batch_mean":
             params["inplace"] = [False, True]
diff --git a/tests/common/experimental/test_statistic_collector.py b/tests/common/experimental/test_statistic_collector.py
index 9346b176bd6..ba99c65d328 100644
--- a/tests/common/experimental/test_statistic_collector.py
+++ b/tests/common/experimental/test_statistic_collector.py
@@ -10,20 +10,41 @@
 # limitations under the License.
 
 from abc import abstractmethod
-from typing import List, Optional
+from typing import List, Optional, Type
 
 import numpy as np
 import pytest
 
 from nncf.common.tensor import NNCFTensor
+from nncf.common.tensor_statistics.statistics import MeanTensorStatistic
+from nncf.common.tensor_statistics.statistics import MedianMADTensorStatistic
+from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
+from nncf.common.tensor_statistics.statistics import PercentileTensorStatistic
+from nncf.common.tensor_statistics.statistics import RawTensorStatistic
+from nncf.experimental.common.tensor_statistics.collectors import AggregatorBase
 from nncf.experimental.common.tensor_statistics.collectors import MergedTensorCollector
-from nncf.experimental.common.tensor_statistics.collectors import TensorAggregatorBase
 from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
 from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
 from nncf.experimental.common.tensor_statistics.collectors import TensorType
 
-
 # pylint: disable=(protected-access)
+
+
+class NumpyNNCFTensor(NNCFTensor):
+    def __init__(self, tensor: np.array, dummy_device: Optional[str] = None):
+        # In case somebody attempts to wrap a tensor twice
+        if isinstance(tensor, self.__class__):
+            tensor = tensor.tensor
+
+        super().__init__(tensor)
+        self.dummy_device = dummy_device
+
+    @property
+    def device(self) -> Optional[str]:
+        return self.dummy_device
+
+
 class DummyTensorReducer(TensorReducerBase):
     def __init__(self, output_name: str, inplace: bool = False, inplace_mock=None):
         super().__init__(inplace=inplace)
@@ -47,9 +68,9 @@ class DummyTensorReducerA(DummyTensorReducer):
     pass
 
 
-class DummyTensorAggregator(TensorAggregatorBase):
-    def __init__(self, num_samples: Optional[int]):
-        super().__init__(None, num_samples)
+class DummyTensorAggregator(AggregatorBase):
+    def __init__(self, num_samples: Optional[int] = None):
+        super().__init__(None, num_samples=num_samples)
 
     def _register_reduced_input_impl(self, x: TensorType):
         return self._container.append(x)
@@ -69,7 +90,7 @@ def test_aggregator_enabled_and_reset():
     collector.register_statistic_branch("A", reducer, aggregator)
     input_name = "input_name"
     inputs = TensorCollector.get_tensor_collector_inputs(
-        {input_name: NNCFTensor(np.array(100))}, [(hash(reducer), [input_name])]
+        {input_name: NumpyNNCFTensor(np.array(100))}, [(hash(reducer), [input_name])]
     )
 
     for _ in range(3):
@@ -125,7 +146,11 @@ def test_duplicated_statistics_are_merged():
         [(hash(reducer_inplace), ["Dummy_inplace"]), (hash(reducer_a), ["A"]), (hash(reducer), ["Dummy"])]
     )
 
-    outputs = {"Dummy": NNCFTensor(np.array(5)), "A": NNCFTensor(np.array(0)), "Dummy_inplace": NNCFTensor(np.array(6))}
+    outputs = {
+        "Dummy": NumpyNNCFTensor(np.array(5)),
+        "A": NumpyNNCFTensor(np.array(0)),
+        "Dummy_inplace": NumpyNNCFTensor(np.array(6)),
+    }
     target_inputs = TensorCollector.get_tensor_collector_inputs(outputs, output_info)
     collector.register_inputs(target_inputs)
 
@@ -142,10 +167,10 @@ def test_duplicated_statistics_are_merged():
     # Check aggregators recieved correct inputs
     assert len(statistics) == 6
     for k in "ABC":
-        assert statistics[k] == NNCFTensor(np.array(5))
-    assert statistics["D"] == NNCFTensor(np.array(5))
-    assert statistics["E"] == NNCFTensor(np.array(0))
-    assert statistics["F"] == NNCFTensor(np.array(6))
+        assert statistics[k] == NumpyNNCFTensor(np.array(5))
+    assert statistics["D"] == NumpyNNCFTensor(np.array(5))
+    assert statistics["E"] == NumpyNNCFTensor(np.array(0))
+    assert statistics["F"] == NumpyNNCFTensor(np.array(6))
 
 
 def test_inplace_param():
@@ -192,8 +217,8 @@ def test_merged_tensor_collector():
         assert collector.aggregators[common_branch_key] is common_aggregator
 
     output_info = merged_collector.get_output_info(None, None)
-    outputs = {"common_input": NNCFTensor(np.array(0))}
-    outputs.update({f"input_{idx + 1}": NNCFTensor(np.array(idx + 1)) for idx, _ in enumerate(collectors[:-1])})
+    outputs = {"common_input": NumpyNNCFTensor(np.array(0))}
+    outputs.update({f"input_{idx + 1}": NumpyNNCFTensor(np.array(idx + 1)) for idx, _ in enumerate(collectors[:-1])})
     target_inputs = TensorCollector.get_tensor_collector_inputs(outputs, output_info)
     merged_collector.register_inputs(target_inputs)
 
@@ -204,8 +229,8 @@ def test_merged_tensor_collector():
 
         statistic = collector.get_statistics()
         assert len(statistic) == 2
-        assert statistic["common"] == NNCFTensor(np.array(0))
-        assert statistic["unique"] == NNCFTensor(np.array(idx + 1))
+        assert statistic["common"] == NumpyNNCFTensor(np.array(0))
+        assert statistic["unique"] == NumpyNNCFTensor(np.array(idx + 1))
 
 
 def test_ambigous_container_key():
@@ -257,7 +282,7 @@ def test_multiple_branch_reducer():
             ],
         )
     ]
-    inputs = {name: NNCFTensor(np.array(i)) for i, name in enumerate(ref_output_info[0][1])}
+    inputs = {name: NumpyNNCFTensor(np.array(i)) for i, name in enumerate(ref_output_info[0][1])}
 
     output_info = collector.get_output_info(target_node_name, 0)
     assert output_info == ref_output_info
@@ -265,18 +290,75 @@ def test_multiple_branch_reducer():
     target_inputs = collector.get_tensor_collector_inputs(inputs, output_info)
     collector.register_inputs(target_inputs)
 
-    ref_stats = {"0": NNCFTensor(np.array(0)), "1": NNCFTensor(np.array(1))}
+    ref_stats = {"0": NumpyNNCFTensor(np.array(0)), "1": NumpyNNCFTensor(np.array(1))}
     stats = collector.get_statistics()
     assert len(ref_stats) == len(stats)
     for key, value in ref_stats.items():
         assert value == stats[key]
 
 
+def test_register_unnamed_statistics(mocker):
+    tensor_collector = TensorCollector()
+    reducer_hashes = []
+    for reducer_cls, key in zip([DummyTensorReducer, DummyTensorReducerA], "AB"):
+        reducer = reducer_cls(f"Dummy{key}")
+        tensor_collector.register_statistic_branch(key, reducer, DummyTensorAggregator(None))
+        reducer_hashes.append(hash(reducer))
+
+    tensor_collector.register_inputs = mocker.MagicMock()
+    inputs_ = NumpyNNCFTensor(np.ones(5))
+    tensor_collector.register_input_for_all_reducers(inputs_)
+
+    tensor_collector.register_inputs.assert_called_once()
+    args = tensor_collector.register_inputs.call_args[0][0]
+    assert len(args) == 2
+    for k, v in args.items():
+        assert k in reducer_hashes
+        assert len(v) == 1
+        assert all(v[0] == inputs_)
+
+
+def test_wrong_statistic_container_class():
+    class BadStatContainer:
+        pass
+
+    tensor_collector = TensorCollector(BadStatContainer)
+    tensor_collector.register_statistic_branch("A", DummyTensorReducer("A"), DummyTensorAggregator())
+    tensor_collector.register_input_for_all_reducers(NumpyNNCFTensor(1))
+    with pytest.raises(RuntimeError):
+        tensor_collector.get_statistics()
+
+
 class TemplateTestStatisticCollector:
     @abstractmethod
     def get_nncf_tensor_cls(self):
         pass
 
+    @abstractmethod
+    @pytest.fixture
+    def min_max_statistic_cls(self) -> Type[MinMaxTensorStatistic]:
+        pass
+
+    @abstractmethod
+    @pytest.fixture
+    def mean_statistic_cls(self) -> Type[MeanTensorStatistic]:
+        pass
+
+    @abstractmethod
+    @pytest.fixture
+    def median_mad_statistic_cls(self) -> Type[MedianMADTensorStatistic]:
+        pass
+
+    @abstractmethod
+    @pytest.fixture
+    def percentile_statistic_cls(self) -> Type[PercentileTensorStatistic]:
+        pass
+
+    @abstractmethod
+    @pytest.fixture
+    def raw_statistic_cls(self) -> Type[RawTensorStatistic]:
+        pass
+
     @pytest.mark.parametrize("inplace", [False, True])
     @pytest.mark.parametrize("any_not_empty", [False, True])
     def test_empty_tensors_register(self, inplace, any_not_empty):
@@ -313,3 +395,89 @@ def test_empty_tensors_register(self, inplace, any_not_empty):
         stats = collector.get_statistics()
         assert len(stats) == 1
         assert stats["A"] is None
+
+    def test_min_max_stat_building(self, min_max_statistic_cls: MinMaxTensorStatistic):
+        tensor_collector = TensorCollector(min_max_statistic_cls)
+        tensor_collector.register_statistic_branch(
+            min_max_statistic_cls.MIN_STAT, DummyTensorReducer("A"), DummyTensorAggregator()
+        )
+        tensor_collector.register_statistic_branch(
+            min_max_statistic_cls.MAX_STAT, DummyTensorReducer("B"), DummyTensorAggregator()
+        )
+        tensor_collector.register_input_for_all_reducers(NumpyNNCFTensor(1))
+        statistic = tensor_collector.get_statistics()
+        assert isinstance(statistic, MinMaxTensorStatistic)
+        assert statistic.min_values == statistic.max_values == NumpyNNCFTensor(1)
+
+    def test_mean_max_stat_building(self, mean_statistic_cls: MeanTensorStatistic):
+        tensor_collector = TensorCollector(mean_statistic_cls)
+        tensor_collector.register_statistic_branch(
+            mean_statistic_cls.MEAN_STAT, DummyTensorReducer("A"), DummyTensorAggregator()
+        )
+        tensor_collector.register_statistic_branch(
+            mean_statistic_cls.SHAPE_STAT, DummyTensorReducer("B"), DummyTensorAggregator()
+        )
+        tensor_collector.register_input_for_all_reducers(NumpyNNCFTensor(1))
+        statistic = tensor_collector.get_statistics()
+        assert isinstance(statistic, MeanTensorStatistic)
+        assert statistic.mean_values == statistic.shape == NumpyNNCFTensor(1)
+
+    def test_median_mad_stat_building(self, median_mad_statistic_cls: MedianMADTensorStatistic):
+        class DummyMADPercentileAggregator(DummyTensorAggregator):
+            def _aggregate_impl(self):
+                return {
+                    MedianMADTensorStatistic.MEDIAN_VALUES_STAT: self._container[0],
+                    MedianMADTensorStatistic.MAD_VALUES_STAT: self._container[0],
+                }
+
+        tensor_collector = TensorCollector(median_mad_statistic_cls)
+        tensor_collector.register_statistic_branch(
+            median_mad_statistic_cls.TENSOR_STATISTIC_OUTPUT_KEY,
+            DummyTensorReducer("A"),
+            DummyMADPercentileAggregator(),
+        )
+        tensor_collector.register_input_for_all_reducers(NumpyNNCFTensor(1))
+        statistic = tensor_collector.get_statistics()
+        assert isinstance(statistic, MedianMADTensorStatistic)
+        assert statistic.median_values == statistic.mad_values == NumpyNNCFTensor(1)
+
+    def test_percentile_stat_building(self, percentile_statistic_cls: PercentileTensorStatistic):
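+        # A percentile statistic can be built either from one branch returning a {quantile: value}
+        # dict or from one branch per quantile; both layouts are exercised below.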
+        class DummyPercentileTensorAggregator(DummyTensorAggregator):
+            def _aggregate_impl(self):
+                return {0.5: self._container[0]}
+
+        tensor_collector = TensorCollector(percentile_statistic_cls)
+        tensor_collector.register_statistic_branch(
+            percentile_statistic_cls.TENSOR_STATISTIC_OUTPUT_KEY,
+            DummyTensorReducer("A"),
+            DummyPercentileTensorAggregator(),
+        )
+        tensor_collector.register_input_for_all_reducers(NumpyNNCFTensor(1))
+        statistic = tensor_collector.get_statistics()
+        assert isinstance(statistic, PercentileTensorStatistic)
+        assert statistic.percentile_vs_values_dict[0.5] == NumpyNNCFTensor(1)
+
+        tensor_collector = TensorCollector(percentile_statistic_cls)
+        qs = [0.3, 0.5, 0.7]
+        for q in qs:
+            tensor_collector.register_statistic_branch(
+                (PercentileTensorStatistic.PERCENTILE_VS_VALUE_DICT, q),
+                DummyTensorReducer(f"A{q}"),
+                DummyTensorAggregator(),
+            )
+        tensor_collector.register_input_for_all_reducers(NumpyNNCFTensor(1))
+        statistic = tensor_collector.get_statistics()
+        assert isinstance(statistic, PercentileTensorStatistic)
+        assert len(statistic.percentile_vs_values_dict) == len(qs)
+        for q in qs:
+            assert statistic.percentile_vs_values_dict[q] == NumpyNNCFTensor(1)
+
+    def test_raw_stat_building(self, raw_statistic_cls: RawTensorStatistic):
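+        # RawTensorStatistic has a single VALUES_STATS branch.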
+        tensor_collector = TensorCollector(raw_statistic_cls)
+        tensor_collector.register_statistic_branch(
+            raw_statistic_cls.VALUES_STATS, DummyTensorReducer("A"), DummyTensorAggregator()
+        )
+        tensor_collector.register_input_for_all_reducers(NumpyNNCFTensor(1))
+        statistic = tensor_collector.get_statistics()
+        assert isinstance(statistic, RawTensorStatistic)
+        assert statistic.values == NumpyNNCFTensor(1)
diff --git a/tests/common/test_statistics_aggregator.py b/tests/common/test_statistics_aggregator.py
index cd0545a4580..6bfcf9ef753 100644
--- a/tests/common/test_statistics_aggregator.py
+++ b/tests/common/test_statistics_aggregator.py
@@ -98,7 +98,10 @@ def inplace_statistics(self) -> bool:
     @abstractmethod
     @pytest.fixture
     def is_backend_support_custom_estimators(self) -> bool:
-        pass
+        """
+        False if backend can initialize only following tensor collectors:
+        MinMax, MeanMinMax.
+        """
 
     @abstractmethod
     def reducers_map(self) -> List[TensorReducerBase]:
@@ -375,6 +378,7 @@ def test_statistics_aggregator_min_max(
         inplace_statistics,
         is_backend_support_custom_estimators,
     ):
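+        # NOTE: inplace statistics are force-disabled here, presumably until all backends
+        # support inplace collection with the experimental collectors.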
+        inplace_statistics = False
         model = self.get_backend_model(dataset_samples)
         quantizer_config = QuantizerConfig(
             mode=test_parameters.quantization_mode, per_channel=test_parameters.per_channel
@@ -433,6 +437,10 @@ def filter_func(point):
             if isinstance(ref_min_val, np.ndarray):
                 assert stat.min_values.shape == ref_min_val.shape
                 assert stat.max_values.shape == ref_max_val.shape
+            else:
+                ref_shape = (1, 1, 1, 1) if is_stat_in_shape_of_scale else ()
+                assert stat.min_values.shape == ref_shape
+                assert stat.max_values.shape == ref_shape
 
     class BiasCorrectionAlgos(Enum):
         BIAS_CORRECTION = "bias_correction"
@@ -811,10 +819,10 @@ def test_same_collectors_different_attrs_dont_merge(self, statistics_type, test_
         model = params["model"](dataset_samples)
         params = {}
         if statistics_type in [StatisticsType.MIN, StatisticsType.MAX, StatisticsType.ABS_MAX, StatisticsType.MEAN]:
-            params["reduction_shape"] = [None, (0, 1, 3), (1, 2, 3)]
+            params["reduction_axes"] = [None, (0, 1, 3), (1, 2, 3)]
             params["inplace"] = [False, True]
         elif statistics_type in [StatisticsType.QUANTILE, StatisticsType.ABS_QUANTILE]:
-            params["reduction_shape"] = [None, (0, 1, 3), (1, 2, 3)]
+            params["reduction_axes"] = [None, (0, 1, 3), (1, 2, 3)]
             params["quantile"] = [[0.01, 0.99], [0.001, 0.999]]
         elif statistics_type == "batch_mean":
             pytest.skip("Inplace statistic woun't work until openvino==2023.0.0 release")
diff --git a/tests/onnx/quantization/test_quantizer_config.py b/tests/onnx/quantization/test_quantizer_config.py
index 374ae440f13..8702f4a42f0 100644
--- a/tests/onnx/quantization/test_quantizer_config.py
+++ b/tests/onnx/quantization/test_quantizer_config.py
@@ -12,6 +12,8 @@
 import pytest
 
 from nncf.common.graph.transformations.commands import TargetType
+from nncf.common.tensor_statistics.collectors import ReductionAxes
+from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.onnx.graph.metatypes.onnx_metatypes import ONNXAddLayerMetatype
 from nncf.onnx.graph.metatypes.onnx_metatypes import ONNXConvolutionMetatype
 from nncf.onnx.graph.metatypes.onnx_metatypes import ONNXDepthwiseConvolutionMetatype
@@ -27,6 +29,9 @@
 ParamsCls = TemplateTestQuantizerConfig.TestGetStatisticsCollectorParameters
 
 
+# pylint: disable=protected-access
+
+
 class TestQuantizerConfig(TemplateTestQuantizerConfig):
     def get_algo_backend(self):
         return ONNXMinMaxAlgoBackend()
@@ -37,6 +42,9 @@ def check_is_min_max_statistic_collector(self, tensor_collector):
     def check_is_mean_min_max_statistic_collector(self, tensor_collector):
         assert isinstance(tensor_collector, ONNXMeanMinMaxStatisticCollector)
 
+    def get_reduction_axes(self, reducer: TensorStatisticCollectorBase) -> ReductionAxes:
+        return reducer._reduction_shape
+
     @pytest.fixture(
         params=[
             pytest.param(
diff --git a/tests/openvino/native/quantization/test_fq_configurations.py b/tests/openvino/native/quantization/test_calculation_quantizer_params.py
similarity index 100%
rename from tests/openvino/native/quantization/test_fq_configurations.py
rename to tests/openvino/native/quantization/test_calculation_quantizer_params.py
diff --git a/tests/openvino/native/quantization/test_quantizer_config.py b/tests/openvino/native/quantization/test_quantizer_config.py
index 45d41644ba4..773f5996604 100644
--- a/tests/openvino/native/quantization/test_quantizer_config.py
+++ b/tests/openvino/native/quantization/test_quantizer_config.py
@@ -12,10 +12,12 @@
 import pytest
 
 from nncf.common.graph.transformations.commands import TargetType
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.experimental.common.tensor_statistics.collectors import MaxAggregator
 from nncf.experimental.common.tensor_statistics.collectors import MeanAggregator
 from nncf.experimental.common.tensor_statistics.collectors import MinAggregator
 from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
+from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
 from nncf.openvino.graph.layer_attributes import OVLayerAttributes
 from nncf.openvino.graph.metatypes.openvino_metatypes import OVConvolutionMetatype
 from nncf.openvino.graph.metatypes.openvino_metatypes import OVDepthwiseConvolutionMetatype
@@ -29,6 +31,9 @@
 ParamsCls = TemplateTestQuantizerConfig.TestGetStatisticsCollectorParameters
 
 
+# pylint: disable=protected-access
+
+
 class TestQuantizerConfig(TemplateTestQuantizerConfig):
     def get_algo_backend(self):
         return OVMinMaxAlgoBackend()
@@ -45,6 +50,9 @@ def check_is_mean_min_max_statistic_collector(self, tensor_collector: TensorColl
         assert MeanAggregator in aggrs
         assert aggrs[0].__class__ == aggrs[1].__class__
 
+    def get_reduction_axes(self, reducer: TensorReducerBase) -> ReductionAxes:
+        return reducer._reduction_axes
+
     @pytest.fixture(
         params=[
             pytest.param(
diff --git a/tests/openvino/native/quantization/test_reducers_and_aggregators.py b/tests/openvino/native/quantization/test_reducers_and_aggregators.py
index b34fc27a7c4..7b13174e961 100644
--- a/tests/openvino/native/quantization/test_reducers_and_aggregators.py
+++ b/tests/openvino/native/quantization/test_reducers_and_aggregators.py
@@ -9,9 +9,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Any, List, Optional, Tuple
+
 import numpy as np
 import pytest
 
+from nncf.common.graph.layer_attributes import Dtype
 from nncf.openvino.statistics.collectors import OVAbsMaxReducer
 from nncf.openvino.statistics.collectors import OVAbsQuantileReducer
 from nncf.openvino.statistics.collectors import OVBatchMeanReducer
@@ -31,7 +34,7 @@ class TestReducersAggregators(TemplateTestReducersAggreagtors):
     def tensor_processor(self):
         return OVNNCFCollectorTensorProcessor
 
-    def get_nncf_tensor(self, x: np.array):
+    def get_nncf_tensor(self, x: np.ndarray, dtype: Optional[Dtype] = None):
         return OVNNCFTensor(x)
 
     @pytest.fixture(scope="module")
@@ -52,3 +55,12 @@ def all_close(self, val, ref) -> bool:
         val_ = np.array(val)
         ref_ = np.array(ref)
         return np.allclose(val_, ref_) and val_.shape == ref_.shape
+
+    def squeeze_tensor(self, ref_tensor: List[Any], axes: Optional[Tuple[int, ...]] = None):
+        return np.squeeze(np.array(ref_tensor), axes)
+
+    def cast_tensor(self, tensor, dtype: Dtype):
+        return tensor
+
+    def expand_dims(self, tensor, dims: Tuple[int, ...]):
+        return np.expand_dims(np.array(tensor), dims)
diff --git a/tests/openvino/native/test_statistic_collector.py b/tests/openvino/native/test_statistic_collector.py
index 2d52c0af4cd..32123644942 100644
--- a/tests/openvino/native/test_statistic_collector.py
+++ b/tests/openvino/native/test_statistic_collector.py
@@ -9,6 +9,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Type
+
+import pytest
+
+from nncf.common.tensor_statistics.statistics import MeanTensorStatistic
+from nncf.common.tensor_statistics.statistics import MedianMADTensorStatistic
+from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
+from nncf.common.tensor_statistics.statistics import PercentileTensorStatistic
+from nncf.common.tensor_statistics.statistics import RawTensorStatistic
+from nncf.openvino.statistics.statistics import OVMeanTensorStatistic
+from nncf.openvino.statistics.statistics import OVMinMaxTensorStatistic
+from nncf.openvino.statistics.statistics import OVRawTensorStatistic
 from nncf.openvino.tensor import OVNNCFTensor
 from tests.common.experimental.test_statistic_collector import TemplateTestStatisticCollector
 
@@ -16,3 +28,31 @@
 class TestOVStatisticCollector(TemplateTestStatisticCollector):
     def get_nncf_tensor_cls(self):
         return OVNNCFTensor
+
+    @pytest.fixture
+    def min_max_statistic_cls(self) -> Type[MinMaxTensorStatistic]:
+        return OVMinMaxTensorStatistic
+
+    @pytest.fixture
+    def mean_statistic_cls(self) -> Type[MeanTensorStatistic]:
+        return OVMeanTensorStatistic
+
+    @pytest.fixture
+    def median_mad_statistic_cls(self) -> Type[MedianMADTensorStatistic]:
+        raise NotImplementedError()
+
+    @pytest.mark.skip
+    def test_median_mad_stat_building(self, median_mad_statistic_cls: MedianMADTensorStatistic):
+        pass
+
+    @pytest.fixture
+    def percentile_statistic_cls(self) -> Type[PercentileTensorStatistic]:
+        raise NotImplementedError()
+
+    @pytest.mark.skip
+    def test_percentile_stat_building(self, percentile_statistic_cls: PercentileTensorStatistic):
+        pass
+
+    @pytest.fixture
+    def raw_statistic_cls(self) -> Type[RawTensorStatistic]:
+        return OVRawTensorStatistic
diff --git a/tests/post_training/test_templates/test_calculate_quantizer_parameters.py b/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
index b9b93153cfc..c26de05bff8 100644
--- a/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
+++ b/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
@@ -19,6 +19,7 @@
 from nncf.common.quantization.structs import QuantizationMode
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.quantization.structs import QuantizerGroup
+from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
 from nncf.quantization.fake_quantize import FakeQuantizeParameters
 from nncf.quantization.fake_quantize import calculate_quantizer_parameters
 from tests.post_training.conftest import FQ_CALCULATED_PARAMETERS_PATH
@@ -213,7 +214,7 @@ def test_calculate_quantizer_parameters(self, case_to_test):
         else:
             max_values = np.amax(data, axis=axes, keepdims=q_config.per_channel)
 
-        statistics = self.tensor_statistic(max_values=max_values, min_values=min_values)
+        statistics = self.tensor_statistic(min_values=min_values, max_values=max_values)
 
         if not case_to_test.should_fail:
             fq_params = calculate_quantizer_parameters(statistics, q_config, quant_group, narrow_range, half_range)
diff --git a/tests/post_training/test_templates/test_channel_alignment.py b/tests/post_training/test_templates/test_channel_alignment.py
index d3b6dd045e5..d48478b5fe1 100644
--- a/tests/post_training/test_templates/test_channel_alignment.py
+++ b/tests/post_training/test_templates/test_channel_alignment.py
@@ -480,20 +480,21 @@ class MockBackend(backend_cls):
     @pytest.mark.parametrize("inplace_ref", [False, True])
     @pytest.mark.parametrize("q_ref", [1e-4, 0.3])
     def test_statistic_collectors(self, inplace_ref, q_ref):
-        reduction_shape_ref = (0, 2, 3)
+        reduction_axes_ref = (0, 2, 3)
         num_samples_ref = 123
         statistic_collector: TensorCollector = self.get_backend_cls().get_statistic_collector(
-            reduction_shape=reduction_shape_ref, q=q_ref, num_samples=num_samples_ref, inplace=inplace_ref
+            reduction_shape=reduction_axes_ref, q=q_ref, num_samples=num_samples_ref, inplace=inplace_ref
         )
 
         assert len(statistic_collector.reducers) == 1
         reducer = statistic_collector.reducers.pop()
         assert isinstance(reducer, QuantileReducer)
-        assert reducer._reduction_shape == reduction_shape_ref
+        assert reducer._reduction_axes == reduction_axes_ref
         assert np.allclose(reducer._quantile, (q_ref, 1 - q_ref))
 
         assert len(statistic_collector.aggregators) == 2
         for aggr in statistic_collector.aggregators.values():
             assert isinstance(aggr, MedianAggregator)
             assert aggr.num_samples == num_samples_ref
-            assert not aggr._use_per_sample_stats
+            assert not aggr._keepdims
+            assert aggr._aggregation_axes == (0,)
diff --git a/tests/post_training/test_templates/test_quantizer_config.py b/tests/post_training/test_templates/test_quantizer_config.py
index e614138d0a9..dc9a33a0cf5 100644
--- a/tests/post_training/test_templates/test_quantizer_config.py
+++ b/tests/post_training/test_templates/test_quantizer_config.py
@@ -25,6 +25,7 @@
 from nncf.common.quantization.structs import QuantizationPreset
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.quantization.structs import QuantizerGroup
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.experimental.common.tensor_statistics.collectors import AbsMaxReducer
 from nncf.experimental.common.tensor_statistics.collectors import MaxReducer
 from nncf.experimental.common.tensor_statistics.collectors import MinReducer
@@ -53,6 +54,10 @@ def check_is_min_max_statistic_collector(self, tensor_collector):
     def check_is_mean_min_max_statistic_collector(self, tensor_collector):
         pass
 
+    @abstractmethod
+    def get_reduction_axes(self, reducer) -> ReductionAxes:
+        pass
+
     @abstractmethod
     @pytest.fixture
     def single_conv_nncf_graph(self) -> NNCFGraphToTest:
@@ -278,8 +283,8 @@ def test_get_stat_collector(
 
         for reducer in reducers:
             if q_config_per_channel:
-                assert reducer._reduction_shape == params.ref_per_ch_reduction_shape
+                assert self.get_reduction_axes(reducer) == params.ref_per_ch_reduction_shape
             else:
-                assert reducer._reduction_shape == params.ref_per_tensor_reduction_shape
+                assert self.get_reduction_axes(reducer) == params.ref_per_tensor_reduction_shape
 
         assert tensor_collector.num_samples == num_samples
diff --git a/tests/post_training/test_templates/test_smooth_quant.py b/tests/post_training/test_templates/test_smooth_quant.py
index 42fe17e01b0..88f2704b625 100644
--- a/tests/post_training/test_templates/test_smooth_quant.py
+++ b/tests/post_training/test_templates/test_smooth_quant.py
@@ -134,7 +134,7 @@ def test_get_abs_max_channel_collector(self):
         for inplace_type in [False, True]:
             backend_tensor_collector = backend.get_abs_max_channel_collector(
                 num_samples=samples,
-                stats_reduction_shape=reduction_shape,
+                stats_reduction_axes=reduction_shape,
                 inplace=inplace_type,
                 branch_key="test_branch",
             )
@@ -145,7 +145,7 @@ def test_get_abs_max_channel_collector(self):
             for reducer in backend_tensor_collector.reducers:
                 assert isinstance(reducer, AbsMaxReducer)
                 assert reducer.inplace == inplace_type
-                assert reducer._reduction_shape == reduction_shape
+                assert reducer._reduction_axes == reduction_shape
 
     @pytest.mark.parametrize(
         "model_cls, references",
diff --git a/tests/tensorflow/tensor_statistics/test_tensor_statistics.py b/tests/tensorflow/tensor_statistics/test_tensor_statistics.py
index e99487c69d9..30a675f7308 100644
--- a/tests/tensorflow/tensor_statistics/test_tensor_statistics.py
+++ b/tests/tensorflow/tensor_statistics/test_tensor_statistics.py
@@ -16,7 +16,7 @@
 import tensorflow as tf
 
 from nncf.common.tensor_statistics.collectors import OfflineTensorStatisticCollector
-from nncf.common.tensor_statistics.collectors import ReductionShape
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.tensor_statistics.collectors import StatisticsNotCollectedError
 from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.common.tensor_statistics.statistics import TensorStatistic
@@ -101,7 +101,7 @@ class TestCollectedStatistics:
     def test_collected_statistics_with_shape_convert(
         self,
         collector: Type[TensorStatisticCollectorBase],
-        reduction_shapes_vs_ref_statistic: Dict[Tuple[ReductionShape, ReductionShape], TensorStatistic],
+        reduction_shapes_vs_ref_statistic: Dict[Tuple[ReductionAxes, ReductionAxes], TensorStatistic],
     ):
         for reduction_shape in reduction_shapes_vs_ref_statistic.keys():
             collector_obj = collector(use_abs_max=True, reduction_shape=reduction_shape)
@@ -179,7 +179,7 @@ def test_collected_statistics_with_shape_convert(
     def test_collected_statistics(
         self,
         collector: Type[TensorStatisticCollectorBase],
-        reduction_shapes_vs_ref_statistic: Dict[ReductionShape, TensorStatistic],
+        reduction_shapes_vs_ref_statistic: Dict[ReductionAxes, TensorStatistic],
     ):
         for reduction_shape in reduction_shapes_vs_ref_statistic.keys():
             collector_obj = collector(reduction_shape=reduction_shape)
@@ -263,7 +263,7 @@ def test_num_samples(self, collector_for_num_samples_test: OfflineTensorStatisti
 
 
 class TestCollectorTensorProcessor:
-    tensor_processor = TFNNCFCollectorTensorProcessor()
+    tensor_processor = TFNNCFCollectorTensorProcessor
 
     def test_unstack(self):
         # Unstack tensor with dimensions
diff --git a/tests/torch/ptq/helpers.py b/tests/torch/ptq/helpers.py
index e019eb00f02..5e7b672aae1 100644
--- a/tests/torch/ptq/helpers.py
+++ b/tests/torch/ptq/helpers.py
@@ -94,6 +94,6 @@ def mock_collect_statistics(mocker):
     min_, max_ = 0.0, 1.0
     min_, max_ = torch.tensor(min_), torch.tensor(max_)
     _ = mocker.patch(
-        "nncf.common.tensor_statistics.collectors.TensorStatisticCollectorBase.get_statistics",
-        return_value=PTMinMaxTensorStatistic(min_, max_),
+        "nncf.experimental.common.tensor_statistics.collectors.TensorCollector.get_statistics",
+        return_value=PTMinMaxTensorStatistic(min_values=min_, max_values=max_),
     )
diff --git a/tests/torch/ptq/test_calculation_quantizer_params.py b/tests/torch/ptq/test_calculation_quantizer_params.py
index f98e5137d48..4f2553de432 100644
--- a/tests/torch/ptq/test_calculation_quantizer_params.py
+++ b/tests/torch/ptq/test_calculation_quantizer_params.py
@@ -271,7 +271,8 @@ def calculate_statistics(data, mode, qgroup, half_range=False):
         max_values = np.amax(data, axes)
 
     statistics = PTMinMaxTensorStatistic(
-        min_values=torch.from_numpy(np.array(min_values)), max_values=torch.from_numpy(np.array(max_values))
+        min_values=torch.from_numpy(np.array(min_values)),
+        max_values=torch.from_numpy(np.array(max_values)),
     )
     signedness_to_force = True if qgroup == QuantizerGroup.WEIGHTS else None
     qconfig = QuantizerConfig(num_bits=8, mode=mode, per_channel=per_ch, signedness_to_force=signedness_to_force)
diff --git a/tests/torch/ptq/test_ptq_params.py b/tests/torch/ptq/test_ptq_params.py
index c174ec8b322..35ddfe3128e 100644
--- a/tests/torch/ptq/test_ptq_params.py
+++ b/tests/torch/ptq/test_ptq_params.py
@@ -18,6 +18,10 @@
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.common.quantization.structs import QuantizationPreset
 from nncf.common.utils.backend import BackendType
+from nncf.experimental.common.tensor_statistics.collectors import MaxAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MeanAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MinAggregator
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
 from nncf.parameters import ModelType
 from nncf.parameters import TargetDevice
 from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
@@ -33,8 +37,6 @@
 from nncf.torch.graph.operator_metatypes import PTModuleLinearMetatype
 from nncf.torch.graph.operator_metatypes import PTSoftmaxMetatype
 from nncf.torch.quantization.quantize_model import _create_nncf_config
-from nncf.torch.tensor_statistics.collectors import PTMeanMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMinMaxStatisticCollector
 from tests.common.quantization.metatypes import Conv2dTestMetatype
 from tests.common.quantization.metatypes import LinearTestMetatype
 from tests.common.quantization.metatypes import SoftmaxTestMetatype
@@ -104,11 +106,17 @@ class TestPTQParams(TemplateTestPTQParams):
     def get_algo_backend(self):
         return PTMinMaxAlgoBackend()
 
-    def check_is_min_max_statistic_collector(self, tensor_collector):
-        assert isinstance(tensor_collector, PTMinMaxStatisticCollector)
-
-    def check_is_mean_min_max_statistic_collector(self, tensor_collector):
-        assert isinstance(tensor_collector, PTMeanMinMaxStatisticCollector)
+    def check_is_min_max_statistic_collector(self, tensor_collector: TensorCollector):
+        aggrs = [aggr.__class__ for aggr in tensor_collector.aggregators.values()]
+        assert len(aggrs) == 2
+        assert MinAggregator in aggrs
+        assert MaxAggregator in aggrs
+
+    def check_is_mean_min_max_statistic_collector(self, tensor_collector: TensorCollector):
+        aggrs = [aggr.__class__ for aggr in tensor_collector.aggregators.values()]
+        assert len(aggrs) == 2
+        assert MeanAggregator in aggrs
+        assert aggrs[0].__class__ == aggrs[1].__class__
 
     def check_quantize_outputs_fq_num(self, quantize_outputs, act_num_q, weight_num_q):
         if quantize_outputs:
diff --git a/tests/torch/ptq/test_quantizer_config.py b/tests/torch/ptq/test_quantizer_config.py
index 41cab6438b5..98e7de76ca2 100644
--- a/tests/torch/ptq/test_quantizer_config.py
+++ b/tests/torch/ptq/test_quantizer_config.py
@@ -9,12 +9,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from typing import Tuple
+
 import pytest
 
 from nncf.common.graph.transformations.commands import TargetType
+from nncf.common.tensor_statistics.collectors import ReductionAxes
+from nncf.experimental.common.tensor_statistics.collectors import MaxAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MeanAggregator
+from nncf.experimental.common.tensor_statistics.collectors import MinAggregator
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
+from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
 from nncf.quantization.algorithms.min_max.torch_backend import PTMinMaxAlgoBackend
-from nncf.torch.tensor_statistics.collectors import PTMeanMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMinMaxStatisticCollector
 from tests.post_training.test_templates.models import NNCFGraphToTest
 from tests.post_training.test_templates.models import NNCFGraphToTestDepthwiseConv
 from tests.post_training.test_templates.models import NNCFGraphToTestSumAggregation
@@ -26,15 +32,27 @@
 ParamsCls = TemplateTestQuantizerConfig.TestGetStatisticsCollectorParameters
 
 
+# pylint: disable=protected-access
+
+
 class TestQuantizerConfig(TemplateTestQuantizerConfig):
     def get_algo_backend(self):
         return PTMinMaxAlgoBackend()
 
-    def check_is_min_max_statistic_collector(self, tensor_collector):
-        assert isinstance(tensor_collector, PTMinMaxStatisticCollector)
+    def check_is_min_max_statistic_collector(self, tensor_collector: TensorCollector):
+        aggrs = [aggr.__class__ for aggr in tensor_collector.aggregators.values()]
+        assert len(aggrs) == 2
+        assert MinAggregator in aggrs
+        assert MaxAggregator in aggrs
+
+    def check_is_mean_min_max_statistic_collector(self, tensor_collector: TensorCollector):
+        aggrs = [aggr.__class__ for aggr in tensor_collector.aggregators.values()]
+        assert len(aggrs) == 2
+        assert MeanAggregator in aggrs
+        assert aggrs[0].__class__ == aggrs[1].__class__
 
-    def check_is_mean_min_max_statistic_collector(self, tensor_collector):
-        assert isinstance(tensor_collector, PTMeanMinMaxStatisticCollector)
+    def get_reduction_axes(self, reducer: TensorReducerBase) -> ReductionAxes:
+        return reducer._reduction_axes
 
     @pytest.fixture(
         params=[
diff --git a/tests/torch/ptq/test_reducers_and_aggregators.py b/tests/torch/ptq/test_reducers_and_aggregators.py
new file mode 100644
index 00000000000..8bf86713700
--- /dev/null
+++ b/tests/torch/ptq/test_reducers_and_aggregators.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2023 Intel Corporation
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#      http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, List, Optional, Tuple
+
+import numpy as np
+import pytest
+import torch
+
+from nncf.common.graph.layer_attributes import Dtype
+from nncf.torch.tensor import PTNNCFTensor
+from nncf.torch.tensor_statistics.collectors import PTAbsMaxReducer
+from nncf.torch.tensor_statistics.collectors import PTAbsQuantileReducer
+from nncf.torch.tensor_statistics.collectors import PTBatchMeanReducer
+from nncf.torch.tensor_statistics.collectors import PTMaxReducer
+from nncf.torch.tensor_statistics.collectors import PTMeanPerChanelReducer
+from nncf.torch.tensor_statistics.collectors import PTMeanReducer
+from nncf.torch.tensor_statistics.collectors import PTMinReducer
+from nncf.torch.tensor_statistics.collectors import PTNNCFCollectorTensorProcessor
+from nncf.torch.tensor_statistics.collectors import PTNoopReducer
+from nncf.torch.tensor_statistics.collectors import PTQuantileReducer
+from tests.common.experimental.test_reducers_and_aggregators import TemplateTestReducersAggreagtors
+
+
+class TestReducersAggregators(TemplateTestReducersAggreagtors):
+    @pytest.fixture
+    def tensor_processor(self):
+        return PTNNCFCollectorTensorProcessor
+
+    def get_nncf_tensor(self, x: np.ndarray, dtype: Optional[Dtype] = None):
+        torch_tensor = torch.tensor(x)
+        if dtype == Dtype.FLOAT:
+            torch_tensor = torch_tensor.float()
+        elif dtype == Dtype.INTEGER:
+            torch_tensor = torch_tensor.int()
+        return PTNNCFTensor(torch_tensor)
+
+    @pytest.fixture(scope="module")
+    def reducers(self):
+        return {
+            "noop": PTNoopReducer,
+            "min": PTMinReducer,
+            "max": PTMaxReducer,
+            "abs_max": PTAbsMaxReducer,
+            "mean": PTMeanReducer,
+            "quantile": PTQuantileReducer,
+            "abs_quantile": PTAbsQuantileReducer,
+            "batch_mean": PTBatchMeanReducer,
+            "mean_per_ch": PTMeanPerChanelReducer,
+        }
+
+    def all_close(self, val, ref) -> bool:
+        val_ = torch.tensor(val)
+        ref_ = torch.tensor(ref)
+        return torch.allclose(val_, ref_) and val_.shape == ref_.shape
+
+    def squeeze_tensor(self, ref_tensor: List[Any], axes: Optional[Tuple[int, ...]] = None):
+        if axes is None:
+            return torch.tensor(ref_tensor).squeeze()
+        return torch.tensor(ref_tensor).squeeze(axes)
+
+    def cast_tensor(self, tensor, dtype: Dtype):
+        tensor = torch.tensor(tensor)
+        if dtype == Dtype.FLOAT:
+            return tensor.float()
+        if dtype == Dtype.INTEGER:
+            return tensor.int()
+        raise RuntimeError(f"Unknown dtype: {dtype}")
+
+    def expand_dims(self, tensor, dims: Tuple[int, ...]):
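+        # Emulate np.expand_dims with a tuple of axes by inserting singleton dimensions into the shape.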
+        tensor_ = torch.tensor(tensor)
+        shape = list(tensor_.shape)
+        for dim in dims:
+            shape.insert(dim, 1)
+        return tensor_.view(shape)
diff --git a/tests/torch/ptq/test_statistic_collector.py b/tests/torch/ptq/test_statistic_collector.py
new file mode 100644
index 00000000000..0ab1ef2bb55
--- /dev/null
+++ b/tests/torch/ptq/test_statistic_collector.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2023 Intel Corporation
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#      http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Type
+
+import pytest
+
+from nncf.common.tensor_statistics.statistics import MeanTensorStatistic
+from nncf.common.tensor_statistics.statistics import MedianMADTensorStatistic
+from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
+from nncf.common.tensor_statistics.statistics import PercentileTensorStatistic
+from nncf.common.tensor_statistics.statistics import RawTensorStatistic
+from nncf.torch.tensor import PTNNCFTensor
+from nncf.torch.tensor_statistics.statistics import PTMeanTensorStatistic
+from nncf.torch.tensor_statistics.statistics import PTMedianMADTensorStatistic
+from nncf.torch.tensor_statistics.statistics import PTMinMaxTensorStatistic
+from nncf.torch.tensor_statistics.statistics import PTPercentileTensorStatistic
+from tests.common.experimental.test_statistic_collector import TemplateTestStatisticCollector
+
+
+class TestPTStatisticCollector(TemplateTestStatisticCollector):
+    def get_nncf_tensor_cls(self):
+        return PTNNCFTensor
+
+    @pytest.fixture
+    def min_max_statistic_cls(self) -> Type[MinMaxTensorStatistic]:
+        return PTMinMaxTensorStatistic
+
+    @pytest.fixture
+    def mean_statistic_cls(self) -> Type[MeanTensorStatistic]:
+        return PTMeanTensorStatistic
+
+    @pytest.fixture
+    def median_mad_statistic_cls(self) -> Type[MedianMADTensorStatistic]:
+        return PTMedianMADTensorStatistic
+
+    @pytest.fixture
+    def percentile_statistic_cls(self) -> Type[PercentileTensorStatistic]:
+        return PTPercentileTensorStatistic
+
+    @pytest.fixture
+    def raw_statistic_cls(self) -> Type[RawTensorStatistic]:
+        raise NotImplementedError()
+
+    @pytest.mark.skip
+    def test_raw_stat_building(self, raw_statistic_cls: RawTensorStatistic):
+        pass
diff --git a/tests/torch/quantization/test_range_init.py b/tests/torch/quantization/test_range_init.py
index 84975de7bd4..6ef968a8bf9 100644
--- a/tests/torch/quantization/test_range_init.py
+++ b/tests/torch/quantization/test_range_init.py
@@ -11,8 +11,9 @@
 import itertools
 import re
 from collections import namedtuple
+from dataclasses import dataclass
 from functools import partial
-from typing import List, Tuple
+from typing import List, Tuple, Union
 
 import pytest
 import torch
@@ -22,6 +23,7 @@
 from torch.utils.data import DataLoader
 from torchvision.models import squeezenet1_1
 
+import nncf.torch.tensor_statistics.collectors as pt_collectors
 from nncf.common.graph import NNCFNodeName
 from nncf.common.quantization.initialization.range import PerLayerRangeInitConfig
 from nncf.common.quantization.initialization.range import RangeInitConfig
@@ -46,9 +48,7 @@
 from nncf.torch.quantization.layers import BaseQuantizer
 from nncf.torch.quantization.layers import PTQuantizerSpec
 from nncf.torch.quantization.layers import SymmetricQuantizer
-from nncf.torch.tensor_statistics.collectors import PTMeanMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMedianMADStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMinMaxStatisticCollector
+from nncf.torch.tensor import PTNNCFTensor
 from nncf.torch.tensor_statistics.statistics import pt_convert_stat_to_min_max_tensor_stat
 from nncf.torch.utils import get_all_modules_by_type
 from nncf.torch.utils import safe_thread_call
@@ -64,6 +64,8 @@
 from tests.torch.quantization.quantization_helpers import post_compression_test_distr_init
 
 # pylint:disable=unused-import
+# pylint:disable=protected-access
+# pylint:disable=too-many-lines
 
 
 def scale_signed_dumping_worker(gpu, ngpus_per_node, config, tmp_path):
@@ -493,21 +495,23 @@ def forward(self, input_):
         return self.conv2d(input_)
 
 
+def _get_init_tensor_for_range_init_test() -> torch.Tensor:
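+    # Channel 0 holds the values 1..10000; channels 1 and 2 are -2x and 3x copies,
+    # giving distinct per-channel ranges that can be checked independently.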
+    test_input_sample = torch.empty([3, 100, 100])
+    test_input_sample[0] = torch.arange(1, 10_001, dtype=torch.float32).view((100, 100))
+    test_input_sample[1] = test_input_sample[0] * -2
+    test_input_sample[2] = test_input_sample[0] * 3
+    return test_input_sample
+
+
 class SingleConv2dSyntheticWeightModel(torch.nn.Module):
     def __init__(self):
         super().__init__()
         self.conv2d = nn.Conv2d(3, 3, 100)
 
         with torch.no_grad():
-            for i in range(0, 100):
-                for j in range(0, 100):
-                    self.conv2d.weight[0][0][i][j] = i * 100 + j
-
+            value = _get_init_tensor_for_range_init_test()
             for i in range(0, 3):
-                for j in range(0, 3):
-                    if not (i == 0 and j == 0):
-                        self.conv2d.weight[i][j] = self.conv2d.weight[0][0]
-                        self.conv2d.weight[i][j] = self.conv2d.weight[0][0]
+                self.conv2d.weight[:, i] = value
 
     def forward(self, input_):
         return self.conv2d(input_)
@@ -519,36 +523,228 @@ def init_idfn(val):
     return val
 
 
+@dataclass
+class SymQuantizerScaleRef:
+    scale: Tuple[float, ...]
+
+
+@dataclass
+class AsymQuantizerScaleRef:
+    input_low: Tuple[float, ...]
+    input_range: Tuple[float, ...]
+
+
+@dataclass
+class GranularityQuantizerRefs:
+    per_channel: Union[SymQuantizerScaleRef, AsymQuantizerScaleRef]
+    per_tensor: Union[SymQuantizerScaleRef, AsymQuantizerScaleRef]
+
+
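+# Expected quantizer parameters per range-init collector for every combination of
+# weights/activations, symmetric/asymmetric mode and per-channel/per-tensor granularity.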
+@dataclass
+class RangeInitTestCase:
+    collector_name: str
+    weights_refs_symmetric: GranularityQuantizerRefs
+    weights_refs_assymetric: GranularityQuantizerRefs
+    activations_refs_symmetric: GranularityQuantizerRefs
+    activations_refs_assymetric: GranularityQuantizerRefs
+
+
 @pytest.mark.parametrize(
-    "range_init_type_vs_ref_vals",
+    "range_init_test_case",
     (
         [
-            ("min_max", 9999, 0, 9999),
-            ("mixed_min_max", 9999, 0, 9999),
-            ("mean_min_max", 9999, 0, 9999),
-            ("threesigma", 16119.5, -6119.5, 22239),
-            ("percentile", 6789, 3210, 3578),
+            RangeInitTestCase(
+                collector_name="min_max",
+                weights_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((10000.0, 20000.0, 30000.0)).view(((3, 1, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=30000.0),
+                ),
+                weights_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((1.0, -20000.0, 3.0)).view(((3, 1, 1, 1))),
+                        input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view(((3, 1, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-20000.0, input_range=50000.0),
+                ),
+                activations_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((20000.0, 40000.0, 60000.0)).view(((1, 3, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=60000.0),
+                ),
+                activations_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((1.0, -40000.0, 3.0)).view(((1, 3, 1, 1))),
+                        input_range=torch.tensor((19999.0, 39998.0, 59997.0)).view(((1, 3, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-40000.0, input_range=100000.0),
+                ),
+            ),
+            RangeInitTestCase(
+                collector_name="mixed_min_max",
+                weights_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((10000.0, 20000.0, 30000.0)).view(((3, 1, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=30000.0),
+                ),
+                weights_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((1.0, -20000.0, 3.0)).view(((3, 1, 1, 1))),
+                        input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view(((3, 1, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-20000.0, input_range=50000.0),
+                ),
+                activations_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((20000.0, 40000.0, 60000.0)).view(((1, 3, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=45000.0),
+                ),
+                activations_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((1.0, -40000.0, 3.0)).view(((1, 3, 1, 1))),
+                        input_range=torch.tensor((19999.0, 39998.0, 59997.0)).view(((1, 3, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-30000.0, input_range=75000.0),
+                ),
+            ),
+            RangeInitTestCase(
+                collector_name="mean_min_max",
+                weights_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((10000.0, 20000.0, 30000.0)).view(((3, 1, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=30000.0),
+                ),
+                weights_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((1.0, -20000.0, 3.0)).view(((3, 1, 1, 1))),
+                        input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view(((3, 1, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-20000.0, input_range=50000.0),
+                ),
+                activations_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((15000.0, 30000.0, 45000.0)).view(((1, 3, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=45000.0),
+                ),
+                activations_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((1.5, -30000.0, 4.5)).view(((1, 3, 1, 1))),
+                        input_range=torch.tensor((14998.5000, 29997.0000, 44995.5000)).view(((1, 3, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-30000.0, input_range=75000.0),
+                ),
+            ),
+            RangeInitTestCase(
+                collector_name="threesigma",
+                weights_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((16120.1719, 32240.3438, 48360.5156)).view(((3, 1, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=33780.2891),
+                ),
+                weights_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((-6119.1719, -32240.3438, -18357.5156)).view(((3, 1, 1, 1))),
+                        input_range=torch.tensor((22239.3438, 44478.6875, 66718.0312)).view(((3, 1, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-26279.2871, input_range=60059.5781),
+                ),
+                activations_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((21494.4707, 42988.9414, 64483.4141)).view(((1, 3, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=52662.1367),
+                ),
+                activations_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((-8159.4707, -42988.9414, -24478.4141)).view(((1, 3, 1, 1))),
+                        input_range=torch.tensor((29653.9414, 59307.8828, 88961.8281)).view(((1, 3, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-42660.1367, input_range=95322.2734),
+                ),
+            ),
+            RangeInitTestCase(
+                collector_name="percentile",
+                weights_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((6789.3213, 13580.6416, 20367.9629)).view(((3, 1, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=7776.0),
+                ),
+                weights_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((3210.6790, -13580.6416, 9632.0371)).view(((3, 1, 1, 1))),
+                        input_range=torch.tensor((3578.6423, 7157.2837, 10735.9258)).view(((3, 1, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-740.6420, input_range=8516.6416),
+                ),
+                activations_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((9052.3213, 18108.0000, 27156.9629)).view(((1, 3, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=10734.6426),
+                ),
+                activations_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((4280.6792, -18108.0000, 12842.0371)).view(((1, 3, 1, 1))),
+                        input_range=torch.tensor((4771.6421, 9544.0000, 14314.9258)).view(((1, 3, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-988.0, input_range=11722.6426),
+                ),
+            ),
+            RangeInitTestCase(
+                collector_name="mean_percentile",
+                weights_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((9990.0010, 19980.0020, 29970.0039)).view(((3, 1, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=29910.0039),
+                ),
+                weights_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((10.999, -19980.0, 32.997)).view(((3, 1, 1, 1))),
+                        input_range=torch.tensor((9979.0020, 19958.0039, 29937.0078)).view(((3, 1, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-19940.0020, input_range=49850.0078),
+                ),
+                activations_refs_symmetric=GranularityQuantizerRefs(
+                    per_channel=SymQuantizerScaleRef(
+                        scale=torch.tensor((14985.0020, 29970.0039, 44955.0078)).view(((1, 3, 1, 1)))
+                    ),
+                    per_tensor=SymQuantizerScaleRef(scale=44865.0078),
+                ),
+                activations_refs_assymetric=GranularityQuantizerRefs(
+                    per_channel=AsymQuantizerScaleRef(
+                        input_low=torch.tensor((16.498, -29970.0, 49.496)).view(((1, 3, 1, 1))),
+                        input_range=torch.tensor((14968.5039, 29937.0078, 44905.5117)).view(((1, 3, 1, 1))),
+                    ),
+                    per_tensor=AsymQuantizerScaleRef(input_low=-29910.0039, input_range=74775.0156),
+                ),
+            ),
         ]
     ),
     ids=init_idfn,
 )
 def test_init_ranges_are_set(
-    quantization_mode: str, is_per_channel: bool, range_init_type_vs_ref_vals: Tuple[str, float, float, float]
+    quantization_mode: str,
+    is_per_channel: bool,
+    range_init_test_case: RangeInitTestCase,
 ):
     class SyntheticDataset(torch.utils.data.Dataset):
         def __init__(self):
             super().__init__()
-            self._length = 1
+            self._length = 2
 
         def __getitem__(self, idx):
             if idx >= self._length:
                 raise StopIteration
-            test_input_sample = torch.zeros([3, 100, 100])
-            for i in range(0, 100):
-                for j in range(0, 100):
-                    test_input_sample[0][i][j] = i * 100 + j
-            test_input_sample[1] = test_input_sample[0]
-            test_input_sample[2] = test_input_sample[0]
+            test_input_sample = _get_init_tensor_for_range_init_test() * (idx + 1)
             return test_input_sample, test_input_sample
 
         def __len__(self):
@@ -556,7 +752,7 @@ def __len__(self):
 
     data_loader = torch.utils.data.DataLoader(SyntheticDataset(), batch_size=1, drop_last=True)
 
-    range_init_type = range_init_type_vs_ref_vals[0]
+    range_init_type = range_init_test_case.collector_name
     config_with_init = NNCFConfig()
     config_with_init.update(
         {
@@ -566,7 +762,7 @@ def __len__(self):
                 "algorithm": "quantization",
                 "activations": {"mode": quantization_mode, "per_channel": is_per_channel},
                 "weights": {"mode": quantization_mode, "per_channel": is_per_channel},
-                "initializer": {"range": {"num_init_samples": 1, "type": range_init_type}},
+                "initializer": {"range": {"num_init_samples": 2, "type": range_init_type}},
             },
         }
     )
@@ -585,22 +781,30 @@ def __len__(self):
 
     act_quantizer_info = next(iter(compression_ctrl.non_weight_quantizers.values()))
 
-    ref_scale = range_init_type_vs_ref_vals[1]
-    ref_input_low = range_init_type_vs_ref_vals[2]
-    ref_input_high = range_init_type_vs_ref_vals[3]
+    if is_per_channel:
+        ref_scale = range_init_test_case.activations_refs_symmetric.per_channel.scale
+        ref_input_low = range_init_test_case.activations_refs_assymetric.per_channel.input_low
+        ref_input_range = range_init_test_case.activations_refs_assymetric.per_channel.input_range
+    else:
+        ref_scale = range_init_test_case.activations_refs_symmetric.per_tensor.scale
+        ref_input_low = range_init_test_case.activations_refs_assymetric.per_tensor.input_low
+        ref_input_range = range_init_test_case.activations_refs_assymetric.per_tensor.input_range
 
     def check_scales(quantizer: BaseQuantizer, per_channel: bool):
         # Absolute tolerance is 1.0 due to percentile value interpolation
         if quantization_mode == "symmetric":
-            assert torch.allclose(quantizer.scale, torch.ones_like(quantizer.scale) * ref_scale, atol=1.0)
+            assert torch.allclose(quantizer.scale, torch.tensor(ref_scale), atol=1.0)
             if per_channel:
                 assert quantizer.scale.numel() == 3
             else:
                 assert quantizer.scale.numel() == 1
         else:
-            assert torch.allclose(quantizer.input_low, torch.ones_like(quantizer.input_low) * ref_input_low, atol=1.0)
+            assert torch.allclose(quantizer.input_low, torch.tensor(ref_input_low), atol=1.0)
+
             assert torch.allclose(
-                quantizer.input_range, torch.ones_like(quantizer.input_low) * ref_input_high, atol=1.0
+                quantizer.input_range,
+                torch.tensor(ref_input_range),
+                atol=1.0,
             )
             if per_channel:
                 assert quantizer.input_low.numel() == 3
@@ -612,9 +816,19 @@ def check_scales(quantizer: BaseQuantizer, per_channel: bool):
     check_scales(act_quantizer_info.quantizer_module_ref, is_per_channel)
     # Weight init check
     synth_weight_model = SingleConv2dSyntheticWeightModel()
+    config_with_init["compression"]["initializer"]["range"]["num_init_samples"] = 1
     _, compression_ctrl = create_compressed_model_and_algo_for_test(synth_weight_model, config_with_init)
 
     weight_quantizer_info = next(iter(compression_ctrl.weight_quantizers.values()))
+    if is_per_channel:
+        ref_scale = range_init_test_case.weights_refs_symmetric.per_channel.scale
+        ref_input_low = range_init_test_case.weights_refs_assymetric.per_channel.input_low
+        ref_input_range = range_init_test_case.weights_refs_assymetric.per_channel.input_range
+    else:
+        ref_scale = range_init_test_case.weights_refs_symmetric.per_tensor.scale
+        ref_input_low = range_init_test_case.weights_refs_assymetric.per_tensor.input_low
+        ref_input_range = range_init_test_case.weights_refs_assymetric.per_tensor.input_range
+
     check_scales(weight_quantizer_info.quantizer_module_ref, is_per_channel)
 
 
@@ -671,53 +885,66 @@ def range_init_call_count_test_struct(request):
     return request.param
 
 
+class CustomSpy:
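+    # Unlike mocker.spy, this wrapper also records the return values of the patched
+    # factory, so the tensor collectors it created can be inspected after range init.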
+    def __init__(self, fn) -> None:
+        self._fn = fn
+        self.call_count = 0
+        self.return_values_list = []
+
+    def __call__(self, *args, **kwargs):
+        self.call_count += 1
+        retval = self._fn(*args, **kwargs)
+        self.return_values_list.append(retval)
+        return retval
+
+
 # pylint:disable=redefined-outer-name
 def test_per_layer_range_init_collectors_are_called_the_required_number_of_times(
     range_init_call_count_test_struct, mocker
 ):
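+    # Patch the collector factories with recording spies: the call counts verify how many
+    # collectors were created, and the returned collectors expose how many samples they saw.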
+    range_minmax_init_create_spy = CustomSpy(pt_collectors.get_min_max_statistic_collector)
+    mocker.patch(
+        "nncf.torch.quantization.init_range.get_min_max_statistic_collector", new=range_minmax_init_create_spy
+    )
+    range_meanminmax_init_create_spy = CustomSpy(pt_collectors.get_mixed_min_max_statistic_collector)
+    mocker.patch(
+        "nncf.torch.quantization.init_range.get_mixed_min_max_statistic_collector", new=range_meanminmax_init_create_spy
+    )
+    range_threesigma_init_create_spy = CustomSpy(pt_collectors.get_median_mad_statistic_collector)
+    mocker.patch(
+        "nncf.torch.quantization.init_range.get_median_mad_statistic_collector", new=range_threesigma_init_create_spy
+    )
+
     config = create_config()
     config["compression"]["initializer"]["range"] = range_init_call_count_test_struct.range_init_config
     data_loader = TestRangeInit.create_dataloader(True, config, 10)
     config.register_extra_structs([QuantizationRangeInitArgs(data_loader)])
 
-    range_minmax_init_create_spy = mocker.spy(PTMinMaxStatisticCollector, "__init__")
-    range_meanminmax_init_create_spy = mocker.spy(PTMeanMinMaxStatisticCollector, "__init__")
-    range_threesigma_init_create_spy = mocker.spy(PTMedianMADStatisticCollector, "__init__")
-
-    range_minmax_init_register_input_spy = mocker.spy(PTMinMaxStatisticCollector, "_register_input")
-    range_meanminmax_init_register_input_spy = mocker.spy(PTMeanMinMaxStatisticCollector, "_register_input")
-    range_threesigma_init_register_input_spy = mocker.spy(PTMedianMADStatisticCollector, "_register_input")
-
     TestRangeInit.create_algo_and_compressed_model(config)
 
-    assert (
-        range_minmax_init_create_spy.call_count
-        == range_init_call_count_test_struct.expected_call_count_initializer_create["min_max"]
-    )
-    assert (
-        range_meanminmax_init_create_spy.call_count
-        == range_init_call_count_test_struct.expected_call_count_initializer_create["mean_min_max"]
-    )
-    assert (
-        range_threesigma_init_create_spy.call_count
-        == range_init_call_count_test_struct.expected_call_count_initializer_create["three_sigma"]
-    )
-
-    assert (
-        range_minmax_init_register_input_spy.call_count
-        == range_init_call_count_test_struct.expected_call_count_register_input["min_max"]
-    )
-    assert (
-        range_meanminmax_init_register_input_spy.call_count
-        == range_init_call_count_test_struct.expected_call_count_register_input["mean_min_max"]
-    )
-    assert (
-        range_threesigma_init_register_input_spy.call_count
-        == range_init_call_count_test_struct.expected_call_count_register_input["three_sigma"]
-    )
-
-
-QUANTIZER_RANGE_INITIALIZERS = ["min_max", "threesigma", "mean_min_max", "percentile", "mixed_min_max"]
+    for stat_type, spy in [
+        ("min_max", range_minmax_init_create_spy),
+        ("mean_min_max", range_meanminmax_init_create_spy),
+        ("three_sigma", range_threesigma_init_create_spy),
+    ]:
+        assert spy.call_count == range_init_call_count_test_struct.expected_call_count_initializer_create[stat_type]
+        collected_samples = 0
+        for tensor_collector in spy.return_values_list:
+            # Every aggregator inside one tensor collector must have processed
+            # the same number of samples.
+            cur_values = {aggr._collected_samples for aggr in tensor_collector.aggregators.values()}
+            assert len(cur_values) == 1
+            collected_samples += cur_values.pop()
+
+        assert collected_samples == range_init_call_count_test_struct.expected_call_count_register_input[stat_type]
+
+
+QUANTIZER_RANGE_INITIALIZERS = [
+    "min_max",
+    "threesigma",
+    "mean_min_max",
+    "percentile",
+    "mixed_min_max",
+    "mean_percentile",
+]
 
 
 class QuantizeRangeInitScaleShapeTestStruct:
@@ -794,7 +1021,7 @@ def test_quantize_range_init_sets_correct_scale_shapes(quantizer_range_init_test
         collector = StatCollectorGenerator.generate_stat_collector_for_range_init_config(
             range_init_config, tuple(quantizer.scale_shape), collector_params
         )
-        collector.register_input(torch.ones(test_struct.input_shape))
+        collector.register_input_for_all_reducers(PTNNCFTensor(torch.ones(test_struct.input_shape)))
         stat = collector.get_statistics()
         minmax_values = pt_convert_stat_to_min_max_tensor_stat(stat)
         quantizer.apply_minmax_init(min_values=minmax_values.min_values, max_values=minmax_values.max_values)
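A note on the CustomSpy helper introduced above: a plain mocker.spy tracks call counts but, depending on the pytest-mock version, may expose only the most recent return value, whereas the rewritten test needs every tensor collector the patched factory produced. A self-contained sketch of the helper's contract (the wrapped function is an illustrative stand-in, not from this patch):

def double(x):
    return x * 2

spy = CustomSpy(double)
assert spy(2) == 4 and spy(3) == 6
assert spy.call_count == 2
assert spy.return_values_list == [4, 6]  # every returned object stays inspectable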
diff --git a/tests/torch/tensor_statistics/test_tensor_statistics.py b/tests/torch/tensor_statistics/test_tensor_statistics.py
index 5c5cccc0220..05f731b57bf 100644
--- a/tests/torch/tensor_statistics/test_tensor_statistics.py
+++ b/tests/torch/tensor_statistics/test_tensor_statistics.py
@@ -15,19 +15,16 @@
 import pytest
 import torch
 
-from nncf.common.tensor_statistics.collectors import OfflineTensorStatisticCollector
-from nncf.common.tensor_statistics.collectors import ReductionShape
-from nncf.common.tensor_statistics.collectors import StatisticsNotCollectedError
+from nncf.common.tensor_statistics.collectors import ReductionAxes
 from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
 from nncf.common.tensor_statistics.statistics import TensorStatistic
 from nncf.torch.tensor import PTNNCFTensor
-from nncf.torch.tensor_statistics.collectors import PTMeanMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMeanPercentileStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMedianMADStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMinMaxStatisticCollector
-from nncf.torch.tensor_statistics.collectors import PTMixedMinMaxStatisticCollector
 from nncf.torch.tensor_statistics.collectors import PTNNCFCollectorTensorProcessor
-from nncf.torch.tensor_statistics.collectors import PTPercentileStatisticCollector
+from nncf.torch.tensor_statistics.collectors import get_mean_percentile_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_median_mad_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_min_max_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_mixed_min_max_statistic_collector
+from nncf.torch.tensor_statistics.collectors import get_percentile_tensor_collector
 from nncf.torch.tensor_statistics.statistics import PTMedianMADTensorStatistic
 from nncf.torch.tensor_statistics.statistics import PTMinMaxTensorStatistic
 from nncf.torch.tensor_statistics.statistics import PTPercentileTensorStatistic
@@ -43,16 +40,18 @@ class TestCollectedStatistics:
         ("collector", "reduction_shapes_vs_ref_statistic"),
         [
             (
-                PTMinMaxStatisticCollector,
+                get_min_max_statistic_collector,
                 {
                     ((1,), (0, 1)): PTMinMaxTensorStatistic(
                         min_values=torch.tensor([-4.0]), max_values=torch.tensor([6.1])
                     ),
                     ((3, 1), (1,)): PTMinMaxTensorStatistic(
-                        min_values=torch.tensor([[1.0], [-4.0], [4.0]]), max_values=torch.tensor([[4.5], [4.0], [6.1]])
+                        min_values=torch.tensor([[1.0], [-4.0], [4.0]]),
+                        max_values=torch.tensor([[4.5], [4.0], [6.1]]),
                     ),
                     ((1, 3), (0,)): PTMinMaxTensorStatistic(
-                        min_values=torch.tensor([[-1.3, -4.0, -3.5]]), max_values=torch.tensor([[4.5, 5.8, 6.1]])
+                        min_values=torch.tensor([[-1.3, -4.0, -3.5]]),
+                        max_values=torch.tensor([[4.5, 5.8, 6.1]]),
                     ),
                     # Not supported for now:
                     # ((3, 3), ): PTMinMaxTensorStatistic(
@@ -70,7 +69,11 @@ class TestCollectedStatistics:
                 },
             ),
             (
-                partial(PTMeanMinMaxStatisticCollector, use_per_sample_stats=False),
+                partial(
+                    get_mixed_min_max_statistic_collector,
+                    use_means_of_mins=True,
+                    use_means_of_maxs=True,
+                ),
                 {
                     ((1,), (0, 1)): PTMinMaxTensorStatistic(
                         min_values=torch.tensor([-3.5]), max_values=torch.tensor([6.05])
@@ -80,14 +83,14 @@ class TestCollectedStatistics:
                         max_values=torch.tensor([[3.75], [3.5], [6.05]]),
                     ),
                     ((1, 3), (0,)): PTMinMaxTensorStatistic(
-                        min_values=torch.tensor([[-1.15, -3, -3.25]]), max_values=torch.tensor([[4.25, 5.4, 6.05]])
+                        min_values=torch.tensor([[-1.15, -3, -3.25]]),
+                        max_values=torch.tensor([[4.25, 5.4, 6.05]]),
                     ),
                 },
             ),
             (
                 partial(
-                    PTMixedMinMaxStatisticCollector,
-                    use_per_sample_stats=False,
+                    get_mixed_min_max_statistic_collector,
                     use_means_of_mins=False,
                     use_means_of_maxs=True,
                 ),
@@ -100,7 +103,8 @@ class TestCollectedStatistics:
                         max_values=torch.tensor([[3.75], [3.5], [6.05]]),
                     ),
                     ((1, 3), (0,)): PTMinMaxTensorStatistic(
-                        min_values=torch.tensor([[-1.3, -4.0, -3.5]]), max_values=torch.tensor([[4.25, 5.4, 6.05]])
+                        min_values=torch.tensor([[-1.3, -4.0, -3.5]]),
+                        max_values=torch.tensor([[4.25, 5.4, 6.05]]),
                     ),
                 },
             ),
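The use_means_of_mins/use_means_of_maxs flags above select how per-batch extrema are aggregated into the final range. A small sketch of the difference (data is made up for illustration):

import torch

batches = [torch.tensor([-1.0, 2.0]), torch.tensor([-4.0, 6.0])]
per_batch_mins = torch.stack([b.min() for b in batches])

global_min = per_batch_mins.min()     # tensor(-4.) -> plain min aggregation
mean_of_mins = per_batch_mins.mean()  # tensor(-2.5000) -> "means of mins" aggregation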
@@ -109,13 +113,19 @@ class TestCollectedStatistics:
     def test_collected_statistics_with_shape_convert(
         self,
         collector: Type[TensorStatisticCollectorBase],
-        reduction_shapes_vs_ref_statistic: Dict[Tuple[ReductionShape, ReductionShape], TensorStatistic],
+        reduction_shapes_vs_ref_statistic: Dict[Tuple[ReductionAxes, ReductionAxes], TensorStatistic],
     ):
         for shapes in reduction_shapes_vs_ref_statistic.keys():
-            output_shape, reduction_shape = shapes
-            collector_obj = collector(use_abs_max=True, reduction_shape=reduction_shape, output_shape=output_shape)
+            scale_shape, reducer_axes = shapes
+            collector_obj = collector(
+                scale_shape=scale_shape,
+                use_abs_max=True,
+                reduction_axes=reducer_axes,
+                aggregation_axes=(0,),
+                num_samples=None,
+            )
             for input_ in TestCollectedStatistics.REF_INPUTS:
-                collector_obj.register_input(input_)
+                collector_obj.register_input_for_all_reducers(PTNNCFTensor(input_))
             test_stats = collector_obj.get_statistics()
             assert reduction_shapes_vs_ref_statistic[shapes] == test_stats
 
@@ -123,15 +133,20 @@ def test_collected_statistics_with_shape_convert(
         ("collector", "reduction_shapes_vs_ref_statistic"),
         [
             (
-                PTMedianMADStatisticCollector,
+                get_median_mad_statistic_collector,
                 {
-                    (1,): PTMedianMADTensorStatistic(median_values=torch.tensor([2.8]), mad_values=torch.tensor([2.6])),
+                    (1,): PTMedianMADTensorStatistic(
+                        median_values=torch.tensor([2.8]),
+                        mad_values=torch.tensor([2.6]),
+                    ),
                     (3, 1): PTMedianMADTensorStatistic(
                         median_values=torch.tensor([[2.8], [-2.5], [5.4]]),
                         mad_values=torch.tensor([[0.85], [1.1], [0.65]]),
                     ),
                     (1, 3): PTMedianMADTensorStatistic(
-                        median_values=torch.tensor([[2.5, 2.3, 3.35]]), mad_values=torch.tensor([[1.9, 3.1, 2.7]])
+                        median_values=torch.tensor([[2.5, 2.3, 3.35]]),
+                        mad_values=torch.tensor([[1.9, 3.1, 2.7]]),
                     ),
                     # Not supported for now:
                     # (3, 3): PTMedianMADTensorStatistic(
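For reference, the median/MAD statistics checked here reduce to median(x) and median(|x - median(x)|); a tiny sketch with made-up data (independent of the test's REF_INPUTS):

import torch

x = torch.tensor([1.0, 2.0, 4.0, 10.0, 12.0])
median = x.median()                # tensor(4.)
mad = (x - median).abs().median()  # tensor(3.)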
@@ -149,7 +164,7 @@ def test_collected_statistics_with_shape_convert(
                 },
             ),
             (
-                partial(PTPercentileStatisticCollector, percentiles_to_collect=[10.0]),
+                partial(get_percentile_tensor_collector, percentiles_to_collect=[10.0]),
                 {
                     (1,): PTPercentileTensorStatistic({10.0: torch.tensor([-3.15])}),
                     (3, 1): PTPercentileTensorStatistic({10.0: torch.tensor([[1.5], [-3.75], [4.15]])}),
@@ -167,7 +182,7 @@ def test_collected_statistics_with_shape_convert(
                 },
             ),
             (
-                partial(PTMeanPercentileStatisticCollector, percentiles_to_collect=[10.0]),
+                partial(get_mean_percentile_statistic_collector, percentiles_to_collect=[10.0]),
                 {
                     (1,): PTPercentileTensorStatistic({10.0: torch.tensor([-2.9])}),
                     (3, 1): PTPercentileTensorStatistic({10.0: torch.tensor([[2.0100], [-3.3500], [4.4000]])}),
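Roughly speaking, the two percentile collectors above differ in where the percentile is taken: get_percentile_tensor_collector computes it over all collected inputs at once, while get_mean_percentile_statistic_collector averages per-input percentiles. A rough sketch with illustrative data:

import torch

samples = [torch.arange(0.0, 11.0), torch.arange(5.0, 16.0)]

# 10th percentile over everything that was collected:
stacked_p10 = torch.quantile(torch.stack(samples), 0.10)

# Mean of per-sample 10th percentiles:
per_sample_p10 = torch.stack([torch.quantile(s, 0.10) for s in samples])  # tensor([1., 6.])
mean_p10 = per_sample_p10.mean()                                          # tensor(3.5000)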
@@ -189,90 +204,24 @@ def test_collected_statistics_with_shape_convert(
     def test_collected_statistics(
         self,
         collector: Type[TensorStatisticCollectorBase],
-        reduction_shapes_vs_ref_statistic: Dict[ReductionShape, TensorStatistic],
+        reduction_shapes_vs_ref_statistic: Dict[ReductionAxes, TensorStatistic],
     ):
-        for shapes in reduction_shapes_vs_ref_statistic.keys():
-            reduction_shape = shapes
-            collector_obj = collector(reduction_shape=reduction_shape)
+        for reduction_shape in reduction_shapes_vs_ref_statistic:
+            # Derive reduction axes from the target scale shape: for a 2D scale
+            # shape such as (3, 1), reduce over the single axis kept at size 1;
+            # a per-tensor scale shape (1,) reduces over both input axes.
+            if len(reduction_shape) > 1:
+                reducer_axes = ([dim for dim, val in enumerate(reduction_shape) if val == 1][0],)
+            else:
+                reducer_axes = (0, 1)
+
+            collector_obj = collector(
+                scale_shape=reduction_shape,
+                reduction_axes=reducer_axes,
+                aggregation_axes=(0,),
+                num_samples=None,
+            )
             for input_ in TestCollectedStatistics.REF_INPUTS:
-                collector_obj.register_input(input_)
+                collector_obj.register_input_for_all_reducers(PTNNCFTensor(input_))
             test_stats = collector_obj.get_statistics()
-            assert reduction_shapes_vs_ref_statistic[shapes] == test_stats
-
-    COLLECTORS = [
-        partial(PTMinMaxStatisticCollector, use_abs_max=False, output_shape=(1,)),
-        partial(
-            PTMixedMinMaxStatisticCollector,
-            use_per_sample_stats=False,
-            use_abs_max=False,
-            use_means_of_mins=False,
-            use_means_of_maxs=False,
-            output_shape=(1,),
-        ),
-        partial(PTMeanMinMaxStatisticCollector, use_per_sample_stats=False, use_abs_max=False, output_shape=(1,)),
-        PTMedianMADStatisticCollector,
-        partial(PTPercentileStatisticCollector, percentiles_to_collect=[10.0]),
-        partial(PTMeanPercentileStatisticCollector, percentiles_to_collect=[10.0]),
-    ]
-
-    @pytest.fixture(params=COLLECTORS)
-    def collector_for_interface_test(self, request):
-        collector_type = request.param
-        return collector_type(reduction_shape=(1,))
-
-    def test_collected_samples(self, collector_for_interface_test: TensorStatisticCollectorBase):
-        for input_ in TestCollectedStatistics.REF_INPUTS:
-            collector_for_interface_test.register_input(input_)
-        assert collector_for_interface_test.collected_samples() == len(TestCollectedStatistics.REF_INPUTS)
-
-    def test_reset(self, collector_for_interface_test: TensorStatisticCollectorBase):
-        for input_ in TestCollectedStatistics.REF_INPUTS:
-            collector_for_interface_test.register_input(input_)
-        collector_for_interface_test.reset()
-        assert collector_for_interface_test.collected_samples() == 0
-        with pytest.raises(StatisticsNotCollectedError):
-            collector_for_interface_test.get_statistics()
-
-    def test_enable_disable(self, collector_for_interface_test: TensorStatisticCollectorBase):
-        for input_ in TestCollectedStatistics.REF_INPUTS:
-            collector_for_interface_test.register_input(input_)
-
-        collector_for_interface_test.disable()
-        for input_ in TestCollectedStatistics.REF_INPUTS:
-            collector_for_interface_test.register_input(input_)
-        assert collector_for_interface_test.collected_samples() == len(TestCollectedStatistics.REF_INPUTS)
-
-        collector_for_interface_test.enable()
-        for input_ in TestCollectedStatistics.REF_INPUTS:
-            collector_for_interface_test.register_input(input_)
-        assert collector_for_interface_test.collected_samples() == 2 * len(TestCollectedStatistics.REF_INPUTS)
-
-    OFFLINE_COLLECTORS = [
-        partial(
-            PTMixedMinMaxStatisticCollector,
-            use_per_sample_stats=False,
-            use_abs_max=False,
-            use_means_of_mins=False,
-            use_means_of_maxs=False,
-            output_shape=(1,),
-        ),
-        partial(PTMeanMinMaxStatisticCollector, use_per_sample_stats=False, use_abs_max=False, output_shape=(1,)),
-        PTMedianMADStatisticCollector,
-        partial(PTPercentileStatisticCollector, percentiles_to_collect=[10.0]),
-        partial(PTMeanPercentileStatisticCollector, percentiles_to_collect=[10.0]),
-    ]
-
-    REF_NUM_SAMPLES = 3
-
-    @pytest.fixture(params=OFFLINE_COLLECTORS)
-    def collector_for_num_samples_test(self, request):
-        collector_type = request.param
-        return collector_type(reduction_shape=(1,), num_samples=TestCollectedStatistics.REF_NUM_SAMPLES)
-
-    def test_num_samples(self, collector_for_num_samples_test: OfflineTensorStatisticCollector):
-        for input_ in TestCollectedStatistics.REF_INPUTS * 10:
-            collector_for_num_samples_test.register_input(input_)
-        assert collector_for_num_samples_test.collected_samples() == TestCollectedStatistics.REF_NUM_SAMPLES
+            assert reduction_shapes_vs_ref_statistic[reduction_shape] == test_stats
 
 
 class TestCollectorTensorProcessor:
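To make the scale_shape/reduction_axes relation used throughout these tests concrete: the axes passed as reduction_axes are the input axes that get reduced away, so the surviving dimensions match the scale shape. A plain-torch sketch (data is illustrative, not the test's REF_INPUTS):

import torch

x = torch.tensor([[ 1.0,  4.5,  4.0],
                  [-4.0,  4.0, -1.0],
                  [ 4.0,  6.1,  5.0]])

torch.amin(x, dim=(0, 1))           # reduce both axes -> per-tensor scalar, scale shape (1,)
torch.amin(x, dim=1, keepdim=True)  # per-row minima,    scale shape (3, 1)
torch.amin(x, dim=0, keepdim=True)  # per-column minima, scale shape (1, 3)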
diff --git a/tests/torch/test_statistics_aggregator.py b/tests/torch/test_statistics_aggregator.py
index 4a4b8f48914..60bfb99015a 100644
--- a/tests/torch/test_statistics_aggregator.py
+++ b/tests/torch/test_statistics_aggregator.py
@@ -62,7 +62,7 @@ def get_backend_model(self, dataset_samples):
 
     @pytest.fixture
     def is_backend_support_custom_estimators(self) -> bool:
-        return False
+        return True
 
     @pytest.fixture(scope="session")
     def test_params(self):