
Commit

Update tests
andrey-churkin committed Sep 26, 2023
1 parent cbeb182 commit 8575880
Showing 7 changed files with 31 additions and 53 deletions.
4 changes: 4 additions & 0 deletions tests/onnx/quantization/common.py
@@ -100,7 +100,11 @@ def min_max_quantize_model(
     quantization_params = {} if quantization_params is None else quantization_params

     advanced_parameters = quantization_params.get("advanced_parameters", AdvancedQuantizationParameters())

+    # ONNX backend does not support these algorithms
+    advanced_parameters.disable_bias_correction = True
+    advanced_parameters.disable_channel_alignment = True
+    advanced_parameters.smooth_quant_alpha = -1
     quantization_params["advanced_parameters"] = advanced_parameters

     post_training_quantization = PostTrainingQuantization(subset_size=1, **quantization_params)
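
For reference, a minimal sketch (not part of this commit) of how these flags combine to leave only the MinMax pass active in post-training quantization; every name below is taken from the diffs in this commit.

# Illustrative sketch: restrict PostTrainingQuantization to the MinMax pass only.
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization

advanced_parameters = AdvancedQuantizationParameters()
advanced_parameters.disable_bias_correction = True    # skip BiasCorrection
advanced_parameters.disable_channel_alignment = True  # skip ChannelAlignment
advanced_parameters.smooth_quant_alpha = -1           # a negative alpha disables SmoothQuant

post_training_quantization = PostTrainingQuantization(subset_size=1, advanced_parameters=advanced_parameters)
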
5 changes: 2 additions & 3 deletions tests/onnx/quantization/test_ptq_params.py
@@ -24,8 +24,8 @@
 from nncf.onnx.statistics.collectors import ONNXMeanMinMaxStatisticCollector
 from nncf.onnx.statistics.collectors import ONNXMinMaxStatisticCollector
 from nncf.parameters import TargetDevice
+from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
 from nncf.quantization.algorithms.min_max.onnx_backend import ONNXMinMaxAlgoBackend
-from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
 from nncf.scopes import IgnoredScope
 from tests.common.quantization.metatypes import Conv2dTestMetatype
 from tests.common.quantization.metatypes import LinearTestMetatype
@@ -49,8 +49,7 @@ def get_ignored_patterns(device: TargetDevice = TargetDevice.ANY) -> GraphPatter

 @pytest.mark.parametrize("target_device", TargetDevice)
 def test_target_device(target_device):
-    algo = PostTrainingQuantization(target_device=target_device)
-    min_max_algo = algo.algorithms[0]
+    min_max_algo = MinMaxQuantization(target_device=target_device)
     min_max_algo._backend_entity = ONNXMinMaxAlgoBackend()
     assert min_max_algo._target_device == target_device

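The same replacement recurs throughout the backend-specific and template tests below: instead of building the full PostTrainingQuantization pipeline and taking its first algorithm, each test now constructs MinMaxQuantization directly. A condensed before/after sketch (ONNX backend shown, as in this file; target_device stands in for the pytest parameter):

from nncf.parameters import TargetDevice
from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
from nncf.quantization.algorithms.min_max.onnx_backend import ONNXMinMaxAlgoBackend

target_device = TargetDevice.ANY  # supplied by pytest parametrization in the real test

# Before: the MinMax algorithm was pulled out of the PTQ pipeline.
#   algo = PostTrainingQuantization(target_device=target_device)
#   min_max_algo = algo.algorithms[0]

# After: it is instantiated directly and the backend entity is assigned explicitly.
min_max_algo = MinMaxQuantization(target_device=target_device)
min_max_algo._backend_entity = ONNXMinMaxAlgoBackend()
assert min_max_algo._target_device == target_device
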
5 changes: 2 additions & 3 deletions tests/openvino/native/quantization/test_ptq_params.py
@@ -26,8 +26,8 @@
 from nncf.openvino.graph.nncf_graph_builder import GraphConverter
 from nncf.openvino.graph.transformations.commands import OVTargetPoint
 from nncf.parameters import TargetDevice
+from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
 from nncf.quantization.algorithms.min_max.openvino_backend import OVMinMaxAlgoBackend
-from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
 from nncf.scopes import IgnoredScope
 from tests.common.quantization.metatypes import Conv2dTestMetatype
 from tests.common.quantization.metatypes import LinearTestMetatype
@@ -49,8 +49,7 @@ def get_ignored_patterns(device: TargetDevice = TargetDevice.ANY) -> GraphPatter
 # pylint: disable=protected-access
 @pytest.mark.parametrize("target_device", [TargetDevice.CPU, TargetDevice.GPU, TargetDevice.VPU])
 def test_target_device(target_device):
-    algo = PostTrainingQuantization(target_device=target_device)
-    min_max_algo = algo.algorithms[0]
+    min_max_algo = MinMaxQuantization(target_device=target_device)
     min_max_algo._backend_entity = OVMinMaxAlgoBackend()
     assert min_max_algo._target_device.value == HW_CONFIG_TYPE_TARGET_DEVICE_MAP[target_device.value]

20 changes: 4 additions & 16 deletions tests/post_training/test_templates/test_ptq_params.py
@@ -27,10 +27,8 @@
 from nncf.common.tensor_statistics.statistic_point import StatisticPointsContainer
 from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
 from nncf.parameters import ModelType
-from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
 from nncf.quantization.advanced_parameters import OverflowFix
 from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
-from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
 from nncf.quantization.passes import transform_to_inference_graph
 from nncf.quantization.range_estimator import RangeEstimatorParametersSet
 from nncf.scopes import IgnoredScope
@@ -131,12 +129,7 @@ def test_range_estimator_per_tensor(self, test_params, range_estimator_params)
"range_estimator_params", [RangeEstimatorParametersSet.MINMAX, RangeEstimatorParametersSet.MEAN_MINMAX, None]
)
def test_range_estimator_per_tensor(self, test_params, range_estimator_params):
algo = PostTrainingQuantization(
advanced_parameters=AdvancedQuantizationParameters(
activations_range_estimator_params=range_estimator_params
)
)
min_max_algo = algo.algorithms[0]
min_max_algo = MinMaxQuantization(activations_range_estimator_params=range_estimator_params)
min_max_algo._backend_entity = self.get_algo_backend()
assert min_max_algo._range_estimator_params[QuantizerGroup.ACTIVATIONS] == range_estimator_params

@@ -161,10 +154,7 @@ def test_range_estimator_per_tensor(self, test_params, range_estimator_params):

     @pytest.mark.parametrize("quantize_outputs", [False, True])
     def test_quantize_outputs(self, test_params, quantize_outputs):
-        algo = PostTrainingQuantization(
-            advanced_parameters=AdvancedQuantizationParameters(quantize_outputs=quantize_outputs)
-        )
-        min_max_algo = algo.algorithms[0]
+        min_max_algo = MinMaxQuantization(quantize_outputs=quantize_outputs)
         min_max_algo._backend_entity = self.get_algo_backend()

         nncf_graph = test_params["test_quantize_outputs"]["nncf_graph"]
@@ -189,8 +179,7 @@ def test_quantize_outputs(self, test_params, quantize_outputs):

     def test_ignored_scopes(self, test_params, ignored_scopes_data):
         ignored_scope, act_num_ref, weight_num_ref = ignored_scopes_data
-        algo = PostTrainingQuantization(ignored_scope=ignored_scope)
-        min_max_algo = algo.algorithms[0]
+        min_max_algo = MinMaxQuantization(ignored_scope=ignored_scope)
         min_max_algo._backend_entity = self.get_algo_backend()
         assert min_max_algo._ignored_scope == ignored_scope

@@ -215,8 +204,7 @@ def test_ignored_scopes(self, test_params, ignored_scopes_data):

     @pytest.mark.parametrize("model_type", [ModelType.TRANSFORMER])
     def test_model_type_pass(self, test_params, model_type):
-        algo = PostTrainingQuantization(preset=QuantizationPreset.MIXED, model_type=model_type)
-        min_max_algo = algo.algorithms[0]
+        min_max_algo = MinMaxQuantization(preset=QuantizationPreset.MIXED, model_type=model_type)
         min_max_algo._backend_entity = self.get_algo_backend()

         nncf_graph = test_params["test_model_type_pass"]["nncf_graph"]
31 changes: 10 additions & 21 deletions tests/post_training/test_templates/test_quantizer_config.py
@@ -29,9 +29,8 @@
 from nncf.experimental.common.tensor_statistics.collectors import MaxReducer
 from nncf.experimental.common.tensor_statistics.collectors import MinReducer
 from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
-from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
 from nncf.quantization.advanced_parameters import QuantizationParameters
-from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
+from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
 from nncf.quantization.passes import transform_to_inference_graph
 from nncf.quantization.range_estimator import RangeEstimatorParametersSet
 from tests.post_training.test_templates.models import NNCFGraphToTest
@@ -81,8 +80,7 @@ def statistic_collector_parameters(self, request) -> TestGetStatisticsCollectorP
         pass

     def test_default_quantizer_config(self, single_conv_nncf_graph):
-        algo = PostTrainingQuantization()
-        min_max_algo = algo.algorithms[0]
+        min_max_algo = MinMaxQuantization()
         min_max_algo._backend_entity = self.get_algo_backend()
         nncf_graph = single_conv_nncf_graph.nncf_graph
         inference_nncf_graph = transform_to_inference_graph(
@@ -127,18 +125,15 @@ def test_quantizer_config_from_ptq_params_for_CPU(
         signed_activations,
         single_conv_nncf_graph,
     ):
-        algo = PostTrainingQuantization(
+        min_max_algo = MinMaxQuantization(
             preset=preset,
-            advanced_parameters=AdvancedQuantizationParameters(
-                activations_quantization_params=QuantizationParameters(
-                    num_bits=activation_bits, per_channel=activation_per_channel, signedness_to_force=signed_activations
-                ),
-                weights_quantization_params=QuantizationParameters(
-                    num_bits=weight_bits, per_channel=weight_per_channel, signedness_to_force=signed_weights
-                ),
+            activations_quantization_params=QuantizationParameters(
+                num_bits=activation_bits, per_channel=activation_per_channel, signedness_to_force=signed_activations
+            ),
+            weights_quantization_params=QuantizationParameters(
+                num_bits=weight_bits, per_channel=weight_per_channel, signedness_to_force=signed_weights
             ),
         )
-        min_max_algo = algo.algorithms[0]
         min_max_algo._backend_entity = self.get_algo_backend()
         nncf_graph = single_conv_nncf_graph.nncf_graph
         inference_nncf_graph = transform_to_inference_graph(
@@ -179,8 +174,7 @@ def test_quantizer_config_from_ptq_params_for_CPU(
             assert quantization_point.qconfig.signedness_to_force == signed_activations

     def test_depthwise_conv_default_quantizer_config(self, depthwise_conv_nncf_graph):
-        algo = PostTrainingQuantization()
-        min_max_algo = algo.algorithms[0]
+        min_max_algo = MinMaxQuantization()
         min_max_algo._backend_entity = self.get_algo_backend()
         nncf_graph = depthwise_conv_nncf_graph.nncf_graph
         inference_nncf_graph = transform_to_inference_graph(
@@ -223,12 +217,7 @@ def test_get_stat_collector(
         statistic_collector_parameters: TestGetStatisticsCollectorParameters,
     ):
         params = statistic_collector_parameters
-        algo = PostTrainingQuantization(
-            advanced_parameters=AdvancedQuantizationParameters(
-                activations_range_estimator_params=range_estimator_params
-            )
-        )
-        min_max_algo = algo.algorithms[0]
+        min_max_algo = MinMaxQuantization(activations_range_estimator_params=range_estimator_params)
         min_max_algo._backend_entity = self.get_algo_backend()
         q_config = QuantizerConfig(num_bits=8, mode=q_config_mode, per_channel=q_config_per_channel)

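A condensed illustration of the constructor these tests now exercise; the num_bits/per_channel values are examples, not taken from the test. Quantizer and range-estimator settings go straight into MinMaxQuantization rather than through an AdvancedQuantizationParameters wrapper.

from nncf.quantization.advanced_parameters import QuantizationParameters
from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
from nncf.quantization.range_estimator import RangeEstimatorParametersSet

# Example values only; the tests parametrize bit width, per-channel mode and estimator set.
min_max_algo = MinMaxQuantization(
    activations_quantization_params=QuantizationParameters(num_bits=8, per_channel=False),
    weights_quantization_params=QuantizationParameters(num_bits=8, per_channel=True),
    activations_range_estimator_params=RangeEstimatorParametersSet.MEAN_MINMAX,
)
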
14 changes: 7 additions & 7 deletions tests/torch/ptq/test_fq_params_calculation.py
@@ -18,7 +18,6 @@
 import nncf
 from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
 from nncf.quantization.advanced_parameters import OverflowFix
-from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
 from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
 from nncf.torch.model_creation import create_nncf_network
 from nncf.torch.nncf_network import NNCFNetwork
@@ -49,13 +48,14 @@ def transform_fn(sample):

     dataset = nncf.Dataset(dataloader, transform_func=transform_fn)

-    post_training_quantization = PostTrainingQuantization(subset_size=1, **quantization_params)
     # Using PTQ, but apply only MinMax
-    updated_algorithms = []
-    for algo in post_training_quantization.algorithms:
-        if isinstance(algo, MinMaxQuantization):
-            updated_algorithms.append(algo)
-    post_training_quantization.algorithms = updated_algorithms
+    advanced_parameters = quantization_params.get("advanced_parameters", AdvancedQuantizationParameters())
+    advanced_parameters.disable_bias_correction = True
+    advanced_parameters.disable_channel_alignment = True
+    advanced_parameters.smooth_quant_alpha = -1
+    quantization_params["advanced_parameters"] = advanced_parameters
+
+    post_training_quantization = PostTrainingQuantization(subset_size=1, **quantization_params)

     original_model.eval()
     nncf_network = create_nncf_network(original_model, config)
5 changes: 2 additions & 3 deletions tests/torch/ptq/test_ptq_params.py
@@ -24,8 +24,8 @@
 from nncf.quantization.advanced_parameters import OverflowFix
 from nncf.quantization.advanced_parameters import QuantizationMode
 from nncf.quantization.advanced_parameters import QuantizationParameters
+from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
 from nncf.quantization.algorithms.min_max.torch_backend import PTMinMaxAlgoBackend
-from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
 from nncf.quantization.range_estimator import RangeEstimatorParametersSet
 from nncf.scopes import IgnoredScope
 from nncf.torch.graph.graph import PTTargetPoint
@@ -94,8 +94,7 @@ def forward(self, x):

 @pytest.mark.parametrize("target_device", TargetDevice)
 def test_target_device(target_device):
-    algo = PostTrainingQuantization(target_device=target_device)
-    min_max_algo = algo.algorithms[0]
+    min_max_algo = MinMaxQuantization(target_device=target_device)
     min_max_algo._backend_entity = PTMinMaxAlgoBackend()
     assert min_max_algo._target_device == target_device

