Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix pytest warnings #2813

Merged
merged 12 commits into from
Jul 16, 2024
Merged
20 changes: 10 additions & 10 deletions tests/common/accuracy_control/test_calculate_drop.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@


@dataclass
class TestCase:
class AccuracyDropTestCase:
initial_metric: float
quantized_metric: float
drop_type: DropType
Expand All @@ -31,57 +31,57 @@ class TestCase:
"ts",
[
# ABSOLUTE
TestCase(
AccuracyDropTestCase(
initial_metric=0.2923,
quantized_metric=0.3185,
drop_type=DropType.ABSOLUTE,
expected_should_terminate=True,
expected_accuracy_drop=-0.0262,
),
TestCase(
AccuracyDropTestCase(
initial_metric=0.3185,
quantized_metric=0.2923,
drop_type=DropType.ABSOLUTE,
expected_should_terminate=False,
expected_accuracy_drop=0.0262,
),
TestCase(
AccuracyDropTestCase(
initial_metric=-0.2923,
quantized_metric=-0.3185,
drop_type=DropType.ABSOLUTE,
expected_should_terminate=False,
expected_accuracy_drop=0.0262,
),
TestCase(
AccuracyDropTestCase(
initial_metric=-0.3185,
quantized_metric=-0.2923,
drop_type=DropType.ABSOLUTE,
expected_should_terminate=True,
expected_accuracy_drop=-0.0262,
),
# RELATIVE
TestCase(
AccuracyDropTestCase(
initial_metric=0.2923,
quantized_metric=0.3185,
drop_type=DropType.RELATIVE,
expected_should_terminate=True,
expected_accuracy_drop=None,
),
TestCase(
AccuracyDropTestCase(
initial_metric=0.3185,
quantized_metric=0.2923,
drop_type=DropType.RELATIVE,
expected_should_terminate=False,
expected_accuracy_drop=0.08226059,
),
TestCase(
AccuracyDropTestCase(
initial_metric=-0.2923,
quantized_metric=-0.3185,
drop_type=DropType.RELATIVE,
expected_should_terminate=False,
expected_accuracy_drop=0.0896339,
),
TestCase(
AccuracyDropTestCase(
initial_metric=-0.3185,
quantized_metric=-0.2923,
drop_type=DropType.RELATIVE,
Expand All @@ -90,7 +90,7 @@ class TestCase:
),
],
)
def test_calculate_accuracy_drop(ts: TestCase):
def test_calculate_accuracy_drop(ts: AccuracyDropTestCase):
should_terminate, accuracy_drop = calculate_accuracy_drop(
ts.initial_metric, ts.quantized_metric, ts.max_drop, ts.drop_type
)
Expand Down
33 changes: 16 additions & 17 deletions tests/common/quantization/test_minmax.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,24 +134,23 @@ def test_mode_against_default_map(algo_params, is_error):
qconf_attr_vs_constraint_dict_to_compare = {"mode": QuantizationScheme.SYMMETRIC}

if is_error:
try:
with pytest.raises(nncf.ParameterNotSupportedError):
minmax = MinMaxQuantization(**algo_params)
except nncf.ParameterNotSupportedError:
pytest.xfail("Caught expected error")
minmax = MinMaxQuantization(**algo_params)
for ref_parameter_name, ref_parameter_value in default_values_to_compare[mode_param].items():
parameter_value = getattr(minmax, ref_parameter_name)
assert parameter_value == ref_parameter_value

global_quantizer_constraints = getattr(minmax, "_global_quantizer_constraints")
assert (
global_quantizer_constraints[QuantizerGroup.ACTIVATIONS].qconf_attr_vs_constraint_dict
== qconf_attr_vs_constraint_dict_to_compare
)
assert (
global_quantizer_constraints[QuantizerGroup.WEIGHTS].qconf_attr_vs_constraint_dict
== qconf_attr_vs_constraint_dict_to_compare
)
else:
minmax = MinMaxQuantization(**algo_params)
for ref_parameter_name, ref_parameter_value in default_values_to_compare[mode_param].items():
parameter_value = getattr(minmax, ref_parameter_name)
assert parameter_value == ref_parameter_value

global_quantizer_constraints = getattr(minmax, "_global_quantizer_constraints")
assert (
global_quantizer_constraints[QuantizerGroup.ACTIVATIONS].qconf_attr_vs_constraint_dict
== qconf_attr_vs_constraint_dict_to_compare
)
assert (
global_quantizer_constraints[QuantizerGroup.WEIGHTS].qconf_attr_vs_constraint_dict
== qconf_attr_vs_constraint_dict_to_compare
)


@pytest.mark.parametrize(
Expand Down
13 changes: 8 additions & 5 deletions tests/common/quantization/test_passes.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,30 +26,33 @@
DATA_ROOT = TEST_ROOT / "common" / "data" / "reference_graphs"


class ParameterTestModes(Enum):
    """Parametrization modes for the graph-pass tests.

    Named ``ParameterTestModes`` rather than ``TestModes`` so pytest does not
    try to collect it as a test class and emit a collection warning.
    """

    VALID = "valid"
    WRONG_TENSOR_SHAPE = "wrong_dropout_node"
    WRONG_PARALLEL_EDGES = "wrong_parallel_edges"

    def __str__(self):
        # Lets pytest render readable parametrization ids (the raw value).
        return self.value


def _check_graphs(dot_file_name, nncf_graph) -> None:
    """Compare the structural view of ``nncf_graph`` with the reference .dot file under ``DATA_ROOT``."""
    reference_path = DATA_ROOT / dot_file_name
    compare_nx_graph_with_reference(
        nncf_graph.get_graph_for_structure_analysis(), reference_path, check_edge_attrs=True
    )


@pytest.mark.parametrize("mode", [TestModes.VALID, TestModes.WRONG_TENSOR_SHAPE, TestModes.WRONG_PARALLEL_EDGES])
def test_remove_nodes_and_reconnect_graph(mode: TestModes):
@pytest.mark.parametrize("mode", ParameterTestModes)
def test_remove_nodes_and_reconnect_graph(mode: ParameterTestModes):
dot_reference_path_before = Path("passes") / "dropout_synthetic_model_before.dot"
dot_reference_path_after = Path("passes") / "dropout_synthetic_model_after.dot"
dropout_metatype = "DROPOUT_METATYPE"
kwargs = {}
if mode != TestModes.VALID:
if mode != ParameterTestModes.VALID:
kwargs.update({mode.value: True})

nncf_graph = NNCFGraphDropoutRemovingCase(dropout_metatype, **kwargs).nncf_graph

if mode != TestModes.VALID:
if mode != ParameterTestModes.VALID:
with pytest.raises(AssertionError):
remove_nodes_and_reconnect_graph(nncf_graph, [dropout_metatype])
return
Expand Down
34 changes: 19 additions & 15 deletions tests/common/quantization/test_quantizer_removal.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ class Graph:


@dataclass
class TestCase:
class ParameterTestCase:
"""
:param node_name: Quantizer node's name. We want to remove this
quantizer from the model.
Expand All @@ -212,42 +212,46 @@ class TestCase:

TEST_CASES = {
"simple_graph": [
TestCase(
ParameterTestCase(
"fake_quantize_119",
["fake_quantize_139", "fake_quantize_162", "fake_quantize_119"],
["add_117", "conv2d_161"],
),
TestCase("fake_quantize_128", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]),
TestCase("fake_quantize_134", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]),
TestCase(
ParameterTestCase("fake_quantize_128", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]),
ParameterTestCase("fake_quantize_134", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]),
ParameterTestCase(
"fake_quantize_139",
["fake_quantize_139", "fake_quantize_162", "fake_quantize_119"],
["add_117", "conv2d_161"],
),
TestCase("fake_quantize_147", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]),
TestCase("fake_quantize_153", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]),
TestCase(
ParameterTestCase("fake_quantize_147", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]),
ParameterTestCase("fake_quantize_153", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]),
ParameterTestCase(
"fake_quantize_162",
["fake_quantize_139", "fake_quantize_162", "fake_quantize_119"],
["add_117", "conv2d_161"],
),
],
"graph_with_shapeof": [TestCase("fake_quantize_105", ["fake_quantize_105"], ["interpolate_115"])],
"graph_with_shapeof": [ParameterTestCase("fake_quantize_105", ["fake_quantize_105"], ["interpolate_115"])],
"simple_graph_quantize_dequantize": [
TestCase("quantize_37", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"]),
TestCase("quantize_39", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"]),
ParameterTestCase(
"quantize_37", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"]
),
ParameterTestCase(
"quantize_39", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"]
),
#
TestCase(
ParameterTestCase(
"quantize_42",
["quantize_42", "dequantize_43", "quantize_44", "dequantize_45", "quantize_63", "dequantize_64"],
["conv2d_46", "add_65"],
),
TestCase(
ParameterTestCase(
"quantize_44",
["quantize_42", "dequantize_43", "quantize_44", "dequantize_45", "quantize_63", "dequantize_64"],
["conv2d_46", "add_65"],
),
TestCase(
ParameterTestCase(
"quantize_63",
["quantize_42", "dequantize_43", "quantize_44", "dequantize_45", "quantize_63", "dequantize_64"],
["conv2d_46", "add_65"],
Expand Down Expand Up @@ -295,7 +299,7 @@ def create_test_params():


@pytest.mark.parametrize("nncf_graph,test_case", TEST_PARAMS, ids=IDS)
def test_find_quantizer_nodes_to_cut(nncf_graph: NNCFGraph, test_case: TestCase):
def test_find_quantizer_nodes_to_cut(nncf_graph: NNCFGraph, test_case: ParameterTestCase):
quantizer_node = nncf_graph.get_node_by_name(test_case.node_name)
# As test graphs are fully connected and does not have readvariable metatype,
# this should work
Expand Down
1 change: 1 addition & 0 deletions tests/common/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
-c ../../constraints.txt
pytest
pytest-cov
pytest-dependency
pytest-mock
pytest-xdist
2 changes: 1 addition & 1 deletion tests/onnx/pytest.ini
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[pytest]
markers =
e2e_ptq: e2e ptq tests
e2e_eval_original_model: original model evaluation
e2e_eval_reference_model: reference model evaluation

python_files = test_*
xfail_strict = true
62 changes: 51 additions & 11 deletions tests/onnx/quantization/test_classification_models_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,21 +23,61 @@
from tests.onnx.weightless_model import load_model_topology_with_zeros_weights

# Torchvision classification models exported to ONNX and quantized in these tests.
# Each entry: (model descriptor, model instance with pretrained weights, extra quantization kwargs).
# Weights are requested via the explicit ``weights=`` enums because the legacy
# ``pretrained=True`` flag is deprecated in torchvision and raises warnings;
# the interleaved diff residue duplicating each model with ``pretrained=True``
# is removed here.
TORCHVISION_TEST_DATA = [
    (
        ModelToTest("resnet18", [1, 3, 224, 224]),
        models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("resnet50_cpu_spr", [1, 3, 224, 224]),
        models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1),
        {"target_device": TargetDevice.CPU_SPR},
    ),
    (
        ModelToTest("mobilenet_v2", [1, 3, 224, 224]),
        models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]),
        models.mobilenet_v3_small(weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("inception_v3", [1, 3, 224, 224]),
        models.inception_v3(weights=models.Inception_V3_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("googlenet", [1, 3, 224, 224]),
        models.googlenet(weights=models.GoogLeNet_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("vgg16", [1, 3, 224, 224]),
        models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]),
        models.shufflenet_v2_x1_0(weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("squeezenet1_0", [1, 3, 224, 224]),
        models.squeezenet1_0(weights=models.SqueezeNet1_0_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("densenet121", [1, 3, 224, 224]),
        models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1),
        {},
    ),
    (
        ModelToTest("mnasnet0_5", [1, 3, 224, 224]),
        models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1),
        {},
    ),
]


Expand Down
2 changes: 1 addition & 1 deletion tests/onnx/quantization/test_opset_versions.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@

@pytest.mark.parametrize("opset_version", TEST_OPSETS)
def test_model_opset_version(tmp_path, opset_version):
model = models.mobilenet_v2(pretrained=True)
model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
input_shape = [1, 3, 224, 224]
x = torch.randn(input_shape, requires_grad=False)
torch.onnx.export(model, x, tmp_path / "model.onnx", opset_version=opset_version)
Expand Down
50 changes: 40 additions & 10 deletions tests/onnx/test_nncf_graph_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,16 +42,46 @@ def test_compare_nncf_graph_synthetic_models(model_cls_to_test):


# Torchvision classification models used for NNCF graph-builder comparison tests.
# Each entry: (model descriptor, model instance with pretrained weights).
# ``weights=`` enums replace the deprecated ``pretrained=True`` flag; the
# interleaved diff residue duplicating each model with ``pretrained=True`` is
# removed here.
CLASSIFICATION_MODEL_DEF_AND_OBJ = [
    (
        ModelToTest("resnet18", [1, 3, 224, 224]),
        models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("mobilenet_v2", [1, 3, 224, 224]),
        models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]),
        models.mobilenet_v3_small(weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("inception_v3", [1, 3, 224, 224]),
        models.inception_v3(weights=models.Inception_V3_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("googlenet", [1, 3, 224, 224]),
        models.googlenet(weights=models.GoogLeNet_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("vgg16", [1, 3, 224, 224]),
        models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]),
        models.shufflenet_v2_x1_0(weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("squeezenet1_0", [1, 3, 224, 224]),
        models.squeezenet1_0(weights=models.SqueezeNet1_0_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("densenet121", [1, 3, 224, 224]),
        models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1),
    ),
    (
        ModelToTest("mnasnet0_5", [1, 3, 224, 224]),
        models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1),
    ),
]


Expand Down
Loading
Loading