[LPT] ConvolutionBackpropData Transformation fix (#5924)
* [LPT] ConvolutionBackpropData: handled incorrect dequantization on weights

* [LPT][TESTS] ConvolutionBackpropData: added test-cases with incorrect dequantization on weights
v-Golubev authored Jun 1, 2021
1 parent 1264376 commit 04b1f22
Showing 3 changed files with 116 additions and 1 deletion.
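
Background for the change: LPT moves dequantization on weights through the layer only when the constants are scalar or per-output-channel. For opset1::Convolution the output-channel dimension of the weights constant is dimension 0, but opset1::ConvolutionBackpropData stores weights with output channels at dimension 1, so a constant such as { 8, 1, 1, 1 } is per-batch rather than per-output-channel and must be rejected. A minimal standalone sketch of the added guard, under simplified assumptions; the helper name, the Shape alias, and the local shape_size are illustrative, not the repository's code:

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

using Shape = std::vector<std::size_t>;

// Total number of elements in a constant of the given shape.
std::size_t shape_size(const Shape& shape) {
    return std::accumulate(shape.begin(), shape.end(), std::size_t{1},
                           std::multiplies<std::size_t>());
}

// A dequantization constant on weights is supported when it is a scalar or
// broadcasts along the output-channel dimension of the weights: index 0 for
// Convolution ([C_OUT, C_IN, ...]), index 1 for ConvolutionBackpropData
// ([C_IN, C_OUT, ...]).
bool dequantizationConstantIsSupported(const Shape& constShape, bool isBackpropData) {
    const std::size_t outChannelsShapeIndex = isBackpropData ? 1ul : 0ul;
    const std::size_t totalSize = shape_size(constShape);
    return totalSize == 1ul ||
           (outChannelsShapeIndex < constShape.size() &&
            totalSize == constShape[outChannelsShapeIndex]);
}

For a ConvolutionBackpropData layer, dequantizationConstantIsSupported({ 1, 2, 1, 1 }, true) holds (2 elements along the output-channel axis), while dequantizationConstantIsSupported({ 8, 1, 1, 1 }, true) does not.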
@@ -188,6 +188,20 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext
                return false;
            }
        }

        const size_t outChannelsShapeIndex = is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
        if (dequantizationOnWeights.subtract) {
            const auto subConstShape = dequantizationOnWeights.subtractConstant->get_shape();
            if (shape_size(subConstShape) > 1ul && shape_size(subConstShape) != subConstShape[outChannelsShapeIndex]) {
                return false;
            }
        }
        if (dequantizationOnWeights.multiply) {
            const auto mulConstShape = dequantizationOnWeights.multiplyConstant->get_shape();
            if (shape_size(mulConstShape) > 1ul && shape_size(mulConstShape) != mulConstShape[outChannelsShapeIndex]) {
                return false;
            }
        }
    }

    return true;
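
Worked example for a ConvolutionBackpropData layer, where outChannelsShapeIndex == 1: a multiply constant of shape { 1, 2, 1, 1 } has shape_size == 2 == shape[1] and passes; a constant of shape { 8, 1, 1, 1 } has shape_size == 8 != shape[1] == 1, so canBeTransformed returns false and the layer is left untouched (the per-batch case from issue #56886).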
@@ -232,6 +246,20 @@ bool WeightableLayerTransformation::isQuantized(std::shared_ptr<Node> layer, boo
            }
        }

        const size_t outChannelsShapeIndex = is_type<opset1::ConvolutionBackpropData>(layer) ? 1ul : 0ul;
        if (dequantizationOnWeights.subtract) {
            const auto subConstShape = dequantizationOnWeights.subtractConstant->get_shape();
            if (shape_size(subConstShape) > 1ul && shape_size(subConstShape) != subConstShape[outChannelsShapeIndex]) {
                return false;
            }
        }
        if (dequantizationOnWeights.multiply) {
            const auto mulConstShape = dequantizationOnWeights.multiplyConstant->get_shape();
            if (shape_size(mulConstShape) > 1ul && shape_size(mulConstShape) != mulConstShape[outChannelsShapeIndex]) {
                return false;
            }
        }

        return true;
    } else if (is_type<opset1::FakeQuantize>(dequantizationOnWeights.data.get_node())) {
        return true;
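
The same guard is duplicated in isQuantized, presumably so that this predicate stays consistent with canBeTransformed: a layer whose weight dequantization would be rejected is not reported as quantized in the first place.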
@@ -322,6 +322,86 @@ const std::vector<ConvolutionBackpropDataTransformationTestValues> testValues =
            true
        }
    },
    // per-channel dequantization on weights
    {
        LayerTransformation::createParamsU8I8(),
        // ActualValues
        {
            ngraph::element::u8,
            {{ngraph::element::f32}, {}, { 0.02f }},
            { 255ul, Shape({ 1, 2, 1, 1 }), { 0.f }, { 254.f }, { -1.27f }, { 1.27f } },
            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f })
        },
        // ExpectedValues
        {
            ngraph::element::u8,
            {},
            {},
            {{}, {}, {{ 0.0002f }, ngraph::element::f32, { 1, 2, 1, 1 }}},
            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ -125.f }),
            true
        }
    },
    // QDq version
    {
        LayerTransformation::createParamsU8I8(),
        // ActualValues
        {
            ngraph::element::u8,
            {{ngraph::element::f32}, {}, { 0.02f }},
            {{ngraph::element::f32}, {}, { std::vector<float>{0.01f, 0.01f} }},
            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f })
        },
        // ExpectedValues
        {
            ngraph::element::u8,
            {},
            {},
            {{}, {}, {{ 0.0002f }, ngraph::element::f32, { 1, 2, 1, 1 }}},
            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f }),
            true
        }
    },
    // issue #56886: unsupported per-batch dequantization on weights
    {
        LayerTransformation::createParamsU8I8(),
        // ActualValues
        {
            ngraph::element::u8,
            {{ngraph::element::f32}, {}, { 0.02f }},
            { 255ul, Shape({ 8, 1, 1, 1 }), { 0.f }, { 254.f }, { -1.27f }, { 1.27f } },
            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f })
        },
        // ExpectedValues
        {
            ngraph::element::u8,
            {{ngraph::element::f32}, {}, { 0.02f }},
            {},
            {},
            op::Constant::create(ngraph::element::f32, ngraph::Shape{}, std::vector<float>{ -1.25f }),
            true
        }
    },
    // QDq version
    {
        LayerTransformation::createParamsU8I8(),
        // ActualValues
        {
            ngraph::element::u8,
            {{ngraph::element::f32}, {}, { 0.02f }},
            {{ngraph::element::f32}, {}, { std::vector<float>{0.01f}, ngraph::element::f32, {8, 1, 1, 1} }},
            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f })
        },
        // ExpectedValues
        {
            ngraph::element::u8,
            {{ngraph::element::f32}, {}, { 0.02f }},
            {},
            {},
            op::Constant::create(ngraph::element::f32, ngraph::Shape{}, std::vector<float>{ 0.02f }),
            true
        }
    },
};
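
Reading the expected values above: in the accepted per-channel cases the weight scale is merged into the dequantization after the operation (0.02f * 0.01f = 0.0002f with shape { 1, 2, 1, 1 }), and the FakeQuantize on weights folds to i8 (weight 2.f quantizes to 2 - 127 = -125). In the rejected per-batch cases the shape { 8, 1, 1, 1 } varies along dimension 0 rather than the output-channel dimension 1, so the input dequantization { 0.02f } stays in place and the weights are only constant-folded to f32: 2.f through the FakeQuantize mapping [0, 254] -> [-1.27, 1.27] gives 2 / 254 * 2.54 - 1.27 = -1.25f, and 2.f * 0.01f = 0.02f in the QDq variant.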

INSTANTIATE_TEST_CASE_P(
@@ -69,14 +69,21 @@ const std::vector<LayerTestsDefinitions::ConvolutionBackpropDataTransformationPa
        {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }},
        {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
        "",
        ""
    },
    // with incorrect zero point on weights
    {
        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }},
        {{ngraph::element::f32}, { {1000.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
        "",
        ""
    },
    // issue #56886: with incorrect dequantization on weights
    {
        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
        {{ngraph::element::f32}, {}, { {4.f, 2.f, 4.f, 2.f, 4.f, 2.f, 4.f, 2.f}, ngraph::element::f32, {8, 1, 1, 1}, false }},
        "",
        ""
    }
};
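
The added plugin-level case exercises the same rejection path end-to-end: the multiply constant { 4.f, 2.f, ... } of shape { 8, 1, 1, 1 } varies along dimension 0 of the weights, so the transformation is expected to skip the layer rather than propagate the dequantization.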
