[LPT] ConvolutionBackpropData tests improvements
vzinovie committed Apr 23, 2021
1 parent 780ae9b commit 17e2096
Showing 7 changed files with 150 additions and 42 deletions.
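For orientation before the diff: the new "QDq" test cases cover weights expressed as an explicit quantize/dequantize chain (integer Constant -> Convert -> Subtract -> Multiply) rather than as a FakeQuantize op. A minimal ngraph sketch of the two weight forms these tests exercise — the function names, shapes, and values here are illustrative assumptions, not taken from the tests:

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

using namespace ngraph;

// FQ form: f32 weights quantized in-graph by a FakeQuantize op.
std::shared_ptr<Node> fqWeights() {
    auto weights = opset1::Constant::create(element::f32, Shape{1, 2, 1, 1}, {2.f, 2.f});
    auto inLow   = opset1::Constant::create(element::f32, Shape{}, {0.f});
    auto inHigh  = opset1::Constant::create(element::f32, Shape{}, {254.f});
    auto outLow  = opset1::Constant::create(element::f32, Shape{}, {-127.f});
    auto outHigh = opset1::Constant::create(element::f32, Shape{}, {127.f});
    return std::make_shared<opset1::FakeQuantize>(weights, inLow, inHigh, outLow, outHigh, 255ul);
}

// QDq form: i8 weights followed by an explicit dequantization chain.
std::shared_ptr<Node> qdqWeights() {
    auto weights   = opset1::Constant::create(element::i8, Shape{1, 2, 1, 1}, {2, 2});
    auto converted = std::make_shared<opset1::Convert>(weights, element::f32);
    auto zeroPoint = opset1::Constant::create(element::f32, Shape{}, {2.f});
    auto centered  = std::make_shared<opset1::Subtract>(converted, zeroPoint);
    auto scale     = opset1::Constant::create(element::f32, Shape{}, {0.01f});
    return std::make_shared<opset1::Multiply>(centered, scale);
}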
@@ -202,6 +202,26 @@ const std::vector<ConvolutionBackpropDataTransformationTestValues> testValues =
             true
         }
     },
+    // QDq version
+    {
+        LayerTransformation::createParamsU8I8(),
+        // ActualValues
+        {
+            ngraph::element::u8,
+            {{ngraph::element::f32}, { 128.f }, { 0.02f }},
+            {{ngraph::element::f32}, { 2.f }, { 0.01f }},
+            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f })
+        },
+        // ExpectedValues
+        {
+            ngraph::element::u8,
+            {{}, { { 128.f }, ngraph::element::f32, {}, false }, {}},
+            {{}, { { 2.f }, ngraph::element::f32, {1, 2, 1, 1}, true, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } }, {}},
+            {{}, {}, {{ 0.0002f }, ngraph::element::f32, { 1 }}},
+            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f }),
+            true
+        }
+    },
     // without zero point
     {
         LayerTransformation::createParamsU8I8(),
@@ -222,6 +242,26 @@ const std::vector<ConvolutionBackpropDataTransformationTestValues> testValues =
             true
         }
     },
+    // QDq version
+    {
+        LayerTransformation::createParamsU8I8(),
+        // ActualValues
+        {
+            ngraph::element::u8,
+            {{ngraph::element::f32}, {}, { 0.02f }},
+            {{ngraph::element::f32}, {}, { 0.01f }},
+            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f })
+        },
+        // ExpectedValues
+        {
+            ngraph::element::u8,
+            {},
+            {},
+            {{}, {}, {{ 0.0002f }, ngraph::element::f32, {1}}},
+            op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f }),
+            true
+        }
+    },
     // per-channel dequantization with the same values
     {
         LayerTransformation::createParamsU8I8(),
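A note on the test-value notation above: each dequantization is written as a {Convert, Subtract, Multiply} triple from builder::subgraph::DequantizationOperations. The field meanings below are my reading of the long Subtract form on the expected side — annotation, not reference documentation:

// {{}, { <Subtract spec> }, {}}       // Convert (none), Subtract, Multiply (none)
// Subtract spec, field by field:
//   { 2.f }                           zero-point value
//   ngraph::element::f32              output precision of the Subtract
//   {1, 2, 1, 1}                      shape of the zero-point constant
//   true                              mark with the dequantization attribute
//   1ul                               constant feeds input #1 of the Subtract
//   element::i8                       zero-point constant kept integral
//   false                             no standalone Convert on the constant
//   { "DISABLED_CONSTANT_FOLDING" }   rt_info that keeps the chain unfolded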
@@ -20,21 +20,61 @@ const std::vector<ngraph::pass::low_precision::LayerTransformation::Params> tras
 };
 
 const std::vector<LayerTestsDefinitions::ConvolutionBackpropDataTransformationParam> params = {
+    // FQ on weights
+    // with zero point
     {
-        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }},
         {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }},
         "",
         ""
     },
+    // without zero point
     {
         {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
         {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }},
         "",
         ""
     },
+    // with incorrect zero point on activations
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }},
+        {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }},
+        "",
+        ""
+    },
+    // with incorrect zero point on weights
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
+        {255ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }},
+        "",
+        ""
+    },
+    // QDq on weights
+    // with zero point
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }},
+        {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
+        "",
+        ""
+    },
+    // without zero point
     {
         {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
-        {ngraph::element::f32, { 12.f }, { 4.f }},
+        {{ngraph::element::f32}, {}, { {4.f}, ngraph::element::f32, {}, false }},
         "",
         ""
     },
+    // with incorrect zero point on activations
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }},
+        {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
+        "",
+        ""
+    },
+    // with incorrect zero point on weights
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }},
+        {{ngraph::element::f32}, { {1000.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
+        "",
+        ""
+    }
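The "incorrect zero point" entries deliberately pick FQ ranges whose zero point cannot be represented on the integer tensor, so LPT is expected to leave the layer unquantized. A rough sketch of the arithmetic under the usual asymmetric-quantization formulas — my assumption about what these cases exercise, not something stated in the diff:

// For u8 quantization of an FQ with output range [5, 6] and 256 levels:
const float outLow = 5.f, outHigh = 6.f;
const float scale = (outHigh - outLow) / 255.f;  // ~0.0039
const float zeroPoint = -outLow / scale;         // ~-1275, far outside [0, 255]
// No u8 value maps back to 0.f, so the dequantization cannot be folded into
// the convolution; the transformation is expected to bail out, which is why
// the expected layer-name and kernel-type strings stay empty.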
@@ -21,24 +21,64 @@ const std::vector<ngraph::pass::low_precision::LayerTransformation::Params> tras
 };
 
 const std::vector<LayerTestsDefinitions::ConvolutionBackpropDataTransformationParam> params = {
-    {
-        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
-        {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }},
-        "",
-        ""
-    },
-    {
-        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
-        {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }},
-        "",
-        ""
-    },
-    {
-        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
-        {ngraph::element::f32, { 12.f }, { 4.f }},
-        "",
-        ""
-    }
+    // FQ on weights
+    // with zero point
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }},
+        {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { -127.f }, { 127.f }},
+        "",
+        ""
+    },
+    // without zero point
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
+        {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }},
+        "",
+        ""
+    },
+    // with incorrect zero point on activations
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }},
+        {255ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 254.f }, { 0.f }, { 25.4f }},
+        "",
+        ""
+    },
+    // with incorrect zero point on weights
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
+        {255ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }},
+        "",
+        ""
+    },
+    // QDq on weights
+    // with zero point
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }},
+        {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
+        "",
+        ""
+    },
+    // without zero point
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { 0.f }, { 25.5f }},
+        {{ngraph::element::f32}, {}, { {4.f}, ngraph::element::f32, {}, false }},
+        "",
+        ""
+    },
+    // with incorrect zero point on activations
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 5.f }, { 6.f }, { 5.f }, { 6.f }},
+        {{ngraph::element::f32}, { {12.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
+        "",
+        ""
+    },
+    // with incorrect zero point on weights
+    {
+        {256ul, ngraph::Shape{1, 1, 1, 1}, { 0.f }, { 255.f }, { -12.7f }, { 12.8f }},
+        {{ngraph::element::f32}, { {1000.f}, ngraph::element::f32, {}, false }, { {4.f}, ngraph::element::f32, {}, false }},
+        "",
+        ""
+    }
 };
 
 const std::vector<ngraph::Shape> inputShapes = {
@@ -60,9 +60,6 @@ class ConvolutionBackpropDataTransformation :
     void SetUp() override;
 
     void Run() override;
-
-private:
-    void validate();
 };
 
 } // namespace LayerTestsDefinitions
@@ -25,7 +25,8 @@ std::string ConvolutionBackpropDataTransformation::getTestCaseName(testing::Test
     result << getTestCaseNameByParams(netPrecision, inputShape, targetDevice, params) << "_" <<
         outputShape << "_" <<
         param.fakeQuantizeOnData << "_" <<
-        param.fakeQuantizeOnWeights;
+        param.fakeQuantizeOnWeights << "_" <<
+        param.dequantizationOnWeights;
     return result.str();
 }
 
@@ -59,8 +60,6 @@ void ConvolutionBackpropDataTransformation::SetUp() {
         outputShape,
         param.fakeQuantizeOnData,
         weights);
-
-    validate();
 }
 
 void ConvolutionBackpropDataTransformation::Run() {
@@ -71,16 +70,6 @@
     EXPECT_EQ(actualType, params.expectedKernelType);
 }
 
-void ConvolutionBackpropDataTransformation::validate() {
-    ngraph::element::Type netPrecision;
-    ngraph::Shape inputShape;
-    ngraph::Shape outputShape;
-    std::string targetDevice;
-    ngraph::pass::low_precision::LayerTransformation::Params params;
-    ConvolutionBackpropDataTransformationParam param;
-    std::tie(netPrecision, inputShape, outputShape, targetDevice, params, param) = this->GetParam();
-}
-
 TEST_P(ConvolutionBackpropDataTransformation, CompareWithRefImpl) {
     Run();
 };
@@ -262,8 +262,6 @@ std::vector<std::vector<std::uint8_t>> LayerTestsCommon::CalculateRefs() {
     ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(function);
     ngraph::pass::ConvertPrecision<ngraph::element::Type_t::bf16, ngraph::element::Type_t::f32>().run_on_function(function);
 
-    ngraph::pass::VisualizeTree("/home/vzinoviev/work/model_dumps/model_after_convert.dot").run_on_function(function);
-
     function->validate_nodes_and_infer_types();
 
     auto referenceInputs = std::vector<std::vector<std::uint8_t>>(inputs.size());
@@ -68,14 +68,18 @@ std::shared_ptr<Node> ConvolutionBackpropDataFunction::getWeights(
     const element::Type& netPrecision,
     const builder::subgraph::DequantizationOperations& dequantizationOnWeights,
     const std::shared_ptr<opset1::Constant>& value) {
-    const auto weights = value != nullptr ?
+    const auto weights =
+        value != nullptr ?
         value :
         std::make_shared<opset1::Constant>(
-            netPrecision,
-            shape,
-            std::vector<float>(shape_size(shape), 1));
+            element::i8,
+            shape,
+            std::vector<float>(shape_size(shape), 1));
     auto dequantizationStructure = dequantizationOnWeights;
     dequantizationStructure.setPrecision(netPrecision);
+    if (!dequantizationOnWeights.subtract.constantPrecision.is_real()) {
+        dequantizationStructure.subtract.constantPrecision = dequantizationOnWeights.subtract.constantPrecision;
+    }
     const auto dq = makeDequantization(weights, dequantizationStructure);
 
     return dq;
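A note on the is_real() guard added in the last hunk, sketched with the field names from the test builders above (my reading of the intent; the predicate element::Type::is_real() is part of the ngraph API):

// setPrecision(netPrecision) rewrites every constant precision in the chain
// to netPrecision (f32 in these tests). For a QDq weights pattern the
// Subtract constant is the integer zero point and must stay integral for
// LPT to recognize the pattern, so the requested precision is restored:
//   element::f32.is_real() == true   -> float zero point, keep netPrecision
//   element::i8.is_real()  == false  -> integral zero point, carry it over
if (!dequantizationOnWeights.subtract.constantPrecision.is_real()) {
    dequantizationStructure.subtract.constantPrecision =
        dequantizationOnWeights.subtract.constantPrecision;
}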