[LPT] Clamp and ConcatWithDifferentPrecisionsOnChilds: tests fix
v-Golubev authored and eshoguli committed Sep 12, 2020
1 parent 047a55e commit 45186c5
Showing 7 changed files with 65 additions and 60 deletions.
==== File 1 of 7 ====
@@ -33,7 +33,7 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt

const auto subValues = constant->cast_vector<float>();
for (size_t i = 1; i < subValues.size(); ++i) {
- if (subValues[0] != subValues[i]) {
+ if (std::abs(subValues[0] - subValues[i]) > 1e-6) {
return false;
}
}
@@ -95,7 +95,7 @@ bool ClampTransformation::canBeTransformed(const TransformationContext& context,
const auto mulConstValues = mulConst->cast_vector<float>();

for (size_t i = 1; i < mulConstValues.size(); ++i) {
- if (mulConstValues[0] != mulConstValues[i]) {
+ if (std::abs(mulConstValues[0] - mulConstValues[i]) > 1e-6) {
return false;
}
}
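Note on the two hunks above: exact floating-point equality is replaced with an absolute-tolerance comparison, so per-channel constants whose elements differ only by rounding error are still treated as a single scalar value. A minimal standalone sketch of the idiom (the helper name and the default tolerance are illustrative, not part of the patch):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Returns true when every element matches the first within `tolerance`.
    bool allValuesEqual(const std::vector<float>& values, float tolerance = 1e-6f) {
        for (std::size_t i = 1; i < values.size(); ++i) {
            if (std::abs(values[0] - values[i]) > tolerance) {
                return false;
            }
        }
        return true;
    }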
==== File 2 of 7 ====
@@ -134,7 +134,7 @@ TEST_P(ConcatWithDifferentChildsTransformation, CompareFunctions) {
const ConcatTransformationTestValues testValues = std::get<2>(GetParam());

actualFunction->validate_nodes_and_infer_types();
- auto res = compare_functions(referenceFunction, actualFunction, true);
+ auto res = compare_functions(referenceFunction, actualFunction, true, true, true);
ASSERT_TRUE(res.first) << res.second;
}
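Note: the two extra true arguments opt the comparison into stricter modes than topology matching alone; judging by the ngraph test utilities of this period, these boolean flags presumably toggle constant-value and name/attribute comparison, so the precise semantics should be checked against the compare_functions declaration.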

@@ -158,7 +158,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
{
{ 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 },
{ 256ul, ngraph::Shape({}), {0.f}, {2.55f / 2.f}, {0.f}, { 128.f}, ngraph::element::u8 },
- { {}, {}, { 0.01f } },
+ { ngraph::element::f32, {}, { 0.01f } },
{ ngraph::element::f32, {}, { 0.01f } }
}
},
@@ -174,7 +174,7 @@ const std::vector<ConcatTransformationTestValues> testValues = {
{
{ 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-128.f}, {127.f}, ngraph::element::i8 },
{ 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-64.f}, { 64.f}, ngraph::element::i8 },
- { {}, {}, { 0.01f } },
+ { ngraph::element::f32, {}, { 0.01f } },
{ ngraph::element::f32, {}, { 0.01f } }
}
},
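Note: in both test vectors the first dequantization descriptor now names ngraph::element::f32 explicitly instead of leaving the precision field empty. A hedged reading, assuming the builder's DequantizationOperations keeps its usual { Convert, Subtract, Multiply } layout:

    // { ngraph::element::f32,  {},           { 0.01f } }
    //   convert to f32         no subtract   multiply by 0.01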
==== File 3 of 7 ====
@@ -54,20 +54,15 @@ const std::vector<LayerTestsDefinitions::ClampTransformationParam> params{
// per-channel quantization with the same values
{
{
- 256ul,
- ngraph::Shape{ 1, 3, 1, 1 },
+ 256ul, ngraph::Shape{ 1, 3, 1, 1 },
{ -127.f, -127.f, -127.f },
{ 128.f, 128.f, 128.f },
{ 0.f, 0.f, 0.f },
- { 25.5f, 25.5f, 25.5f }
- },
- {
- {},
- {{0.f, 0.f, 0.f}},
- {{0.1f, 0.1f, 0.1f}}
+ { 255.f, 255.f, 255.f }
},
+ {},
0.0,
- 128.0
+ 255.0
},
// per-channel quantization with different values
{
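Note: the "per-channel quantization with the same values" case now quantizes to the full 0-255 output interval instead of 0-25.5, drops the separately expected per-channel dequantization (the empty {}), and expects a clamp high bound of 255.0 instead of 128.0, presumably aligning the CPU expectations with what the transformation now produces.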
==== File 4 of 7 ====
@@ -18,10 +18,10 @@ const std::vector<ngraph::element::Type> netPrecisions = {
};

const std::vector<ngraph::pass::low_precision::LayerTransformation::Params> trasformationParamValues = {
- LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams().setUpdatePrecisions(true),
- LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams().setUpdatePrecisions(false),
- LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8(),
- LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8()
+ LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams().setUpdatePrecisions(true),
+ LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams().setUpdatePrecisions(false),
+ LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsI8I8(),
+ LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8()
(the old and new entries differ only in whitespace, which the rendered diff does not show)
};

const std::vector<LayerTestsUtils::LayerTransformation::LptVersion> versions = {
@@ -54,16 +54,15 @@ const std::vector<LayerTestsDefinitions::ClampTransformationParam> params = {
// per-channel quantization with the same values
{
{
- 256ul,
- ngraph::Shape{ 1, 3, 1, 1 },
+ 256ul, ngraph::Shape{ 1, 3, 1, 1 },
{ -127.f, -127.f, -127.f },
{ 128.f, 128.f, 128.f },
{ 0.f, 0.f, 0.f },
{ 255.f, 255.f, 255.f }
},
- { },
+ {},
0.0,
- 128.0
+ 255.0
},
// per-channel quantization with different values
{
@@ -84,7 +83,7 @@ const std::vector<LayerTestsDefinitions::ClampTransformationParam> params = {
INSTANTIATE_TEST_CASE_P(LPT, ClampTransformation,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
- ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
+ ::testing::Values(ngraph::Shape({ 1, 3, 16, 16 })),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(trasformationParamValues),
::testing::ValuesIn(versions),
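Note: besides mirroring the interval fix from the CPU test, the GPU instantiation switches the input-shape parameter from InferenceEngine::SizeVector to ngraph::Shape, matching the type the ngraph-based test fixture expects.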
==== File 5 of 7 ====
@@ -67,10 +67,9 @@ void ClampTransformation::validateNGraph() {
std::shared_ptr<ngraph::Node> parent = output->get_input_node_shared_ptr(0);
ASSERT_FALSE(parent == nullptr);
const std::string typeName = parent->get_type_name();
-
- if (!param.dequantizationAfter.empty()) {
- EXPECT_TRUE((typeName == "ScaleShiftIE") || (typeName == "ConvolutionIE"));
- if (typeName == "ScaleShiftIE") {
+ if (params.updatePrecisions) {
+ if (!param.dequantizationAfter.empty()) {
+ EXPECT_EQ("ScaleShiftIE", typeName);
EXPECT_EQ(3, parent->get_input_size());

const auto expectedScale = param.dequantizationAfter.multiply.values;
@@ -84,7 +83,7 @@ void ClampTransformation::validateNGraph() {
EXPECT_EQ(expectedShift.size(), actualShift.size());
}
} else {
EXPECT_EQ("Clamp", typeName);
if (!param.dequantizationAfter.empty()) {
EXPECT_EQ("ConvolutionIE", typeName);
}
}
}

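Note: validation is now branched on params.updatePrecisions. When precisions are updated and a dequantization is expected after the Clamp, the output's parent must be a ScaleShiftIE with three inputs and matching scale/shift sizes; when precisions are not updated, the expected parent type becomes ConvolutionIE, replacing the old unconditional Clamp check.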
==== File 6 of 7 ====
@@ -66,7 +66,6 @@ void ConcatWithDifferentChildsTransformation::SetUp() {

function = ngraph::builder::subgraph::ConcatFunction::getOriginalWithDifferentPrecisionOnChilds(
netPrecision, inputShapes, param.fqOnData1, param.fqOnData2);
-
if (version == LptVersion::cnnNetwork) {
validate();
}
@@ -94,7 +93,7 @@ void ConcatWithDifferentChildsTransformation::validate() {

const InferenceEngine::CNNLayerPtr outputLayer0 = getCreatorLayer(outputs.begin()->second).lock();
const InferenceEngine::CNNLayerPtr outputLayer1 = getCreatorLayer((++outputs.begin())->second).lock();
EXPECT_EQ("Clamp", outputLayer0->type);
EXPECT_EQ("ScaleShift", outputLayer0->type);
EXPECT_EQ("ScaleShift", outputLayer1->type);

const InferenceEngine::CNNLayerPtr layer = InferenceEngine::details::CNNNetworkHelper::getParent(*outputLayer1);
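Note: with the Clamp child replaced by AvgPool in the test subgraph (see the builder changes below), both network outputs are now expected to terminate in a ScaleShift produced by the fused dequantization, rather than one Clamp and one ScaleShift.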
==== File 7 of 7 ====
@@ -343,30 +343,42 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getOriginalWithDifferentPrecis
const std::shared_ptr<ngraph::opset1::Concat> concat = std::make_shared<ngraph::opset1::Concat>(
ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0) }, 1);

- ngraph::ResultVector results;
- const std::shared_ptr<ngraph::opset1::Clamp> clamp = std::make_shared<ngraph::opset1::Clamp>(concat->output(0), 0.0, 6.0);
- results.push_back(std::make_shared<ngraph::opset1::Result>(clamp));

const std::vector<size_t> kernel = { 3, 3 };
const std::vector<size_t> stride = { 1, 1 };
const std::vector<size_t> padBegin = { 0, 0 };
const std::vector<size_t> padEnd = { 0, 0 };
const ngraph::op::PadType padType = ngraph::op::PadType::NOTSET;
const ngraph::op::RoundingType roundingType = ngraph::op::RoundingType::FLOOR;
- const std::shared_ptr<ngraph::opset1::MaxPool> maxPool = std::make_shared<ngraph::opset1::MaxPool>(
+
+ const auto avgPool = std::make_shared<ngraph::opset1::AvgPool>(
concat->output(0),
stride,
padBegin,
padEnd,
kernel,
+ true,
roundingType,
padType);
+ avgPool->set_friendly_name("AvgPool");
+
+ const auto maxPool = std::make_shared<ngraph::opset1::MaxPool>(
+ concat->output(0),
+ stride,
+ padBegin,
+ padEnd,
+ kernel,
+ roundingType,
+ padType);
+ maxPool->set_friendly_name("MaxPool");
+
+ ngraph::ResultVector results;
+ results.push_back(std::make_shared<ngraph::opset1::Result>(avgPool));
results.push_back(std::make_shared<ngraph::opset1::Result>(maxPool));

std::shared_ptr<ngraph::Function> function = std::make_shared<ngraph::Function>(
results,
ngraph::ParameterVector{ input1, input2 },
"ConcatTransformation");
"ConcatWithDifferentChildsTransformation");

return function;
}
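Note: the original subgraph's Clamp child is replaced by an AvgPool, so Concat now feeds two pooling children. AvgPool takes one extra constructor argument relative to MaxPool, the exclude-pad flag (the true between kernel and roundingType). Both nodes also receive friendly names so validation can match outputs by name, and the function name is corrected to ConcatWithDifferentChildsTransformation to match the test.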
@@ -851,32 +863,30 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getReferenceWithDifferentPreci
ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0) }, 1);
concat->set_friendly_name("concat");

- ngraph::ResultVector results;
- if (!multiChannel) {
- std::shared_ptr<ngraph::opset1::Clamp> clamp = std::make_shared<op::TypeRelaxed<ngraph::opset1::Clamp>>(concat, 0, 10);
- ngraph::pass::low_precision::NetworkHelper::setOutDataPrecision(clamp, precision);
- clamp->set_friendly_name("clamp");
-
- const std::shared_ptr<ngraph::Node> lastDequantization1 = dequantizationOperations1.empty() ?
- clamp :
- makeDequantization(clamp->output(0), dequantizationOperations1);
- results.push_back(std::make_shared<ngraph::opset1::Result>(lastDequantization1));
- } else {
- const std::shared_ptr<ngraph::Node> lastDequantization1 =
- makeDequantization(concat->output(0), dequantizationOperations1);
+ const auto lastDequantization1 = makeDequantization(concat->output(0), dequantizationOperations1);

- const std::shared_ptr<ngraph::opset1::Clamp> clamp = std::make_shared<ngraph::opset1::Clamp>(lastDequantization1, 0.0, 6.0);
- clamp->set_friendly_name("clamp");
- results.push_back(std::make_shared<ngraph::opset1::Result>(clamp));
- }
+ const std::vector<size_t> kernel = { 3, 3 };
+ const std::vector<size_t> stride = { 1, 1 };
+ const std::vector<size_t> padBegin = { 0, 0 };
+ const std::vector<size_t> padEnd = { 0, 0 };
+ const ngraph::op::PadType padType = ngraph::op::PadType::NOTSET;
+ const ngraph::op::RoundingType roundingType = ngraph::op::RoundingType::FLOOR;

+ const auto avgPool = std::make_shared<ngraph::opset1::AvgPool>(
+ lastDequantization1,
+ stride,
+ padBegin,
+ padEnd,
+ kernel,
+ true,
+ roundingType,
+ padType);
+ avgPool->set_friendly_name("AvgPool");
+
+ ngraph::ResultVector results;
+ results.push_back(std::make_shared<ngraph::opset1::Result>(avgPool));
+
if (!dequantizationOperations2.empty()) {
- const std::vector<size_t> kernel = { 3, 3 };
- const std::vector<size_t> stride = { 1, 1 };
- const std::vector<size_t> padBegin = { 0, 0 };
- const std::vector<size_t> padEnd = { 0, 0 };
- const ngraph::op::PadType padType = ngraph::op::PadType::NOTSET;
- const ngraph::op::RoundingType roundingType = ngraph::op::RoundingType::FLOOR;
const std::shared_ptr<ngraph::opset1::MaxPool> maxPool = std::make_shared<ngraph::opset1::MaxPool>(
concat->output(0),
stride,
@@ -887,13 +897,14 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getReferenceWithDifferentPreci
padType);

const std::shared_ptr<ngraph::Node> lastDequantization2 = makeDequantization(maxPool, dequantizationOperations2);
+ lastDequantization2->set_friendly_name("MaxPool");
results.push_back(std::make_shared<ngraph::opset1::Result>(lastDequantization2));
}

std::shared_ptr<ngraph::Function> function = std::make_shared<ngraph::Function>(
results,
ngraph::ParameterVector{ input1, input2 },
"ConcatWithIntermediateTransformation");
"ConcatWithDifferentChildsTransformation");

if ((fqOnData1.outputPrecision != fqOnData2.outputPrecision)) {
THROW_IE_EXCEPTION << "FakeQuantize expected precisions are different";
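Note: the reference function drops the multiChannel-dependent Clamp branches in favor of a single dequantize-then-AvgPool path. AvgPool consumes the already-dequantized f32 tensor, while MaxPool still consumes the quantized concat output and is dequantized afterwards; the two children therefore exercise different precision paths, which is what the test name promises. The MaxPool branch's dequantization gains the friendly name "MaxPool" so outputs can be matched by name, and the function name is fixed to ConcatWithDifferentChildsTransformation here as well.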
