From a0e4209a702bd480db1e3caf3e5cc74641c1832a Mon Sep 17 00:00:00 2001
From: Edward Shogulin
Date: Mon, 19 Oct 2020 00:58:15 +0300
Subject: [PATCH] [LPT] LPT CNNNetwork usage macros: part #2: cmake files update and tests adoption

---
 .../src/cldnn_engine/cldnn_program.cpp        | 2 +
 .../CMakeLists.txt                            | 7 +
 .../src/mkldnn_plugin/mkldnn_exec_network.cpp | 6 +-
 .../src/mkldnn_plugin/mkldnn_graph.cpp        | 1 -
 .../eltwise_base_transformation.hpp           | 1 +
 .../eltwise_base_transformation.cpp           | 2 +-
 ...ise_transformation_is_broadcasted_test.cpp | 31 +-
 ...twise_transformation_is_supported_test.cpp | 70 -----
 .../low_precision_transformations_test.cpp    | 42 +--
 .../precision_details_test.cpp                | 17 +-
 ...d_two_output_branches_with_convolution.cpp | 2 +-
 ...ize_precision_selection_transformation.cpp | 2 +-
 .../fake_quantize_transformation.cpp          | 2 +-
 ...uantize_and_scale_shift_transformation.cpp | 2 +-
 .../gemm_transformation.cpp                   | 2 +-
 .../layer_transformation.cpp                  | 42 ---
 ...put_layers_handling_in_transformations.cpp | 2 +-
 ...handling_in_transformations_for_concat.cpp | 2 +-
 ...ansformations_for_concat_multi_channel.cpp | 2 +-
 .../permute_transformation.cpp                | 3 +-
 .../squeeze_transformation.cpp                | 4 +-
 .../subtract_transformation.cpp               | 3 +-
 .../unsqueeze_transformation.cpp              | 4 +-
 ...d_two_output_branches_with_convolution.cpp | 2 +-
 ...ize_precision_selection_transformation.cpp | 2 +-
 .../fake_quantize_transformation.cpp          | 2 +-
 ...uantize_and_scale_shift_transformation.cpp | 2 +-
 .../gemm_transformation.cpp                   | 2 +-
 .../layer_transformation.cpp                  | 40 ---
 ...put_layers_handling_in_transformations.cpp | 2 +-
 ...handling_in_transformations_for_concat.cpp | 2 +-
 ...ansformations_for_concat_multi_channel.cpp | 2 +-
 .../permute_transformation.cpp                | 3 +-
 .../squeeze_transformation.cpp                | 4 +-
 .../subtract_transformation.cpp               | 3 +-
 .../unsqueeze_transformation.cpp              | 4 +-
 ...d_two_output_branches_with_convolution.hpp | 2 +-
 ...ize_precision_selection_transformation.hpp | 2 +-
 .../fake_quantize_transformation.hpp          | 2 +-
 ...uantize_and_scale_shift_transformation.hpp | 2 +-
 ...ansformations_for_concat_multi_channel.hpp | 2 +-
 .../permute_transformation.hpp                | 2 +-
 .../squeeze_transformation.hpp                | 2 +-
 .../unsqueeze_transformation.hpp              | 2 +-
 ...cat_with_different_precision_on_childs.cpp | 2 -
 ...oncat_with_intermediate_transformation.cpp | 2 -
 .../concat_with_split_transformation.cpp      | 2 -
 ...d_two_output_branches_with_convolution.cpp | 4 +-
 ...ize_precision_selection_transformation.cpp | 4 +-
 .../fake_quantize_transformation.cpp          | 4 +-
 .../fully_connected_transformation.cpp        | 1 -
 ...uantize_and_scale_shift_transformation.cpp | 4 +-
 .../gemm_transformation.cpp                   | 8 +-
 ..._constant_fake_quantize_transformation.cpp | 4 +-
 ...multiply_with_one_parent_transformaion.cpp | 2 +-
 ...put_layers_handling_in_transformations.cpp | 6 +-
 ...handling_in_transformations_for_concat.cpp | 6 +-
 ...ansformations_for_concat_multi_channel.cpp | 10 +-
 .../permute_transformation.cpp                | 4 +-
 .../squeeze_transformation.cpp                | 6 +-
 .../subtract_transformation.cpp               | 4 +-
 .../unsqueeze_transformation.cpp              | 6 +-
 .../layer_transformation.cpp                  | 286 +-----------------
 .../layer_transformation.hpp                  | 52 +---
 .../functional/cldnn/CMakeLists.txt           | 9 +-
 .../functional/mkldnn/CMakeLists.txt          | 9 +-
 .../functional/shared_tests/CMakeLists.txt    | 14 +-
 ...ecision_transformer_single_layer_tests.hpp | 3 +
 .../tests_deprecated/helpers/CMakeLists.txt   | 2 +
 .../helpers/tests_common_func.hpp             | 1 -
 70 files changed, 180 insertions(+), 612 deletions(-)
 delete mode 100644 
inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_supported_test.cpp diff --git a/inference-engine/src/cldnn_engine/cldnn_program.cpp b/inference-engine/src/cldnn_engine/cldnn_program.cpp index 77c413762c0dce..63f321f3de7b7e 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_program.cpp @@ -88,9 +88,11 @@ #include #include +#ifdef USE_CNNNETWORK_LPT #include "low_precision_transformations/transformer.hpp" #include "low_precision_transformations/fully_connected.hpp" #include "low_precision_transformations/gemm.hpp" +#endif #include #include diff --git a/inference-engine/src/low_precision_transformations/CMakeLists.txt b/inference-engine/src/low_precision_transformations/CMakeLists.txt index fac16665b65e6a..ac205629185294 100644 --- a/inference-engine/src/low_precision_transformations/CMakeLists.txt +++ b/inference-engine/src/low_precision_transformations/CMakeLists.txt @@ -2,6 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 # +if (NOT USE_CNNNETWORK_LPT) + add_library("inference_engine_lp_transformations" ALIAS "inference_engine_transformations") + return() +endif() + set (TARGET_NAME "inference_engine_lp_transformations") set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include") @@ -49,3 +54,5 @@ install(TARGETS ${TARGET_NAME} RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core) + +target_compile_definitions(${TARGET_NAME} PUBLIC USE_CNNNETWORK_LPT) \ No newline at end of file diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp index 8d5735584d565f..e7bad8e156c45e 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp @@ -16,11 +16,13 @@ #include #include #include + +#ifdef USE_CNNNETWORK_LPT #include "low_precision_transformations/convolution.hpp" -#include "low_precision_transformations/eltwise.hpp" -#include "low_precision_transformations/fully_connected.hpp" #include "low_precision_transformations/scaleshift_to_convolution.hpp" #include "low_precision_transformations/transformer.hpp" +#endif + #include #include #include diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp index ceba7da8431738..98cd6f84c6b84a 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp @@ -32,7 +32,6 @@ #include "precision_utils.h" #include -#include "low_precision_transformations/transformer.hpp" #include "utils/blob_dump.h" diff --git a/inference-engine/src/transformations/include/transformations/low_precision/eltwise_base_transformation.hpp b/inference-engine/src/transformations/include/transformations/low_precision/eltwise_base_transformation.hpp index f599a0889995fb..f9bf7a1082a795 100644 --- a/inference-engine/src/transformations/include/transformations/low_precision/eltwise_base_transformation.hpp +++ b/inference-engine/src/transformations/include/transformations/low_precision/eltwise_base_transformation.hpp @@ -18,6 +18,7 @@ class TRANSFORMATIONS_API EltwiseBaseTransformation : public LayerTransformation bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; + 
static bool isBroadcasted(const Shape& shape) noexcept; protected: int getNotEmpty(const std::shared_ptr& eltwise) const; std::pair getMultiplyConstBranch(const std::shared_ptr& eltwise) const; diff --git a/inference-engine/src/transformations/src/transformations/low_precision/eltwise_base_transformation.cpp b/inference-engine/src/transformations/src/transformations/low_precision/eltwise_base_transformation.cpp index bc5074066cde04..5a78831e086695 100644 --- a/inference-engine/src/transformations/src/transformations/low_precision/eltwise_base_transformation.cpp +++ b/inference-engine/src/transformations/src/transformations/low_precision/eltwise_base_transformation.cpp @@ -14,7 +14,7 @@ using namespace ngraph; using namespace ngraph::pass; using namespace ngraph::pass::low_precision; -bool isBroadcasted(const Shape& shape) noexcept { +bool EltwiseBaseTransformation::isBroadcasted(const Shape& shape) noexcept { const size_t spatialIndex = shape.size() == 1 ? 0ul : (shape.size() == 2ul ? 1ul : 2ul); for (size_t i = spatialIndex; i < shape.size(); ++i) { if (shape[i] != 1ul) { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_broadcasted_test.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_broadcasted_test.cpp index ab91bcde1f0a0f..1e459fb26b4ffa 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_broadcasted_test.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_broadcasted_test.cpp @@ -3,46 +3,47 @@ // #include -#include "low_precision_transformations/eltwise.hpp" +#include "transformations/low_precision/eltwise_base_transformation.hpp" #include using namespace ::testing; using namespace std; -using namespace InferenceEngine; -using namespace InferenceEngine::details; + +using namespace ngraph; +using namespace ngraph::pass::low_precision; class EltwiseTransformationIsBroadcastedTests : public ::testing::Test { protected: - const TensorDesc c1 = TensorDesc(Precision::FP32, { 1ul }, Layout::C); - const TensorDesc c1000 = TensorDesc(Precision::FP32, { 1000ul }, Layout::C); - const TensorDesc n1c1 = TensorDesc(Precision::FP32, { 1ul, 1ul }, Layout::NC); - const TensorDesc n1c256 = TensorDesc(Precision::FP32, { 1ul, 256ul }, Layout::NC); - const TensorDesc n1c1000h1w1 = TensorDesc(Precision::FP32, { 1ul, 1000ul, 1ul, 1ul }, Layout::NCHW); - const TensorDesc n1c32h144w144 = TensorDesc(Precision::FP32, { 1ul, 32ul, 144ul, 144ul }, Layout::NCHW); + const Shape c1 = Shape({ 1ul }); + const Shape c1000 = Shape({ 1000ul }); + const Shape n1c1 = Shape({ 1ul, 1ul }); + const Shape n1c256 = Shape({ 1ul, 256ul }); + const Shape n1c1000h1w1 = Shape({ 1ul, 1000ul, 1ul, 1ul }); + const Shape n1c32h144w144 = Shape({ 1ul, 32ul, 144ul, 144ul }); }; TEST_F(EltwiseTransformationIsBroadcastedTests, c1) { - ASSERT_TRUE(EltwiseTransformation::isBroadcasted(c1)); + ASSERT_TRUE(EltwiseBaseTransformation::isBroadcasted(c1)); } TEST_F(EltwiseTransformationIsBroadcastedTests, c1000) { - ASSERT_FALSE(EltwiseTransformation::isBroadcasted(c1000)); + ASSERT_FALSE(EltwiseBaseTransformation::isBroadcasted(c1000)); } TEST_F(EltwiseTransformationIsBroadcastedTests, n1c1) { - ASSERT_TRUE(EltwiseTransformation::isBroadcasted(n1c1)); + ASSERT_TRUE(EltwiseBaseTransformation::isBroadcasted(n1c1)); } TEST_F(EltwiseTransformationIsBroadcastedTests, n1c256) { - 
ASSERT_FALSE(EltwiseTransformation::isBroadcasted(n1c256)); + ASSERT_FALSE(EltwiseBaseTransformation::isBroadcasted(n1c256)); } TEST_F(EltwiseTransformationIsBroadcastedTests, n1c1000h1w1) { - ASSERT_TRUE(EltwiseTransformation::isBroadcasted(n1c1000h1w1)); + ASSERT_TRUE(EltwiseBaseTransformation::isBroadcasted(n1c1000h1w1)); } TEST_F(EltwiseTransformationIsBroadcastedTests, n1c32h144w144) { - ASSERT_FALSE(EltwiseTransformation::isBroadcasted(n1c32h144w144)); + ASSERT_FALSE(EltwiseBaseTransformation::isBroadcasted(n1c32h144w144)); } diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_supported_test.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_supported_test.cpp deleted file mode 100644 index db9b7b78229331..00000000000000 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/eltwise_transformation_is_supported_test.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "low_precision_transformations/eltwise.hpp" - -#include - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -class EltwiseTransformationIsSupportedTests : public ::testing::Test { -protected: - const TensorDesc n1c1000h1w1 = TensorDesc(Precision::FP32, { 1ul, 1000ul, 1ul, 1ul }, Layout::NCHW); - const TensorDesc n1c2000h1w1 = TensorDesc(Precision::FP32, { 1ul, 1000ul, 1ul, 1ul }, Layout::NCHW); - const TensorDesc n1c1000 = TensorDesc(Precision::FP32, { 1ul, 1000ul }, Layout::NC); - const TensorDesc n1c1 = TensorDesc(Precision::FP32, { 1ul, 1ul }, Layout::NC); - const TensorDesc n1c2000 = TensorDesc(Precision::FP32, { 1ul, 2000ul }, Layout::NC); - const TensorDesc c1 = TensorDesc(Precision::FP32, { 1ul }, Layout::C); - const TensorDesc c1000 = TensorDesc(Precision::FP32, { 1000ul }, Layout::C); - const TensorDesc c2000 = TensorDesc(Precision::FP32, { 2000ul }, Layout::C); -}; - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000h1w1_and_n1c2000h1w1) { - ASSERT_TRUE(EltwiseTransformation::isSupported(n1c1000h1w1, n1c2000h1w1)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000h1w1_and_n1c1000h1w1) { - ASSERT_TRUE(EltwiseTransformation::isSupported(n1c1000h1w1, n1c1000h1w1)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000h1w1_and_n1c1000) { - ASSERT_TRUE(EltwiseTransformation::isSupported(n1c1000h1w1, n1c1000)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000h1w1_and_n1c2000) { - ASSERT_FALSE(EltwiseTransformation::isSupported(n1c1000h1w1, n1c2000)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000h1w1_and_c1) { - ASSERT_TRUE(EltwiseTransformation::isSupported(n1c1000h1w1, c1)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000h1w1_and_c1000) { - ASSERT_TRUE(EltwiseTransformation::isSupported(n1c1000h1w1, c1000)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000h1w1_and_c2000) { - ASSERT_FALSE(EltwiseTransformation::isSupported(n1c1000h1w1, c2000)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000_and_n1c1000) { - ASSERT_TRUE(EltwiseTransformation::isSupported(n1c1000, n1c1000)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000_and_n1c2000) { - ASSERT_FALSE(EltwiseTransformation::isSupported(n1c1000, n1c2000)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c2000h1w1_and_n1c1000) { - 
ASSERT_TRUE(EltwiseTransformation::isSupported(n1c2000h1w1, n1c1000)); -} - -TEST_F(EltwiseTransformationIsSupportedTests, n1c1000_and_n1c1) { - ASSERT_TRUE(EltwiseTransformation::isSupported(n1c1000, n1c1)); -} diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/low_precision_transformations_test.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/low_precision_transformations_test.cpp index 775eb795d58fe0..6844f319ba423f 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/low_precision_transformations_test.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/low_precision_transformations_test.cpp @@ -3,50 +3,50 @@ // #include -#include "low_precision_transformations/transformer.hpp" +#include "transformations/low_precision/transformer.hpp" using namespace ::testing; -using namespace InferenceEngine; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; class LowPrecisionTransformationsTests : public Test {}; TEST_F(LowPrecisionTransformationsTests, remove) { LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); - LayerTransformationPtr transformation = transformations.find("ScaleShift"); - ASSERT_NE(nullptr, transformation); + auto transformation = transformations.find("Convolution"); + ASSERT_NE(0, transformation.size()); - transformations.remove("ScaleShift"); - transformation = transformations.find("ScaleShift"); - ASSERT_EQ(nullptr, transformation); + transformations.remove("Convolution"); + transformation = transformations.find("Convolution"); + ASSERT_EQ(0, transformation.size()); } TEST_F(LowPrecisionTransformationsTests, removeBranchSpecificTransformations) { LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); - LayerTransformationPtr transformation = transformations.find("Concat"); - ASSERT_NE(nullptr, transformation); + auto transformation = transformations.find("Concat"); + ASSERT_NE(0, transformation.size()); transformations.removeBranchSpecificTransformations("Concat"); transformation = transformations.find("Concat"); - ASSERT_EQ(nullptr, transformation); + ASSERT_EQ(0, transformation.size()); } TEST_F(LowPrecisionTransformationsTests, removeTransformations) { LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); - LayerTransformationPtr transformation = transformations.find("FullyConnected"); - ASSERT_NE(nullptr, transformation); + auto transformation = transformations.find("MatMul"); + ASSERT_NE(0, transformation.size()); - transformations.removeTransformations("FullyConnected"); - transformation = transformations.find("FullyConnected"); - ASSERT_EQ(nullptr, transformation); + transformations.removeTransformations("MatMul"); + transformation = transformations.find("MatMul"); + ASSERT_EQ(0, transformation.size()); } TEST_F(LowPrecisionTransformationsTests, removeCleanupTransformations) { LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); - LayerTransformationPtr transformation = transformations.find("ScaleShift"); - ASSERT_NE(nullptr, transformation); + auto transformation = transformations.find("Multiply"); + ASSERT_NE(0, transformation.size()); + const size_t originalSize = transformation.size(); - 
transformations.removeCleanupTransformations("ScaleShift"); - transformation = transformations.find("ScaleShift"); - ASSERT_EQ(nullptr, transformation); + transformations.removeCleanupTransformations("Multiply"); + transformation = transformations.find("Multiply"); + ASSERT_EQ(originalSize - 1, transformation.size()); } diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/precision_details_test.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/precision_details_test.cpp index e77528cae6b1a7..23cb0a8845ecc4 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/precision_details_test.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/precision_details_test.cpp @@ -4,15 +4,14 @@ #include #include -#include "low_precision_transformations/layer_transformation.hpp" -#include "low_precision_transformations/fake_quantize.hpp" +#include "transformations/low_precision/layer_transformation.hpp" +#include "transformations/low_precision/fake_quantize.hpp" #include using namespace ::testing; using namespace std; -using namespace InferenceEngine; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; class PrecisionDetailsTests : public ::testing::Test { protected: @@ -27,7 +26,7 @@ TEST_F(PrecisionDetailsTests, getPrecisionDetailsI8levels255WithoutZeroPoint) { LayerTransformation::Params params = LayerTransformation::Params(); FakeQuantizeTransformation fakeQuantizeTransformation(params); const LayerTransformation::PrecisionDetails precisionDetails = fakeQuantizeTransformation.getPrecisionDetails(i8levels255WithoutZeroPoint); - ASSERT_EQ(Precision::I8, precisionDetails.precision); + ASSERT_EQ(ngraph::element::i8, precisionDetails.precision); ASSERT_TRUE(precisionDetails.hasNegativeOutput); ASSERT_FALSE(precisionDetails.hasZeroPoint); } @@ -36,7 +35,7 @@ TEST_F(PrecisionDetailsTests, getPrecisionDetailsI8levels255WithZeroPoint) { LayerTransformation::Params params = LayerTransformation::Params(); FakeQuantizeTransformation fakeQuantizeTransformation(params); const LayerTransformation::PrecisionDetails precisionDetails = fakeQuantizeTransformation.getPrecisionDetails(i8levels255WithZeroPoint); - ASSERT_EQ(Precision::UNSPECIFIED, precisionDetails.precision); + ASSERT_EQ(ngraph::element::undefined, precisionDetails.precision); ASSERT_TRUE(precisionDetails.hasNegativeOutput); ASSERT_TRUE(precisionDetails.hasZeroPoint); } @@ -45,7 +44,7 @@ TEST_F(PrecisionDetailsTests, getPrecisionDetailsI8levels256WithoutZeroPoint) { LayerTransformation::Params params = LayerTransformation::Params(); FakeQuantizeTransformation fakeQuantizeTransformation(params); const LayerTransformation::PrecisionDetails precisionDetails = fakeQuantizeTransformation.getPrecisionDetails(i8levels256WithoutZeroPoint); - ASSERT_EQ(Precision::I8, precisionDetails.precision); + ASSERT_EQ(ngraph::element::i8, precisionDetails.precision); ASSERT_TRUE(precisionDetails.hasNegativeOutput); ASSERT_FALSE(precisionDetails.hasZeroPoint); } @@ -54,7 +53,7 @@ TEST_F(PrecisionDetailsTests, getPrecisionDetailsU8levels256WithoutZeroPoint) { LayerTransformation::Params params = LayerTransformation::Params(); FakeQuantizeTransformation fakeQuantizeTransformation(params); const LayerTransformation::PrecisionDetails precisionDetails = fakeQuantizeTransformation.getPrecisionDetails(u8levels256WithoutZeroPoint); - ASSERT_EQ(Precision::U8, precisionDetails.precision); + ASSERT_EQ(ngraph::element::u8, 
precisionDetails.precision); ASSERT_FALSE(precisionDetails.hasNegativeOutput); ASSERT_FALSE(precisionDetails.hasZeroPoint); } @@ -63,7 +62,7 @@ TEST_F(PrecisionDetailsTests, getPrecisionDetailsU8levels256WithZeroPoint) { LayerTransformation::Params params = LayerTransformation::Params(); FakeQuantizeTransformation fakeQuantizeTransformation(params); const LayerTransformation::PrecisionDetails precisionDetails = fakeQuantizeTransformation.getPrecisionDetails(u8levels256WithZeroPoint); - ASSERT_EQ(Precision::UNSPECIFIED, precisionDetails.precision); + ASSERT_EQ(ngraph::element::undefined, precisionDetails.precision); ASSERT_FALSE(precisionDetails.hasNegativeOutput); ASSERT_TRUE(precisionDetails.hasZeroPoint); } diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index 27448c277e56fc..e22490fc8327e3 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index cfa42f594659e8..e2a077922c0bae 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -9,7 +9,7 @@ #include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp index 60278add3fecac..d7e8575838ce0d 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp @@ -9,7 +9,7 @@ #include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git 
a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp index fc297accf9fc28..d610ba6b30e91f 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp @@ -9,7 +9,7 @@ #include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp index fabe8486c4eb5d..1cb4019c9aec6f 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp index d1a9b5b34fdea4..713d5064ef7ffa 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp @@ -39,24 +39,11 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "low_precision_transformations/convolution.hpp" -#include "low_precision_transformations/scaleshift_to_convolution.hpp" - #include #include namespace LayerTestsUtils { -InferenceEngine::details::LowPrecisionTransformations LayerTransformation::getLowPrecisionTransformations( - const InferenceEngine::details::LayerTransformation::Params& params) const { - return InferenceEngine::details::LowPrecisionTransformer::getAllTransformations(params). - add(InferenceEngine::details::LayerTransformation::Params(params). - setPrecisionsOnActivations({ InferenceEngine::Precision::U8 }), "Convolution"). - addCleanup( - InferenceEngine::details::LayerTransformation::Params(params).setPrecisionsOnActivations({ InferenceEngine::Precision::U8 }), - "ScaleShift"); -} - ngraph::pass::low_precision::LowPrecisionTransformations LayerTransformation::getLowPrecisionTransformationsNGraph( const ngraph::pass::low_precision::LayerTransformation::Params& params) const { return ngraph::pass::low_precision::LowPrecisionTransformer::getAllTransformations(params). 
@@ -173,35 +160,6 @@ InferenceEngine::Precision LayerTransformation::getDeviceInternalPrecision(const return precision; } -InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) { - // convert to old representation - InferenceEngine::CNNNetwork ngraphNetwork(function); - auto cnnNetworkImp = std::make_shared(ngraphNetwork); - - InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32); - InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32); - InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32); - InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32); - InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8); - - InferenceEngine::details::LowPrecisionTransformer transformer(transformations); - transformer.transform(*cnnNetworkImp); - - return InferenceEngine::CNNNetwork(cnnNetworkImp); -} - -InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParams() { - return InferenceEngine::details::LayerTransformation::Params( - true, - true, - true, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::UpdateLevel, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - true, - true, - true); -} - ngraph::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNGraphFactory::createParams() { return ngraph::pass::low_precision::LayerTransformation::Params( true, diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp index f61df4a6862fa5..ba94bb14fd325a 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp index 7523756eae2d2f..f4d3f638974716 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using 
namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp index 6b49b3898ad05b..5de00e149e8ad3 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp index 11bc71088901fc..e08ffe206818e6 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp @@ -8,6 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { @@ -15,7 +16,7 @@ const std::vector netPrecisions = { InferenceEngine::Precision::FP16 }; -const std::vector trasformationParamValues = { +const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsFactory::createParams().setUpdatePrecisions(true), LayerTestsUtils::LayerTransformationParamsFactory::createParams().setUpdatePrecisions(false), LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8() diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp index 100f0e8978cf9c..309c0b6d42977f 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector precisions = { @@ -16,7 +16,7 @@ namespace { }; - const std::vector trasformationParamValues = { + const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8(), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(false), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(true), diff --git 
a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp index ed78fd91db97b6..8d549d7212d4b9 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp @@ -8,6 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { @@ -15,7 +16,7 @@ const std::vector netPrecisions = { InferenceEngine::Precision::FP16 }; -const std::vector trasformationParamValues = { +const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsFactory::createParams().setUpdatePrecisions(true), LayerTestsUtils::LayerTransformationParamsFactory::createParams().setUpdatePrecisions(false), LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8() diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp index ae71c1c588dda9..b762af7586aeaf 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector precisions = { @@ -16,7 +16,7 @@ namespace { }; - const std::vector trasformationParamValues = { + const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8(), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(false), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(true), diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index f2658eaf9a0ece..16f00c54e60353 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp 
b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index ed76aa5606958e..8d4fc7d40b7a8c 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -9,7 +9,7 @@ #include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp index fa32ca55cec005..7aa3f6704a6c1f 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fake_quantize_transformation.cpp @@ -9,7 +9,7 @@ #include "ngraph_functions/low_precision_transformations/fake_quantize_function.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp index ad24111306c68a..2e92bddd0a8bc0 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp @@ -9,7 +9,7 @@ #include "ngraph_functions/low_precision_transformations/fuse_fake_quantize_and_scale_shift_function.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp index 7e7ae66c4bef6b..dc899145821cff 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/gemm_transformation.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git 
a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp index 36b15446ba5676..b0f0d13bc1bf87 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp @@ -39,12 +39,6 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "functional_test_utils/low_precision_transformations/layer_transformation.hpp" -#include "low_precision_transformations/transformer.hpp" -#include "low_precision_transformations/convolution.hpp" -#include "low_precision_transformations/scaleshift_to_convolution.hpp" -#include "low_precision_transformations/fully_connected.hpp" -#include "low_precision_transformations/gemm.hpp" - using namespace InferenceEngine::details; #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" @@ -53,15 +47,6 @@ using namespace InferenceEngine::details; namespace LayerTestsUtils { -InferenceEngine::details::LowPrecisionTransformations LayerTransformation::getLowPrecisionTransformations( - const InferenceEngine::details::LayerTransformation::Params& params) const { - return LowPrecisionTransformer::getAllTransformations(params) - .add( - InferenceEngine::details::LayerTransformation::Params(params).setSupportAsymmetricQuantization(false), "FullyConnected") - .add( - InferenceEngine::details::LayerTransformation::Params(params).setSupportAsymmetricQuantization(false), "GEMM"); -} - ngraph::pass::low_precision::LowPrecisionTransformations LayerTransformation::getLowPrecisionTransformationsNGraph( const ngraph::pass::low_precision::LayerTransformation::Params& params) const { return ngraph::pass::low_precision::LowPrecisionTransformer::getAllTransformations(params); @@ -199,31 +184,6 @@ InferenceEngine::Precision LayerTransformation::getDeviceInternalPrecision(const return precision; } -InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) { - // convert to old representation - InferenceEngine::CNNNetwork ngraphNetwork(function); - auto cnnNetworkImp = std::make_shared(ngraphNetwork); - - InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32); - - InferenceEngine::details::LowPrecisionTransformer transformer(transformations); - transformer.transform(*cnnNetworkImp); - - return InferenceEngine::CNNNetwork(cnnNetworkImp); -} - -InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParams() { - return InferenceEngine::details::LayerTransformation::Params( - true, - true, - true, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::UpdateLevel, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - true, - true, - true); -} - ngraph::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNGraphFactory::createParams() { return ngraph::pass::low_precision::LayerTransformation::Params( true, diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp 
b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp index e788e3f63161e6..92bfe5a2a19304 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp index e788e3f63161e6..92bfe5a2a19304 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp index e788e3f63161e6..92bfe5a2a19304 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp index 3d5c8c8cbcb33b..a49f6123bbb111 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp @@ -8,13 +8,14 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32 }; -const std::vector trasformationParamValues = { +const std::vector trasformationParamValues = { 
LayerTestsUtils::LayerTransformationParamsFactory::createParams() }; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp index b07f1313e5a26b..4fa1d324da0543 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/squeeze_transformation.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector precisions = { @@ -16,7 +16,7 @@ namespace { }; - const std::vector trasformationParamValues = { + const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8(), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(false), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(true), diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp index ab9e58a56cd9d0..357326bcd1c2a9 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp @@ -8,13 +8,14 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; +using namespace ngraph::pass::low_precision; namespace { const std::vector netPrecisions = { InferenceEngine::Precision::FP32 }; -const std::vector trasformationParamValues = { +const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsFactory::createParams() }; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp index d46a7fac689ba0..40c15ab7953b3c 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/unsqueeze_transformation.cpp @@ -8,7 +8,7 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace InferenceEngine::details; +using namespace ngraph::pass::low_precision; namespace { const std::vector precisions = { @@ -16,7 +16,7 @@ namespace { }; - const std::vector trasformationParamValues = { + const std::vector trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8(), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(false), LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8().setUpdatePrecisions(true), diff --git 
a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp index 84eef464f61037..2be09f64dc6e91 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.hpp @@ -18,7 +18,7 @@ typedef std::tuple< InferenceEngine::Precision, InferenceEngine::SizeVector, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, ngraph::builder::subgraph::FakeQuantizeAndTwoOutputBranchesWithConvolutionFunction::ActualValues > FakeQuantizeAndTwoOutputBranchesWithConvolutionParams; diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp index 52bd7adb8c8915..ed6788de3dead3 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_precision_selection_transformation.hpp @@ -52,7 +52,7 @@ typedef std::tuple< InferenceEngine::Precision, InferenceEngine::SizeVector, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, FakeQuantizePrecisionSelectionTransformationTestValues> FakeQuantizeTransformationParams; class FakeQuantizePrecisionSelectionTransformation : diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp index 04fc18c5456bcd..a80950a3ef14a5 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fake_quantize_transformation.hpp @@ -16,7 +16,7 @@ typedef std::tuple< InferenceEngine::Precision, InferenceEngine::SizeVector, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, ngraph::builder::subgraph::FakeQuantizeOnData> FakeQuantizeTransformationParams; class FakeQuantizeTransformation : diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp index 15a75c6ff4a38b..51ac8bfcaaa232 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.hpp @@ -15,7 +15,7 @@ 
typedef std::tuple< InferenceEngine::Precision, InferenceEngine::SizeVector, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, ngraph::builder::subgraph::FakeQuantizeOnData> FuseFakeQuantizeAndScaleShiftTransformationParams; class FuseFakeQuantizeAndScaleShiftTransformation : diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.hpp index 275739765cc9e9..efda48775e670c 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.hpp @@ -15,7 +15,7 @@ typedef std::tuple< InferenceEngine::Precision, InferenceEngine::SizeVector, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, bool> OutputLayersHandlingInTransformationsParams; class OutputLayersHandlingInTransformationsForConcatMultiChannel : diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/permute_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/permute_transformation.hpp index 686a7ea806b111..cf8890e9313bd0 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/permute_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/permute_transformation.hpp @@ -15,7 +15,7 @@ typedef std::tuple< InferenceEngine::Precision, InferenceEngine::SizeVector, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, bool, bool> PermuteTransformationParams; diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp index 058ed438845c1e..8dcba7d8c09e2b 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/squeeze_transformation.hpp @@ -24,7 +24,7 @@ std::string stringifySqueezeArgs(const std::vector& axes); typedef std::tuple< InferenceEngine::Precision, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, SqueezeTransformationParam > SqueezeTransformationParams; diff --git a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp index fdaf569d80f263..48531e9aaa7abe 100644 --- a/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/unsqueeze_transformation.hpp 
@@ -22,7 +22,7 @@ class UnsqueezeTransformationParam { typedef std::tuple< InferenceEngine::Precision, std::string, - InferenceEngine::details::LayerTransformation::Params, + ngraph::pass::low_precision::LayerTransformation::Params, UnsqueezeTransformationParam > UnsqueezeTransformationParams; diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp index ca9c1372dd38ac..970b580f8c5acc 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_different_precision_on_childs.cpp @@ -10,8 +10,6 @@ #include #include -#include "low_precision_transformations/concat.hpp" - #include #include "ngraph_functions/builders.hpp" #include "ngraph_functions/low_precision_transformations/concat_function.hpp" diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp index 582f93e373051a..a50c3624e9b25c 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_intermediate_transformation.cpp @@ -10,8 +10,6 @@ #include #include -#include "low_precision_transformations/concat.hpp" - #include #include "ngraph_functions/builders.hpp" #include "ngraph_functions/low_precision_transformations/concat_function.hpp" diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp index f42c03ce7b8c4e..281825cf284d54 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/concat_with_split_transformation.cpp @@ -10,8 +10,6 @@ #include #include -#include "low_precision_transformations/concat.hpp" - #include #include "ngraph_functions/builders.hpp" #include "ngraph_functions/low_precision_transformations/concat_function.hpp" diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index 6ad7b2780c23fe..1b790d5f9319e4 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -27,7 +27,7 @@ std::string FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::getTe InferenceEngine::Precision netPrecision; InferenceEngine::SizeVector inputShapes; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params 
params; + ngraph::pass::low_precision::LayerTransformation::Params params; ngraph::builder::subgraph::FakeQuantizeAndTwoOutputBranchesWithConvolutionFunction::ActualValues testValues; std::tie(netPrecision, inputShapes, targetDevice, params, testValues) = obj.param; @@ -41,7 +41,7 @@ void FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::SetUp() { InferenceEngine::Precision netPrecision; InferenceEngine::SizeVector inputShape; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; ngraph::builder::subgraph::FakeQuantizeAndTwoOutputBranchesWithConvolutionFunction::ActualValues testValues; std::tie(netPrecision, inputShape, targetDevice, params, testValues) = this->GetParam(); auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index 2ace15993b9e76..c04ca9ee550abc 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -19,7 +19,7 @@ std::string FakeQuantizePrecisionSelectionTransformation::getTestCaseName(testin InferenceEngine::Precision netPrecision; InferenceEngine::SizeVector inputShapes; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; FakeQuantizePrecisionSelectionTransformationTestValues testValues; std::tie(netPrecision, inputShapes, targetDevice, params, testValues) = obj.param; @@ -31,7 +31,7 @@ std::string FakeQuantizePrecisionSelectionTransformation::getTestCaseName(testin void FakeQuantizePrecisionSelectionTransformation::SetUp() { InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; FakeQuantizePrecisionSelectionTransformationTestValues testValues; std::tie(netPrecision, inputShape, targetDevice, params, testValues) = this->GetParam(); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp index cd30ddea3fdbbe..420eea7d8446fc 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp @@ -18,7 +18,7 @@ std::string FakeQuantizeTransformation::getTestCaseName(testing::TestParamInfoGetParam(); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp index 1bdd99a7e12509..d638da1fa42833 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp +++ 
b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp @@ -16,7 +16,6 @@ #include "functional_test_utils/layer_test_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "ngraph_functions/pass/convert_prc.hpp" -#include "low_precision_transformations/network_helper.hpp" #include "ngraph_functions/builders.hpp" namespace LayerTestsDefinitions { diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp index 8ce75064fd5cd6..035d9a99d8c711 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp @@ -18,7 +18,7 @@ std::string FuseFakeQuantizeAndScaleShiftTransformation::getTestCaseName(testing InferenceEngine::Precision netPrecision; InferenceEngine::SizeVector inputShapes; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData; std::tie(netPrecision, inputShapes, targetDevice, params, fakeQuantizeOnData) = obj.param; @@ -30,7 +30,7 @@ std::string FuseFakeQuantizeAndScaleShiftTransformation::getTestCaseName(testing void FuseFakeQuantizeAndScaleShiftTransformation::SetUp() { InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData; std::tie(netPrecision, inputShape, targetDevice, params, fakeQuantizeOnData) = this->GetParam(); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp index f3144ca80985d9..cde5e4b29bcccf 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp @@ -24,7 +24,7 @@ std::string GemmTransformation::getTestCaseName(testing::TestParamInfoGetParam(); auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - const float low = params.precisionsOnActivations[0] == InferenceEngine::Precision::U8 ? 0.f : -128.f; - const float high = params.precisionsOnActivations[0] == InferenceEngine::Precision::U8 ? 255.f : 127.f; + const float low = params.precisionsOnActivations[0] == ngraph::element::u8 ? 0.f : -128.f; + const float high = params.precisionsOnActivations[0] == ngraph::element::u8 ? 
255.f : 127.f; const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape)); const auto fakeQuantize1 = ngraph::builder::makeFakeQuantize( diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp index e8c2f256b3104c..a9d0d96c06a1e2 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fake_quantize_transformation.cpp @@ -25,7 +25,7 @@ std::string MatMulWithOptimizedConstantFakeQuantizeTransformation::getTestCaseNa InferenceEngine::Precision netPrecision; std::pair shapes; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues param; std::tie(netPrecision, shapes, targetDevice, param) = obj.param; @@ -44,7 +44,7 @@ void MatMulWithOptimizedConstantFakeQuantizeTransformation::SetUp() { InferenceEngine::Precision netPrecision; std::pair shapes; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; MatMulWithOptimizedConstantFakeQuantizeTransformationTestValues param; std::tie(netPrecision, shapes, targetDevice, param) = this->GetParam(); auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformaion.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformaion.cpp index 793caede573ebb..465809485dd037 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformaion.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformaion.cpp @@ -33,7 +33,7 @@ void MultiplyWithOneParentTransformation::SetUp() { InferenceEngine::Precision netPrecision; InferenceEngine::SizeVector inputShape; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; MultiplyWithOneParentTransformationValues values; std::tie(netPrecision, inputShape, targetDevice, values) = this->GetParam(); auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp index ed294af4c871f3..4b7054a51d9012 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -25,7 +25,7 @@ std::string OutputLayersHandlingInTransformations::getTestCaseName(testing::Test InferenceEngine::Precision netPrecision; 
InferenceEngine::SizeVector inputShapes; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param; return getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params); @@ -35,7 +35,7 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformations::GenerateInput( InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam(); const float k = 1.f; @@ -50,7 +50,7 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformations::GenerateInput( void OutputLayersHandlingInTransformations::SetUp() { InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam(); auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp index 5e4f41c003094a..bfd17a6a00ca59 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp @@ -25,7 +25,7 @@ std::string OutputLayersHandlingInTransformationsForConcat::getTestCaseName(test InferenceEngine::Precision netPrecision; InferenceEngine::SizeVector inputShapes; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param; return getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params); @@ -35,7 +35,7 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformationsForConcat::Gener InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam(); if ((info.name() != "input1") && (info.name() != "input2")) { @@ -66,7 +66,7 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformationsForConcat::Gener void OutputLayersHandlingInTransformationsForConcat::SetUp() { InferenceEngine::SizeVector inputShape1; InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShape1, targetDevice, params) = this->GetParam(); auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git 
a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp index bd0ac99b436202..9bc98c63ae47f2 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp @@ -21,8 +21,8 @@ namespace LayerTestsDefinitions { -std::pair outputLayersHandlingInTransformationsForConcatMultiChannelGetInterval(const std::vector& precisions) { - const bool unsignedInterval = std::find(precisions.begin(), precisions.end(), InferenceEngine::Precision::U8) != precisions.end(); +std::pair outputLayersHandlingInTransformationsForConcatMultiChannelGetInterval(const std::vector& precisions) { + const bool unsignedInterval = std::find(precisions.begin(), precisions.end(), ngraph::element::u8) != precisions.end(); const float low = unsignedInterval ? 0.f : -128.f; const float hight = unsignedInterval ? 255.f : 127.f; return std::make_pair(low, hight); @@ -33,7 +33,7 @@ std::string OutputLayersHandlingInTransformationsForConcatMultiChannel::getTestC InferenceEngine::Precision netPrecision; InferenceEngine::SizeVector inputShapes; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param; return getTestCaseNameByParams(netPrecision, inputShapes, targetDevice, params); @@ -43,7 +43,7 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformationsForConcatMultiCh InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; std::string targetDevice; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam(); if ((info.name() != "input1") && (info.name() != "input2")) { @@ -78,7 +78,7 @@ void OutputLayersHandlingInTransformationsForConcatMultiChannel::SetUp() { InferenceEngine::SizeVector inputShape1; InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::tie(netPrecision, inputShape1, targetDevice, params) = this->GetParam(); auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/permute_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/permute_transformation.cpp index ef288b54151132..cabbda0b698166 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/permute_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/permute_transformation.cpp @@ -25,7 +25,7 @@ std::string PermuteTransformation::getTestCaseName(testing::TestParamInfoGetParam(); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp 
b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp index 8fceff603344c6..33b9f11ef65461 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/squeeze_transformation.cpp @@ -33,7 +33,7 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector& valu InferenceEngine::Blob::Ptr SqueezeTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const { InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; SqueezeTransformationParam squeezeParam; std::string targetDevice; @@ -50,7 +50,7 @@ InferenceEngine::Blob::Ptr SqueezeTransformation::GenerateInput(const InferenceE std::string SqueezeTransformation::getTestCaseName(testing::TestParamInfo obj) { InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::string targetDevice; SqueezeTransformationParam squeezeParam; std::tie(netPrecision, targetDevice, params, squeezeParam) = obj.param; @@ -66,7 +66,7 @@ std::string SqueezeTransformation::getTestCaseName(testing::TestParamInfoGetParam(); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp index 9452cfb05f8db9..512e65d4e0f07e 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp @@ -20,7 +20,7 @@ std::string SubtractTransformation::getTestCaseName(testing::TestParamInfoGetParam(); const auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp index 03ff66ac637b84..bf9cfc59a111cf 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp @@ -31,7 +31,7 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector& valu InferenceEngine::Blob::Ptr UnsqueezeTransformation::GenerateInput(const InferenceEngine::InputInfo &info) const { InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; UnsqueezeTransformationParam squeezeParam; std::string targetDevice; @@ -48,7 +48,7 @@ InferenceEngine::Blob::Ptr UnsqueezeTransformation::GenerateInput(const Inferenc std::string UnsqueezeTransformation::getTestCaseName(testing::TestParamInfo obj) { InferenceEngine::Precision netPrecision; - InferenceEngine::details::LayerTransformation::Params params; + ngraph::pass::low_precision::LayerTransformation::Params params; std::string targetDevice; UnsqueezeTransformationParam unsqueezeParam; std::tie(netPrecision, targetDevice, 
params, unsqueezeParam) = obj.param; @@ -64,7 +64,7 @@ std::string UnsqueezeTransformation::getTestCaseName(testing::TestParamInfoGetParam(); diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.cpp index 5117447ad7c212..cb79063f6a21c6 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.cpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.cpp @@ -22,56 +22,12 @@ #include "ngraph_functions/pass/convert_prc.hpp" #include -#include "low_precision_transformations/convolution.hpp" -#include "low_precision_transformations/scaleshift_to_convolution.hpp" using namespace InferenceEngine; using namespace ngraph; namespace LayerTestsUtils { -InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParamsU8I8() { - return InferenceEngine::details::LayerTransformation::Params( - true, - true, - true, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - false, - true, - true, - { InferenceEngine::Precision::U8 }, - { InferenceEngine::Precision::I8 }); -} - -InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParamsU8U8() { - return InferenceEngine::details::LayerTransformation::Params( - true, - true, - true, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - false, - true, - true, - { InferenceEngine::Precision::U8 }, - { InferenceEngine::Precision::U8 }); -} - -InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParamsI8I8() { - return InferenceEngine::details::LayerTransformation::Params( - true, - true, - true, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None, - false, - true, - true, - { InferenceEngine::Precision::I8 }, - { InferenceEngine::Precision::I8 }); -} - ngraph::pass::low_precision::LayerTransformation::Params LayerTransformationParamsNGraphFactory::createParamsU8I8() { return ngraph::pass::low_precision::LayerTransformation::Params( true, @@ -97,7 +53,7 @@ LayerTransformation::LayerTransformation() { } InferenceEngine::Blob::Ptr LayerTransformation::GenerateInput( - const ngraph::element::Type_t precision, + const ngraph::element::Type precision, const InferenceEngine::TensorDesc& tensorDesc, const float k) { const auto interval = getQuantizationInterval(precision); @@ -107,108 +63,19 @@ InferenceEngine::Blob::Ptr LayerTransformation::GenerateInput( return FuncTestUtils::createAndFillBlobConsistently(tensorDesc, hight - low, static_cast(low), 1ul); } -InferenceEngine::details::LowPrecisionTransformer LayerTransformation::getLowPrecisionTransformer( - const InferenceEngine::details::LayerTransformation::Params& params) const { - InferenceEngine::details::LowPrecisionTransformer transformer(getLowPrecisionTransformations(params)); - return transformer; -} - ngraph::pass::low_precision::LowPrecisionTransformer LayerTransformation::getLowPrecisionTransformerNGraph( const 
ngraph::pass::low_precision::LayerTransformation::Params& params) const { ngraph::pass::low_precision::LowPrecisionTransformer transformer(getLowPrecisionTransformationsNGraph(params)); return transformer; } -IE_SUPPRESS_DEPRECATED_START - -void LayerTransformation::checkPrecisions(const InferenceEngine::CNNLayer& layer, const InferenceEngine::Precision& expectedPrecision) { - for (const InferenceEngine::DataWeakPtr insDataWeak : layer.insData) { - const InferenceEngine::DataPtr insData = insDataWeak.lock(); - EXPECT_TRUE(insData != nullptr) << "insert data is nullable"; - const InferenceEngine::Precision inputPrecision = insData->getTensorDesc().getPrecision(); - EXPECT_EQ(getDeviceInternalPrecision(expectedPrecision), inputPrecision) << - "expected input precision " << getDeviceInternalPrecision(expectedPrecision) << " actual precision " << inputPrecision; - } - - for (const InferenceEngine::DataPtr outData : layer.outData) { - const InferenceEngine::Precision outputPrecision = outData->getTensorDesc().getPrecision(); - EXPECT_EQ(getDeviceInternalPrecision(expectedPrecision), outputPrecision) << - "expected output precision " << getDeviceInternalPrecision(expectedPrecision) << " actual precision " << outputPrecision; - } -} - -void LayerTransformation::checkPrecisions( - const InferenceEngine::CNNLayer& layer, - const std::vector>& expectedInputPrecisions, - const std::vector& expectedOutputPrecisions, - const bool asymmetricQuantizationOnData, - const bool asymmetricQuantizationOnWeights) { - EXPECT_EQ(expectedInputPrecisions.size(), layer.insData.size()) << "insert data count is no expected: " << layer.insData.size(); - if (expectedInputPrecisions.size() != layer.insData.size()) { - return; - } - - const auto checkPrecision = []( - const InferenceEngine::CNNLayer& layer, - const std::vector& expectedPrecisions, - const size_t index, - const bool input) { - const InferenceEngine::DataPtr data = input ? layer.insData[index].lock() : layer.outData[index]; - EXPECT_TRUE(data != nullptr) << "data is nullable"; - const InferenceEngine::Precision actualPrecision = data->getTensorDesc().getPrecision(); - - EXPECT_FALSE(std::all_of( - expectedPrecisions.begin(), - expectedPrecisions.end(), - [&](const InferenceEngine::Precision precision) { return getDeviceInternalPrecision(precision) != actualPrecision; })) << - "expected precisions on " << index << (input ? 
" input" : " output") << " port " << expectedPrecisions << - " actual precision " << actualPrecision; - }; - - if (asymmetricQuantizationOnData || asymmetricQuantizationOnWeights) { - if (asymmetricQuantizationOnData) { - const InferenceEngine::CNNLayerPtr parentOnData = InferenceEngine::details::CNNNetworkHelper::getParent(layer, 0); - checkPrecision(*parentOnData, expectedInputPrecisions[0], 0, true); - } else { - checkPrecision(layer, expectedInputPrecisions[0], 0, true); - } - - if (asymmetricQuantizationOnWeights) { - const InferenceEngine::CNNLayerPtr parentOnWeights = InferenceEngine::details::CNNNetworkHelper::getParent(layer, 1); - checkPrecision(*parentOnWeights, expectedInputPrecisions[1], 1, true); - } else { - checkPrecision(layer, expectedInputPrecisions[1], 1, true); - } - } else { - for (size_t inputIndex = 0ul; inputIndex < layer.insData.size(); ++inputIndex) { - checkPrecision(layer, expectedInputPrecisions[inputIndex], inputIndex, true); - } - } - - checkPrecision(layer, expectedOutputPrecisions, 0, false); -} - -IE_SUPPRESS_DEPRECATED_END - -std::pair LayerTransformation::getQuantizationInterval(const ngraph::element::Type_t precision) { +std::pair LayerTransformation::getQuantizationInterval(const ngraph::element::Type precision) { const bool unsignedInterval = precision == ngraph::element::u8; const float low = unsignedInterval ? 0.f : -128.f; const float hight = unsignedInterval ? 255.f : 127.f; return std::make_pair(low, hight); } -std::string LayerTransformation::toString(const InferenceEngine::details::LayerTransformation::Params& params) { - std::ostringstream result; - result << - (params.supportAsymmetricQuantization ? "asymmetric_" : "symmetric_") << - (params.updatePrecisions ? "" : "notUpdatePrecisions_") << - params.precisionsOnActivations << "_" << - params.precisionsOnWeights << "_" << - params.quantizedTensorAlignmentOnActivations; - - return result.str(); -} - std::string LayerTransformation::toString(const ngraph::pass::low_precision::LayerTransformation::Params& params) { using namespace ngraph::pass::low_precision; std::ostringstream result; @@ -226,7 +93,7 @@ std::string LayerTransformation::getTestCaseNameByParams( const InferenceEngine::Precision precision, const InferenceEngine::SizeVector& inputShapes, const std::string& targetDevice, - const InferenceEngine::details::LayerTransformation::Params& params) { + const ngraph::pass::low_precision::LayerTransformation::Params& params) { std::ostringstream result; result << precision.name() << "_" << ngraph::Shape(inputShapes) << "_" << targetDevice << "_" << toString(params); return result.str(); @@ -242,151 +109,4 @@ std::string LayerTransformation::getTestCaseNameByParams( return result.str(); } -IE_SUPPRESS_DEPRECATED_START - -bool LayerTransformation::fakeQuantizeExists(const InferenceEngine::ICNNNetwork& network) { - auto it = InferenceEngine::details::CNNNetworkIterator(&network); - auto end = details::CNNNetworkIterator(); - while (it != end) { - if (((*it)->type == "FakeQuantize") && (InferenceEngine::details::QuantizationDetails::isSupportedLevel((*it)->GetParamAsUInt("levels")))) { - return true; - } - it++; - } - - return false; -} - -IE_SUPPRESS_DEPRECATED_END - -ngraph::element::Type toNGraph(const InferenceEngine::Precision precision) { - switch (precision) { - case InferenceEngine::Precision::U8: { - return ngraph::element::u8; - } - case InferenceEngine::Precision::I8: { - return ngraph::element::i8; - } - default: { - THROW_IE_EXCEPTION << "unknown precision " << precision; - } - } -} 
- -InferenceEngine::Precision toNGraph(const ngraph::element::Type precision) { - switch (precision) { - case ngraph::element::Type_t::u8: { - return InferenceEngine::Precision::U8; - } - case ngraph::element::Type_t::i8: { - return InferenceEngine::Precision::I8; - } - case ngraph::element::Type_t::f32: { - return InferenceEngine::Precision::FP32; - } - case ngraph::element::Type_t::f16: { - return InferenceEngine::Precision::FP16; - } - default: { - THROW_IE_EXCEPTION << "unknown precision " << precision; - } - } -} - -std::vector toNGraph(const std::vector& precisions) { - std::vector resultPrecisions(precisions.size()); - for (size_t i = 0ul; i < precisions.size(); ++i) { - const InferenceEngine::Precision precision = precisions[i]; - resultPrecisions[i] = toNGraph(precision); - } - return resultPrecisions; -} - -std::vector toCNNNetwork(const std::vector& precisions) { - std::vector resultPrecisions(precisions.size()); - for (size_t i = 0ul; i < precisions.size(); ++i) { - const ngraph::element::Type precision = precisions[i]; - resultPrecisions[i] = toNGraph(precision); - } - return resultPrecisions; -} - -ngraph::pass::low_precision::LayerTransformation::QuantizedTensorAlignment toNGraph( - InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment aligment) { - switch (aligment) { - case InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::UpdateLevel: { - return ngraph::pass::low_precision::LayerTransformation::QuantizedTensorAlignment::UpdateLevel; - } - case InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None: { - return ngraph::pass::low_precision::LayerTransformation::QuantizedTensorAlignment::None; - } - default: { - THROW_IE_EXCEPTION << "not supported"; - } - } -} - -InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment toCNNNetwork( - ngraph::pass::low_precision::LayerTransformation::QuantizedTensorAlignment aligment) { - switch (aligment) { - case ngraph::pass::low_precision::LayerTransformation::QuantizedTensorAlignment::UpdateLevel: { - return InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::UpdateLevel; - } - case ngraph::pass::low_precision::LayerTransformation::QuantizedTensorAlignment::None: { - return InferenceEngine::details::LayerTransformation::QuantizedTensorAlignment::None; - } - default: { - THROW_IE_EXCEPTION << "not supported"; - } - } -} - -ngraph::pass::low_precision::LayerTransformation::Params LayerTransformation::toNGraph(const InferenceEngine::details::LayerTransformation::Params& params) { - const auto precisionsOnActivations = LayerTestsUtils::toNGraph(params.precisionsOnActivations); - const auto precisionsOnWeights = LayerTestsUtils::toNGraph(params.precisionsOnWeights); - return ngraph::pass::low_precision::LayerTransformation::Params( - params.updatePrecisions, - LayerTestsUtils::toNGraph(params.quantizedTensorAlignmentOnActivations), - LayerTestsUtils::toNGraph(params.quantizedTensorAlignmentOnWeights), - params.supportAsymmetricQuantization, - precisionsOnActivations, - precisionsOnWeights); -} - -InferenceEngine::details::LayerTransformation::Params LayerTransformation::toCNNNetwork(const pass::low_precision::LayerTransformation::Params& params) { - const auto precisionsOnActivations = LayerTestsUtils::toCNNNetwork(params.precisionsOnActivations); - const auto precisionsOnWeights = LayerTestsUtils::toCNNNetwork(params.precisionsOnWeights); - return InferenceEngine::details::LayerTransformation::Params( - params.updatePrecisions, - true, - 
true, - LayerTestsUtils::toCNNNetwork(params.quantizedTensorAlignmentOnActivations), - LayerTestsUtils::toCNNNetwork(params.quantizedTensorAlignmentOnWeights), - true, - true, - params.supportAsymmetricQuantization, - precisionsOnActivations, - precisionsOnWeights); -} - -InferenceEngine::Precision LayerTransformation::toCNNNetwork(const ngraph::element::Type_t precision) { - switch (precision) { - case ngraph::element::Type_t::i8: { - return InferenceEngine::Precision::I8; - } - case ngraph::element::Type_t::u8: { - return InferenceEngine::Precision::U8; - } - case ngraph::element::Type_t::f16: { - return InferenceEngine::Precision::FP16; - } - case ngraph::element::Type_t::f32: { - return InferenceEngine::Precision::FP32; - } - default: { - THROW_IE_EXCEPTION << "not supported precision " << precision; - } - } -} - } // namespace LayerTestsUtils diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.hpp index d5d60a7017de3f..66545fce8e9fa5 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.hpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/low_precision_transformations/layer_transformation.hpp @@ -6,25 +6,15 @@ #include #include +#include #include -#include "low_precision_transformations/network_helper.hpp" -#include "low_precision_transformations/convolution.hpp" -#include "low_precision_transformations/scaleshift_to_convolution.hpp" + #include "functional_test_utils/layer_test_utils.hpp" -#include "low_precision_transformations/transformer.hpp" #include namespace LayerTestsUtils { -class LayerTransformationParamsFactory { -public: - static InferenceEngine::details::LayerTransformation::Params createParamsU8I8(); - static InferenceEngine::details::LayerTransformation::Params createParamsU8U8(); - static InferenceEngine::details::LayerTransformation::Params createParamsI8I8(); - static InferenceEngine::details::LayerTransformation::Params createParams(); -}; - class LayerTransformationParamsNGraphFactory { public: static ngraph::pass::low_precision::LayerTransformation::Params createParamsU8I8(); @@ -32,31 +22,23 @@ class LayerTransformationParamsNGraphFactory { static ngraph::pass::low_precision::LayerTransformation::Params createParams(); }; +class LayerTransformationParamsFactory : public LayerTransformationParamsNGraphFactory { +}; + IE_SUPPRESS_DEPRECATED_START class LayerTransformation : virtual public LayerTestsUtils::LayerTestsCommon { -public: - static ngraph::pass::low_precision::LayerTransformation::Params toNGraph(const InferenceEngine::details::LayerTransformation::Params& params); - static InferenceEngine::details::LayerTransformation::Params toCNNNetwork(const ngraph::pass::low_precision::LayerTransformation::Params& params); - static InferenceEngine::Precision toCNNNetwork(const ngraph::element::Type_t precision); - protected: LayerTransformation(); static InferenceEngine::Blob::Ptr GenerateInput( - const ngraph::element::Type_t precision, + const ngraph::element::Type precision, const InferenceEngine::TensorDesc& tensorDesc, const float k = 1.f); - InferenceEngine::details::LowPrecisionTransformations getLowPrecisionTransformations( - const InferenceEngine::details::LayerTransformation::Params& params) const; - ngraph::pass::low_precision::LowPrecisionTransformations 
getLowPrecisionTransformationsNGraph( const ngraph::pass::low_precision::LayerTransformation::Params& params) const; - InferenceEngine::details::LowPrecisionTransformer getLowPrecisionTransformer( - const InferenceEngine::details::LayerTransformation::Params& params) const; - ngraph::pass::low_precision::LowPrecisionTransformer getLowPrecisionTransformerNGraph( const ngraph::pass::low_precision::LayerTransformation::Params& params) const; @@ -64,20 +46,7 @@ class LayerTransformation : virtual public LayerTestsUtils::LayerTestsCommon { const ngraph::pass::low_precision::LayerTransformation::Params& params, const ngraph::pass::low_precision::LowPrecisionTransformations additionalTransformations = {}); - InferenceEngine::CNNNetwork transform(const InferenceEngine::details::LowPrecisionTransformations& transformations); - - static void checkPrecisions(const InferenceEngine::CNNLayer& layer, const InferenceEngine::Precision& expectedPrecision); - - static void checkPrecisions( - const InferenceEngine::CNNLayer& layer, - const std::vector>& expectedInputPrecisions, - const std::vector& expectedOutputPrecisions, - const bool asymmetricQuantizationOnData = false, - const bool asymmetricQuantizationOnWeights = false); - - static std::pair getQuantizationInterval(const ngraph::element::Type_t precision); - - static std::string toString(const InferenceEngine::details::LayerTransformation::Params& params); + static std::pair getQuantizationInterval(const ngraph::element::Type precision); static std::string toString(const ngraph::pass::low_precision::LayerTransformation::Params& params); @@ -87,15 +56,13 @@ class LayerTransformation : virtual public LayerTestsUtils::LayerTestsCommon { const InferenceEngine::Precision precision, const InferenceEngine::SizeVector& inputShapes, const std::string& targetDevice, - const InferenceEngine::details::LayerTransformation::Params& params); + const ngraph::pass::low_precision::LayerTransformation::Params& params); static std::string getTestCaseNameByParams( const ngraph::element::Type precision, const ngraph::Shape& inputShapes, const std::string& targetDevice, const ngraph::pass::low_precision::LayerTransformation::Params& params); - - static bool fakeQuantizeExists(const InferenceEngine::ICNNNetwork& network); }; IE_SUPPRESS_DEPRECATED_END @@ -104,7 +71,6 @@ typedef std::tuple< InferenceEngine::Precision, InferenceEngine::SizeVector, std::string, - // TODO: refactor: CNNNetwork LPT is detected - InferenceEngine::details::LayerTransformation::Params> LayerTransformationParams; + ngraph::pass::low_precision::LayerTransformation::Params> LayerTransformationParams; } // namespace LayerTestsUtils diff --git a/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt b/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt index b8c7453eb93648..7b2ee2f5d2ce8d 100644 --- a/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt @@ -15,14 +15,19 @@ file(GLOB CLDNN_TEST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/lstm/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/common_single_layer_tests/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/ie_class/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/single_layer_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/transformations/*.cpp) + ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/single_layer_tests/*.cpp) + +if (USE_CNNNETWORK_LPT) + list(APPEND ${CLDNN_TEST_SOURCES} + 
${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/transformations/*.cpp) +endif() list(APPEND TEST_SRC ${CLDNN_TEST_SOURCES}) list(APPEND CLDNN_LIBS IESharedTests inference_engine_lp_transformations + inference_engine_legacy ${CLDNN__IOCL_ICD_LIBPATH}) # try to find VA libraries diff --git a/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt b/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt index 09fe185b8d53dd..4d04f89c932cc8 100644 --- a/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt @@ -23,13 +23,18 @@ file(GLOB MKL_DNN_TEST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/common_single_layer_tests/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/ie_class/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/single_layer_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/network_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/transformations/*.cpp ) +if (USE_CNNNETWORK_LPT) + list(APPEND ${MKL_DNN_TEST_SOURCES} + ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/network_tests/*.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/transformations/*.cpp) +endif() + list(APPEND MKL_DNN_LIBS IESharedTests inference_engine_lp_transformations + inference_engine_legacy ${Boost_REGEX_LIBRARY}) list(APPEND TEST_SRC ${MKL_DNN_TEST_SOURCES}) diff --git a/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt b/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt index 5ccbdbbb58f830..2fe4332b812e81 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt @@ -13,7 +13,8 @@ list(APPEND SHARED_LIBRARIES ngraphFunctions ) -file(GLOB SHARED_TESTS_SRC +if (USE_CNNNETWORK_LPT) + file(GLOB SHARED_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/common_single_layer_tests/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/lstm/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/network_tests/*.cpp @@ -22,6 +23,13 @@ file(GLOB SHARED_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/transformations/*.hpp ${CMAKE_CURRENT_SOURCE_DIR}/transformations/common/*.cpp ) +else() + file(GLOB SHARED_TESTS_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/common_single_layer_tests/*.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/lstm/*.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/graph_tools/*.cpp + ) +endif() add_library(${TARGET_NAME} STATIC ${SHARED_TESTS_SRC}) add_dependencies(${TARGET_NAME} inference_engine_preproc MultiDevicePlugin mock_engine) @@ -69,3 +77,7 @@ add_dependencies(${TARGET_NAME} HeteroPlugin) # developer package ie_developer_export_targets(${TARGET_NAME}) + +if (USE_CNNNETWORK_LPT) + target_compile_definitions(${TARGET_NAME} PUBLIC USE_CNNNETWORK_LPT) +endif() \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/low_precision_transformer_single_layer_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/low_precision_transformer_single_layer_tests.hpp index 6306d38aec02f2..07d3dd4b141318 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/transformations/low_precision_transformer_single_layer_tests.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/transformations/low_precision_transformer_single_layer_tests.hpp @@ -13,10 +13,13 @@ #include "cpp_interfaces/impl/ie_plugin_internal.hpp" #include "common/low_precision_tests_utils.hpp" + +#ifdef 
USE_CNNNETWORK_LPT #include "low_precision_transformations/transformer.hpp" #include "low_precision_transformations/convolution.hpp" #include "low_precision_transformations/network_helper.hpp" #include "low_precision_transformations/eltwise.hpp" +#endif #include "tests_common.hpp" #include "ir_gen_helper.hpp" diff --git a/inference-engine/tests_deprecated/helpers/CMakeLists.txt b/inference-engine/tests_deprecated/helpers/CMakeLists.txt index 2bc66896578215..c68641e136661a 100644 --- a/inference-engine/tests_deprecated/helpers/CMakeLists.txt +++ b/inference-engine/tests_deprecated/helpers/CMakeLists.txt @@ -24,6 +24,8 @@ function(add_helpers target_name) $ $ "${IE_MAIN_SOURCE_DIR}/src/vpu/" + "${IE_MAIN_SOURCE_DIR}/src/plugin_api" + "${IE_MAIN_SOURCE_DIR}/src/legacy_api/include" PRIVATE "${CMAKE_CURRENT_BINARY_DIR}") # TODO: eliminate dependency on samples diff --git a/inference-engine/tests_deprecated/helpers/tests_common_func.hpp b/inference-engine/tests_deprecated/helpers/tests_common_func.hpp index eeee91d540b352..e9de2495db0144 100644 --- a/inference-engine/tests_deprecated/helpers/tests_common_func.hpp +++ b/inference-engine/tests_deprecated/helpers/tests_common_func.hpp @@ -8,7 +8,6 @@ #include #include #include -#include "low_precision_transformations/network_helper.hpp" // use to display additional test info: // 1. low precision transformation parameters
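Taken together, the test-side hunks above apply one mechanical pattern: each shared functional test's parameter tuple and local variables switch from the CNNNetwork-based InferenceEngine::details::LayerTransformation::Params to ngraph::pass::low_precision::LayerTransformation::Params, and precision checks compare ngraph::element types instead of InferenceEngine::Precision values. The sketch below only illustrates the post-patch shape of such a fixture; the type alias, function name, and include paths are assumptions for illustration and are not taken from the patch.

// Illustrative sketch only: the shape of a migrated shared LPT functional test parameter set.
// Include paths, the alias name and the helper name are assumed, not part of the patch.
#include <string>
#include <tuple>
#include <utility>
#include <ie_common.h>
#include <ie_precision.hpp>
#include <ngraph/type/element_type.hpp>
#include <transformations/low_precision/layer_transformation.hpp>  // path assumed

typedef std::tuple<
    InferenceEngine::Precision,                                // network precision
    InferenceEngine::SizeVector,                               // input shape
    std::string,                                               // target device
    ngraph::pass::low_precision::LayerTransformation::Params   // replaces InferenceEngine::details::LayerTransformation::Params
> ExampleTransformationParams;

// Quantization intervals are now derived from nGraph element types,
// mirroring the getQuantizationInterval logic kept in layer_transformation.cpp above.
std::pair<float, float> exampleQuantizationInterval(const ngraph::element::Type precision) {
    const bool unsignedInterval = precision == ngraph::element::u8;
    const float low = unsignedInterval ? 0.f : -128.f;
    const float high = unsignedInterval ? 255.f : 127.f;
    return std::make_pair(low, high);
}

In the fixtures themselves the std::tie unpacking order is unchanged; the substance of the diff is the parameter type substitution and the removal of the CNNNetwork-specific conversion helpers.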
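The deprecated-test and CMake hunks rely on the same switch: USE_CNNNETWORK_LPT is exported as a public compile definition by the affected targets, and the legacy CNNNetwork LPT headers are only pulled in when it is set. A minimal sketch of that guard, following the form used in low_precision_transformer_single_layer_tests.hpp above, is shown here; when the definition is absent, the legacy includes and the test sources that depend on them drop out of the build.

// Compile-time guard pattern used by the deprecated tests after this patch.
// USE_CNNNETWORK_LPT comes from target_compile_definitions(... PUBLIC USE_CNNNETWORK_LPT)
// in the CMake hunks above; without it no legacy CNNNetwork LPT header is included.
#ifdef USE_CNNNETWORK_LPT
#include "low_precision_transformations/transformer.hpp"
#include "low_precision_transformations/network_helper.hpp"
#endif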