From b17b526009e559a2bfd3099dc61e77ceccd81714 Mon Sep 17 00:00:00 2001 From: Pavel Esir Date: Wed, 12 May 2021 12:11:27 +0300 Subject: [PATCH 01/27] Add gather 7 to nG Python API and IE IR reader test (#5276) * nG Python api for Gather and IE IR reader tests * also included old IE IR reader test for Gather1 * added a blank line before Gather * style corrections * applied review comments * removed blank line * removed xfailed for Gather7 with batch_dims since CPU was moved to nGraph (...allows to fallback node execution on ngraph evaluate() method in case if optimized implementation is absent) * added new visitor tests --- .../ngraph_reader/gather_tests.cpp | 137 +++++++++++++++++- ngraph/python/src/ngraph/opset6/ops.py | 2 +- ngraph/python/src/ngraph/opset7/__init__.py | 2 +- ngraph/python/src/ngraph/opset7/ops.py | 21 +++ ngraph/python/tests/__init__.py | 1 - .../python/tests/test_ngraph/test_gather.py | 54 +++++++ .../tests/test_ngraph/test_ops_reshape.py | 17 +-- ngraph/test/CMakeLists.txt | 1 + ngraph/test/visitors/op/gather.cpp | 31 ++++ 9 files changed, 242 insertions(+), 24 deletions(-) create mode 100644 ngraph/python/tests/test_ngraph/test_gather.py create mode 100644 ngraph/test/visitors/op/gather.cpp diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/gather_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/gather_tests.cpp index f44528a9d14586..f38aecfe185e3b 100644 --- a/inference-engine/tests/functional/inference_engine/ngraph_reader/gather_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/gather_tests.cpp @@ -4,7 +4,8 @@ #include #include "ngraph_reader_tests.hpp" -TEST_F(NGraphReaderTests, ReadGatherNetwork) { + +TEST_F(NGraphReaderTests, Read_Gather1_Network) { std::string model = R"V0G0N( @@ -117,7 +118,133 @@ TEST_F(NGraphReaderTests, ReadGatherNetwork) { )V0G0N"; compareIRs(model, modelV7, 16, [](Blob::Ptr& weights) { - auto* buffer = weights->buffer().as(); - buffer[0] = 0; - }); -} \ No newline at end of file + auto* buffer = weights->buffer().as(); + buffer[0] = 0; + }); +} + +TEST_F(NGraphReaderTests, Read_Gather7_Network) { + std::string model = R"V0G0N( + + + + + + + 2 + 3 + 4 + + + + + + + + 8 + 16 + + + + + + + + + + + + + + 2 + 3 + 4 + + + 8 + 16 + + + + + + 2 + 8 + 16 + 4 + + + + + + + 2 + 8 + 16 + 4 + + + + + + + + + + + +)V0G0N"; + std::string modelV7 = R"V0G0N( + + + + + + 2 + 3 + 4 + + + + + + + 8 + 16 + + + + + + + + 2 + 3 + 4 + + + 8 + 16 + + + + + 2 + 8 + 16 + 4 + + + + + + + + + +)V0G0N"; + compareIRs(model, modelV7, 8, [](Blob::Ptr& weights) { + auto* buffer = weights->buffer().as(); + buffer[0] = 1; + }); +} diff --git a/ngraph/python/src/ngraph/opset6/ops.py b/ngraph/python/src/ngraph/opset6/ops.py index 7260a6fdcf9e85..415ba6b5db200d 100644 --- a/ngraph/python/src/ngraph/opset6/ops.py +++ b/ngraph/python/src/ngraph/opset6/ops.py @@ -84,7 +84,7 @@ def gather_elements( axis: Optional[int] = 0, name: Optional[str] = None, ) -> Node: - """Return a node which performs GatherND. + """Return a node which performs GatherElements. 
@param data: N-D tensor with data for gathering @param indices: N-D tensor with indices by which data is gathered diff --git a/ngraph/python/src/ngraph/opset7/__init__.py b/ngraph/python/src/ngraph/opset7/__init__.py index 4b7e715982bf0b..08a24529d4151b 100644 --- a/ngraph/python/src/ngraph/opset7/__init__.py +++ b/ngraph/python/src/ngraph/opset7/__init__.py @@ -51,7 +51,7 @@ from ngraph.opset1.ops import fake_quantize from ngraph.opset1.ops import floor from ngraph.opset1.ops import floor_mod -from ngraph.opset1.ops import gather +from ngraph.opset7.ops import gather from ngraph.opset6.ops import gather_elements from ngraph.opset5.ops import gather_nd from ngraph.opset1.ops import gather_tree diff --git a/ngraph/python/src/ngraph/opset7/ops.py b/ngraph/python/src/ngraph/opset7/ops.py index a2b0c26930f3c9..d8b4325adcd5a8 100644 --- a/ngraph/python/src/ngraph/opset7/ops.py +++ b/ngraph/python/src/ngraph/opset7/ops.py @@ -104,6 +104,27 @@ def roll( @nameable_op +def gather( + data: NodeInput, + indices: NodeInput, + axis: NodeInput, + batch_dims: Optional[int] = 0, +) -> Node: + """Return a node which performs Gather. + + @param data: N-D tensor with data for gathering + @param indices: N-D tensor with indices by which data is gathered + @param axis: axis along which elements are gathered + @param batch_dims: number of batch dimensions + @return: The new node which performs Gather + """ + inputs = as_nodes(data, indices, axis) + attributes = { + "batch_dims": batch_dims + } + return _get_node_factory_opset7().create("Gather", inputs, attributes) + + def dft( data: NodeInput, axes: NodeInput, diff --git a/ngraph/python/tests/__init__.py b/ngraph/python/tests/__init__.py index 21a44177a6f665..3fd4136dd3c6c3 100644 --- a/ngraph/python/tests/__init__.py +++ b/ngraph/python/tests/__init__.py @@ -157,7 +157,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): "Not equal to tolerance") xfail_issue_49391 = xfail_test(reason="Roll is not implemented in CPU plugin.") - xfail_issue_49359 = xfail_test(reason="DFT is not implemented in CPU plugin") xfail_issue_49375 = xfail_test(reason="IDFT is not implemented in CPU plugin") xfail_issue_45432 = xfail_test(reason="Einsum is not implemented in CPU plugin.") diff --git a/ngraph/python/tests/test_ngraph/test_gather.py b/ngraph/python/tests/test_ngraph/test_gather.py new file mode 100644 index 00000000000000..08ff0893a8ff07 --- /dev/null +++ b/ngraph/python/tests/test_ngraph/test_gather.py @@ -0,0 +1,54 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import ngraph as ng +import numpy as np + +from tests.test_ngraph.util import run_op_node + + +def test_gather(): + input_data = np.array( + [1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32 + ).reshape((3, 3)) + input_indices = np.array([0, 2], np.int32).reshape(1, 2) + input_axis = np.array([1], np.int32) + + expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape( + (3, 1, 2) + ) + + result = run_op_node([input_data], ng.gather, input_indices, input_axis) + assert np.allclose(result, expected) + + +def test_gather_with_scalar_axis(): + input_data = np.array( + [1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32 + ).reshape((3, 3)) + input_indices = np.array([0, 2], np.int32).reshape(1, 2) + input_axis = np.array(1, np.int32) + + expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape( + (3, 1, 2) + ) + + result = run_op_node([input_data], ng.gather, input_indices, input_axis) + assert 
np.allclose(result, expected)
+
+
+def test_gather_batch_dims_1():
+
+    input_data = np.array([[1, 2, 3, 4, 5],
+                           [6, 7, 8, 9, 10]], np.float32)
+
+    input_indices = np.array([[0, 0, 4],
+                              [4, 0, 0]], np.int32)
+    input_axis = np.array([1], np.int32)
+    batch_dims = 1
+
+    expected = np.array([[1, 1, 5],
+                         [10, 6, 6]], np.float32)
+
+    result = run_op_node([input_data], ng.gather, input_indices, input_axis, batch_dims)
+    assert np.allclose(result, expected)
diff --git a/ngraph/python/tests/test_ngraph/test_ops_reshape.py b/ngraph/python/tests/test_ngraph/test_ops_reshape.py
index 85ec57739fb1f1..8930c49ffc587b 100644
--- a/ngraph/python/tests/test_ngraph/test_ops_reshape.py
+++ b/ngraph/python/tests/test_ngraph/test_ops_reshape.py
@@ -1,10 +1,10 @@
 # Copyright (C) 2018-2021 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

+import ngraph as ng
 import numpy as np
 import pytest

-import ngraph as ng
 from tests.runtime import get_runtime
 from tests.test_ngraph.util import run_op_node, run_op_numeric_data

@@ -120,21 +120,6 @@ def test_broadcast_bidirectional():
     assert node.get_output_size() == 1


-def test_gather():
-    input_data = np.array(
-        [1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
-    ).reshape((3, 3))
-    input_indices = np.array([0, 2], np.int32).reshape(1, 2)
-    input_axes = np.array([1], np.int32)
-
-    expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
-        (3, 1, 2)
-    )
-
-    result = run_op_node([input_data], ng.gather, input_indices, input_axes)
-    assert np.allclose(result, expected)
-
-
 def test_transpose():
     input_tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape(
         (3, 3, 224, 224)
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index 07d2632172eb40..d4aabfccf8a884 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -219,6 +219,7 @@ set(SRC
     visitors/op/elu.cpp
     visitors/op/extractimagepatches.cpp
     visitors/op/fake_quantize.cpp
+    visitors/op/gather.cpp
     visitors/op/grn.cpp
     visitors/op/group_conv.cpp
     visitors/op/interpolate.cpp
diff --git a/ngraph/test/visitors/op/gather.cpp b/ngraph/test/visitors/op/gather.cpp
new file mode 100644
index 00000000000000..3e6446a07b8e89
--- /dev/null
+++ b/ngraph/test/visitors/op/gather.cpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+
+#include "ngraph/ngraph.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "ngraph/opsets/opset7.hpp"
+
+#include "util/visitor.hpp"
+
+using namespace std;
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
+TEST(attributes, gather_v7_op)
+{
+    NodeBuilder::get_ops().register_factory<opset7::Gather>();
+    auto data = make_shared<opset1::Parameter>(element::i32, Shape{2, 3, 4});
+    auto indices = make_shared<opset1::Parameter>(element::i32, Shape{2});
+    auto axis = make_shared<opset1::Constant>(element::i32, Shape{}, 2);
+    int64_t batch_dims = 1;
+
+    auto gather = make_shared<opset7::Gather>(data, indices, axis, batch_dims);
+    NodeBuilder builder(gather);
+    auto g_gather = as_type_ptr<opset7::Gather>(builder.create());
+
+    EXPECT_EQ(g_gather->get_batch_dims(), gather->get_batch_dims());
+}
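PATCH 01 above exposes opset7 Gather, including the new `batch_dims` attribute, through the nGraph Python API. A minimal usage sketch, mirroring the values of `test_gather_batch_dims_1` above (it assumes ngraph Python bindings built from this patch; `ng.gather` accepts numpy arrays directly and converts them to Constant nodes via `as_nodes`):

```python
import numpy as np
import ngraph as ng

# With batch_dims=1, row b of `indices` gathers from row b of `data`
# along the given axis, rather than indexing into the whole tensor.
data = np.array([[1, 2, 3, 4, 5],
                 [6, 7, 8, 9, 10]], dtype=np.float32)
indices = np.array([[0, 0, 4],
                    [4, 0, 0]], dtype=np.int32)
axis = np.array([1], dtype=np.int32)

node = ng.gather(data, indices, axis, batch_dims=1)
# Executing the node (e.g. through the run_op_node test helper)
# yields [[1, 1, 5], [10, 6, 6]].
```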
From 6ecadc1548f78accb8921e0e1f1c2867d59b0d0e Mon Sep 17 00:00:00 2001
From: Taylor Yeonbok Lee
Date: Wed, 12 May 2021 18:33:24 +0900
Subject: [PATCH 02/27] [IE CLDNN] Add description for new CLDNN plugin configuration MAX_NUM_THREADS (#5582)

---
 docs/IE_DG/supported_plugins/CL_DNN.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/IE_DG/supported_plugins/CL_DNN.md b/docs/IE_DG/supported_plugins/CL_DNN.md
index ea32d2d8ceeaca..995d3da746e3d3 100644
--- a/docs/IE_DG/supported_plugins/CL_DNN.md
+++ b/docs/IE_DG/supported_plugins/CL_DNN.md
@@ -116,7 +116,7 @@ When specifying key values as raw strings (that is, when using Python API), omit
 | `KEY_CLDNN_SOURCES_DUMPS_DIR` | `""` | `""` | Final optimized clDNN OpenCL sources dump output directory |
 | `KEY_GPU_THROUGHPUT_STREAMS` | `KEY_GPU_THROUGHPUT_AUTO`, or positive integer| 1 | Specifies a number of GPU "execution" streams for the throughput mode (upper bound for a number of inference requests that can be executed simultaneously).
This option can be used to decrease GPU stall time by providing a more effective load from several streams. Increasing the number of streams is usually more effective for smaller topologies or smaller input sizes. Note that your application should provide enough parallel slack (e.g. running many inference requests) to leverage full GPU bandwidth. Additional streams consume several times more GPU memory, so make sure the system has enough memory available to suit parallel stream execution. Multiple streams might also put additional load on the CPU. If CPU load increases, it can be regulated by setting an appropriate `KEY_CLDNN_PLUGIN_THROTTLE` option value (see above). If your target system has a relatively weak CPU, keep throttling low.
The default value is 1, which implies latency-oriented behavior.
`KEY_GPU_THROUGHPUT_AUTO` creates the bare minimum of streams needed to improve performance; this is the most portable option if you are not sure how many resources your target machine has (and what the optimal number of streams would be).
A positive integer value creates the requested number of streams. |
| `KEY_EXCLUSIVE_ASYNC_REQUESTS` | `YES` / `NO` | `NO` | Forces async requests (also from different executable networks) to execute serially.|
-
+| `KEY_CLDNN_MAX_NUM_THREADS` | `integer value` | `maximum # of HW threads available in host environment` | Specifies the number of CPU threads that the clDNN engine can use, e.g. for JIT compilation of clDNN kernels or clDNN CPU kernel processing. The default value is the maximum number of threads available in the host environment, which minimizes LoadNetwork time, where clDNN kernel build time occupies a large portion. If the specified value is larger than the number of available threads or less than zero, it is set to the number of available threads. A smaller value can be specified to fit the usage scenario, e.g. when the user wants to keep some CPU threads free while the clDNN plugin is running. Note that lowering this value affects not only network loading time but also the CPU layers of clDNN networks that are optimized with multi-threading. |

 ## Note on Debug Capabilities of the GPU Plugin

 Inference Engine GPU plugin provides the possibility to dump user custom OpenCL™ kernels to a file to allow you to properly debug compilation issues in your custom kernels.
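As the table above notes, Python users pass config keys as raw strings (without the `KEY_` prefix). A minimal sketch of capping the clDNN engine at four CPU threads; the raw key string and the model paths are illustrative assumptions, not taken from this patch:

```python
from openvino.inference_engine import IECore

ie = IECore()
# Limit the threads clDNN may use, e.g. for kernel JIT compilation.
# Out-of-range values fall back to the number of available HW threads.
ie.set_config({"CLDNN_MAX_NUM_THREADS": "4"}, "GPU")

net = ie.read_network(model="model.xml", weights="model.bin")  # hypothetical paths
exec_net = ie.load_network(network=net, device_name="GPU")
```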
From c76c0eb39e38f2d470b0283911081af31e000d54 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Wed, 12 May 2021 12:57:48 +0300
Subject: [PATCH 03/27] Moved ie_tests to legacy (#5561)

* Removed obsolete tests files

* Removed old SLT

* Moved ie_tests to legacy

---
 .../functional_test_utils/CMakeLists.txt | 1 -
 .../tests_deprecated/behavior/CMakeLists.txt | 2 +-
 .../functional/CMakeLists.txt | 1 -
 .../functional/cldnn/CMakeLists.txt | 62 +-
 .../functional/gna/CMakeLists.txt | 1 +
 .../functional/ie_tests/CMakeLists.txt | 36 -
 .../ie_tests/include/base_matcher.hpp | 30 -
 .../include/classification_matcher.hpp | 43 --
 .../ie_tests/include/custom_matcher.hpp | 80 ---
 .../ie_tests/include/label_probability.hpp | 70 --
 .../functional/ie_tests/include/net_model.hpp | 39 -
 .../include/object_detection_matcher.hpp | 127 ----
 .../include/optimized_network_matcher.hpp | 58 --
 .../ie_tests/include/raw_matcher.hpp | 35 -
 .../ie_tests/include/regression_config.hpp | 186 -----
 .../ie_tests/include/regression_reference.hpp | 24 -
 .../ie_tests/include/regression_tests.hpp | 678 ------------------
 .../ie_tests/include/segmentation_matcher.hpp | 75 --
 .../functional/ie_tests/src/base_matcher.cpp | 118 ---
 .../ie_tests/src/classification_matcher.cpp | 297 --------
 .../ie_tests/src/custom_matcher.cpp | 430 -----------
 .../functional/ie_tests/src/net_model.cpp | 19 -
 .../ie_tests/src/object_detection_matcher.cpp | 278 -------
 .../src/optimized_network_matcher.cpp | 66 --
 .../functional/ie_tests/src/raw_matcher.cpp | 363 ----------
 .../ie_tests/src/segmentation_matcher.cpp | 239 ------
 .../functional/mkldnn/CMakeLists.txt | 53 +-
 .../functional/mkldnn/dummy.cpp | 1 -
 .../functional/shared_tests/CMakeLists.txt | 10 +-
 .../common_single_layer_tests/pool_ref.hpp | 3 +-
 .../single_layer_tests.hpp | 2 +
 .../graph_tools_functional_tests.hpp | 37 -
 .../common_dyn_batch_regression.hpp | 90 ---
 .../shared_tests/input_tests/parser_tests.hpp | 1 -
 .../io_blob_tests/cropResize_tests.hpp | 1 -
 .../shared_tests/lstm/lstm_ir_test.hpp | 91 ---
 .../single_layer_tests/bin_conv_tests.hpp | 425 -----------
 .../deformable_psroi_tests.hpp | 330 ---------
 .../single_layer_tests/gemm_tests.hpp | 492 -------------
 .../single_layer_tests/one_hot_tests.hpp | 208 ------
 .../single_layer_tests/permute_tests.hpp | 168 -----
 .../single_layer_tests/quantize_tests.hpp | 331 ---------
 .../single_layer_tests/reduce_tests.hpp | 402 -----------
 .../single_layer_tests/resample_tests.hpp | 269 -------
 .../single_layer_tests/ti_tests.hpp | 2 -
 .../functional/vpu/CMakeLists.txt | 4 +
 .../helpers/reference_regression.cpp | 22 +-
 .../regression/helpers/vpu_case_common.hpp | 8 +-
 .../regression/helpers/vpu_case_params.hpp | 36 +-
 .../helpers/vpu_classification_case.cpp | 1 +
 .../helpers/vpu_classification_case.hpp | 5 +-
 .../helpers/vpu_raw_results_case.cpp | 3 +
 .../vpu/vpu_base/myriad_layers_tests.hpp | 1 +
 .../tests_deprecated/helpers/CMakeLists.txt | 5 +
 .../src => helpers}/ie_core_adapter.cpp | 0
 .../include => helpers}/ie_core_adapter.hpp | 0
 .../helpers/ir_gen_helper.cpp | 81 ---
 .../helpers/ir_gen_helper.hpp | 40 --
 .../helpers/tests_common_func.cpp | 267 -------
 .../helpers/tests_common_func.hpp | 89 ---
 .../unit/engines/mkldnn/dummy.cpp | 4 -
 61 files changed, 51 insertions(+), 6789 deletions(-)
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/CMakeLists.txt
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/base_matcher.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/classification_matcher.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/custom_matcher.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/label_probability.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/net_model.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/object_detection_matcher.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/optimized_network_matcher.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/raw_matcher.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/regression_config.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/regression_reference.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/regression_tests.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/include/segmentation_matcher.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/base_matcher.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/classification_matcher.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/net_model.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/object_detection_matcher.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/optimized_network_matcher.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp
 delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/graph_tools/graph_tools_functional_tests.hpp
 delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/inference_engine_regression_tests/common_dyn_batch_regression.hpp
 delete mode
100644 inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/deformable_psroi_tests.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gemm_tests.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/one_hot_tests.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/permute_tests.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/quantize_tests.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/reduce_tests.hpp delete mode 100644 inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/resample_tests.hpp rename inference-engine/tests_deprecated/{functional/ie_tests/src => helpers}/ie_core_adapter.cpp (100%) rename inference-engine/tests_deprecated/{functional/ie_tests/include => helpers}/ie_core_adapter.hpp (100%) delete mode 100644 inference-engine/tests_deprecated/helpers/ir_gen_helper.cpp delete mode 100644 inference-engine/tests_deprecated/helpers/ir_gen_helper.hpp delete mode 100644 inference-engine/tests_deprecated/helpers/tests_common_func.cpp delete mode 100644 inference-engine/tests_deprecated/helpers/tests_common_func.hpp delete mode 100644 inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/CMakeLists.txt b/inference-engine/tests/ie_test_utils/functional_test_utils/CMakeLists.txt index f391e1ce202c36..6ed92869536de3 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/CMakeLists.txt +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/CMakeLists.txt @@ -37,4 +37,3 @@ addIeTarget( ie_faster_build(${TARGET_NAME} PCH PRIVATE "src/precomp.hpp" ) - diff --git a/inference-engine/tests_deprecated/behavior/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/CMakeLists.txt index 8b32bf5c19db8a..0cdfeb9f891387 100644 --- a/inference-engine/tests_deprecated/behavior/CMakeLists.txt +++ b/inference-engine/tests_deprecated/behavior/CMakeLists.txt @@ -8,4 +8,4 @@ disable_deprecated_warnings() if (ENABLE_HDDL OR ENABLE_MYRIAD) add_subdirectory(vpu) -endif() \ No newline at end of file +endif() diff --git a/inference-engine/tests_deprecated/functional/CMakeLists.txt b/inference-engine/tests_deprecated/functional/CMakeLists.txt index 8ee3016d989d41..d6bf3a0d2819e0 100644 --- a/inference-engine/tests_deprecated/functional/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/CMakeLists.txt @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -add_subdirectory(ie_tests) add_subdirectory(shared_tests) disable_deprecated_warnings() diff --git a/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt b/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt index 223bf1635f35aa..6c834d0723ecdd 100644 --- a/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt @@ -2,64 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME ClDnnFunctionalTests) +add_executable(ClDnnFunctionalTests dummy.cpp) -file(GLOB CLDNN_TEST_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) - 
-list(APPEND TEST_SRC ${CLDNN_TEST_SOURCES}) - -list(APPEND CLDNN_LIBS - IESharedTests - ${CLDNN__OCL_ICD_LIBPATH}) - -# try to find VA libraries -find_package(PkgConfig QUIET) -if(PkgConfig_FOUND) - pkg_search_module(LIBVA QUIET libva) -endif() - -# TODO: pkg_search_module finds libva not in sysroot -if(ANDROID) - set(LIBVA_FOUND OFF CACHE BOOL "" FORCE) -endif() - -if(LIBVA_FOUND) - list(APPEND CLDNN_LIBS ${LIBVA_LINK_LIBRARIES}) -endif() - -list(APPEND DEPENDENCIES - clDNNPlugin) - -if (ENABLE_MKL_DNN) - list(APPEND DEPENDENCIES - MKLDNNPlugin - HeteroPlugin) -endif() - -# add OpenCL dependency end - -source_group("src" FILES ${TEST_SRC}) -source_group("include" FILES ${TEST_INCLUDE}) - -add_executable(${TARGET_NAME} - ${TEST_SRC} - ${TEST_INCLUDE}) - -target_compile_definitions(${TARGET_NAME} - PUBLIC ${ARGV} - DATA_PATH=\"${DATA_PATH}\" - MODELS_PATH=\"${MODELS_PATH}\") - -if(LIBVA_FOUND) - target_compile_definitions(${TARGET_NAME} PRIVATE ENABLE_LIBVA) - target_include_directories(${TARGET_NAME} PRIVATE ${LIBVA_INCLUDE_DIRS}) -endif() - -target_include_directories(${TARGET_NAME} PRIVATE ${CLDNN__OCL_ICD_INCDIRS}) -target_link_libraries(${TARGET_NAME} PRIVATE ${CLDNN_LIBS}) - -add_dependencies(${TARGET_NAME} ${DEPENDENCIES}) - -add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}) -set_property(TEST ${TARGET_NAME} PROPERTY LABELS GPU) +target_link_libraries(ClDnnFunctionalTests PRIVATE gtest_main) diff --git a/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt b/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt index a09b7cf91a394d..564504b5f78ba8 100644 --- a/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt @@ -41,6 +41,7 @@ target_compile_definitions(${TARGET_NAME} target_link_libraries(${TARGET_NAME} PRIVATE IESharedTests + funcTestUtils ) target_include_directories(${TARGET_NAME} diff --git a/inference-engine/tests_deprecated/functional/ie_tests/CMakeLists.txt b/inference-engine/tests_deprecated/functional/ie_tests/CMakeLists.txt deleted file mode 100644 index dc247e8a817b4b..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/CMakeLists.txt +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set(TARGET_NAME ie_tests) - -file(GLOB TEST_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) -file(GLOB TEST_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) - -# create target - -add_library(${TARGET_NAME} STATIC ${TEST_INCLUDE} ${TEST_SRC}) - -ie_faster_build(${TARGET_NAME} - UNITY -) - -list(APPEND EXPORT_DEPENDENCIES - funcTestUtils - ieTestHelpers - ) - -target_include_directories(${TARGET_NAME} - PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" - PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src" - $) - -target_link_libraries(${TARGET_NAME} PUBLIC - format_reader - ${EXPORT_DEPENDENCIES} - ) - -# developer package - -openvino_developer_export_targets(COMPONENT inference_engine_tests - TARGETS ${TARGET_NAME} ${EXPORT_DEPENDENCIES} ieTestHelpers_s) diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/base_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/base_matcher.hpp deleted file mode 100644 index 84d2a5e6c37d5d..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/base_matcher.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - 
-#include "regression_config.hpp" -#include - -namespace Regression { namespace Matchers { - -using namespace InferenceEngine; - -class BaseMatcher { -protected: - RegressionConfig config; -public: - explicit BaseMatcher(const RegressionConfig &config) : config(config) { -#ifndef NDEBUG - std::cout << "Matching on " << config._device_name << std::endl; -#endif - } - - void checkImgNumber(int dynBatch = -1); -}; - -void loadImage(const std::string &imageFilename, InferenceEngine::Blob::Ptr &blob, bool bgr = true, int batchNumber = 1); - -} // namepspace Matchers -} // namespace Regression diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/classification_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/classification_matcher.hpp deleted file mode 100644 index 0b9b0ee81d1215..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/classification_matcher.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include "base_matcher.hpp" -#include "regression_reference.hpp" -#include -#include "label_probability.hpp" - -namespace Regression { namespace Matchers { - -// this is one more version of classification matcher for new api of async/sync requests -class ClassificationMatcher : public BaseMatcher { -private: - size_t checkResultNumber; - std::vector _executableNetworks; - std::vector > _results; - ResponseDesc _resp; - InferenceEngine::InputsDataMap _inputsInfo; - InferenceEngine::OutputsDataMap _outputsInfo; - public: - explicit ClassificationMatcher(RegressionConfig &config); - void to(std::string modelType); - void to(const std::vector &expected); - - - private: - void readLabels(std::string labelFilePath); - int getIndexByLabel(const std::string &label); - std::string getLabel(unsigned int index); - void checkResult(size_t checkNumber, - const std::vector &expected); - virtual void match(size_t top); - void match_n(size_t top, int index); - void saveResults(const std::vector &topIndexes, const std::vector &probs, size_t top); - - size_t top = 5; -}; - -} } // namespace matchers diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/custom_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/custom_matcher.hpp deleted file mode 100644 index 4225156dcaa86f..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/custom_matcher.hpp +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "base_matcher.hpp" -#include -#include - -IE_SUPPRESS_DEPRECATED_START -namespace Regression { namespace Matchers { - -using namespace InferenceEngine; - -class CustomMatcher : public BaseMatcher { - protected: - InferenceEngine::CNNNetwork network; - InferenceContext ctx; - bool match_in_dctor = false; - int precision; - - public: - - explicit CustomMatcher(const RegressionConfig &config, bool match_in_dctor = false) - : BaseMatcher(config), - match_in_dctor(match_in_dctor), - precision(4) { - if (!match_in_dctor) { - matchCustom(); - checkResult(); - } - } - ~CustomMatcher() { - if (match_in_dctor) { - matchCustom(); - checkResult(); - } - } - - CustomMatcher& withAvgDelta(float value) { - BaseMatcher::config.nearAvgValue = value; - return *this; - } - - CustomMatcher& withDelta(float value) { - BaseMatcher::config.nearValue = value; - return *this; - } - - 
CustomMatcher& setPrecision(int precision) { - this->precision = precision; - return *this; - } - - void matchCustom(); - - template - inline bool isApproximatelyEqual(TReal a, TReal b, TReal tolerance = std::numeric_limits::epsilon()) - { - TReal diff = std::fabs(a - b); - if (diff <= tolerance) - return true; - - if (diff < std::fmax(std::fabs(a), std::fabs(b)) * tolerance) - return true; - - return false; - } - - void checkResult(); - - protected: - InferenceEngine::ExecutableNetwork createExecutableNetworkFromIR(); - InferenceEngine::ExecutableNetwork createExecutableNetworkFromAOT(); -}; - -} -} // namespace Matchers -IE_SUPPRESS_DEPRECATED_END diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/label_probability.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/label_probability.hpp deleted file mode 100644 index 18b8c931e08fe4..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/label_probability.hpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace Regression { -namespace Reference { - -/** - * @class LabelProbability - * @brief A LabelProbability represents predicted data in easy to use format - */ -class LabelProbability { -private: - /** - * @brief Index of current label - */ - int labelIdx = 0; - /** - * @brief Name of class from file with labels - */ - std::string className; - /** - * @brief The probability of prediction - */ - float probability = 0.0f; - -public: - /** - * @brief A constructor of InferenceResults class - * @param labelIdx - index of current label - * @param probability - the probability of prediction - * @param className - name of class from file with labels - * @return InferenceResults object - */ - LabelProbability(int labelIdx, float probability, std::string className) : labelIdx(labelIdx), - className(className), - probability(probability) {} - - /** - * @brief Gets label index - * @return index of current label - */ - const int &getLabelIndex() const { - return labelIdx; - } - - /** - * @brief Gets label name - * @return label - */ - const std::string &getLabel() const { - return className; - } - - /** - * @brief Gets probability - * @return probability - */ - const float &getProbability() const { - return probability; - } -}; - -} // namespace Reference -} // namespace Regression - diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/net_model.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/net_model.hpp deleted file mode 100644 index a96ead95cfd3ec..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/net_model.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -//------------------------------------------------------------------------------ -// class Model -//------------------------------------------------------------------------------ - -class Model { -public: - //Constructors - Model() = default; - explicit Model(const char *that) { - fileName_ = folderName_ = that; - } - - Model(const std::string &folderName, - const std::string &fileName, - const std::string &resolution, - const std::string & extension = "xml"); - - // Accessors - inline std::string folderName() const { return folderName_; }; - inline std::string fileName() const { return fileName_; }; - inline std::string 
resolution() const { return resolutionName_; }; - inline std::string extension() const { return extensionName_; }; - -private: - std::string folderName_; - std::string fileName_; - std::string resolutionName_; - std::string extensionName_; -}; - diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/object_detection_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/object_detection_matcher.hpp deleted file mode 100644 index e862e024c93375..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/object_detection_matcher.hpp +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include "base_matcher.hpp" -#include - -namespace Regression { -namespace Matchers { -//------------------------------------------------------------------------------ -// class ObjectDetectionMatcher -//------------------------------------------------------------------------------ - -class ObjectDetectionMatcher : public BaseMatcher { - -public: - //Helpers - struct DetectedObject; - class ImageDescription; - class NetworkAdapter; - - using ImageDescriptionPtrVect = std::vector>; - using ScoreFunction = std::function; - - //Constructor - ObjectDetectionMatcher(const RegressionConfig &config); - - //Operations - virtual void match(const ScoreFunction&); - void checkResult(const std::vector& desired); - - void to(const ImageDescription &desired, const std::shared_ptr& adapter); - void to(const std::vector& desired, const std::shared_ptr& adapter); - - void to(const ImageDescription &desired, const NetworkAdapter& adapter); - void to(const std::vector& desired, const NetworkAdapter& adapter); - -private: - //Operations - void to(const std::vector& desired, const ScoreFunction&); - //Data section - ImageDescriptionPtrVect res_desc_; -}; - -using DetectedObject = ObjectDetectionMatcher::DetectedObject; -using ImageDescription = ObjectDetectionMatcher::ImageDescription; -using NetworkAdapter = ObjectDetectionMatcher::NetworkAdapter; - -//------------------------------------------------------------------------------ -// class DetectedObject -//------------------------------------------------------------------------------ - -struct ObjectDetectionMatcher::DetectedObject { - //Data section - int objectType; - float xmin, xmax, ymin, ymax, prob; - - //Constructors - DetectedObject(int objectType, float xmin, float ymin, float xmax, float ymax, float prob, int = -1); - DetectedObject(const DetectedObject& other); - - static float ioU(const DetectedObject& detected_object_1_, const DetectedObject& detected_object_2_); - - //Operations - void printObj(); -}; - -//------------------------------------------------------------------------------ -// class ImageDescription -//------------------------------------------------------------------------------ - -class ObjectDetectionMatcher::ImageDescription { -public: - // Constructors - ImageDescription(bool check_probs = false); - ImageDescription(const std::list &alist, bool check_probs = false); - ImageDescription(const ImageDescription& obj); - - //Operations - static float ioUMultiple(const ImageDescription &detected_objects, const ImageDescription &desired_objects); - void addDetectedObject(const DetectedObject& detected_obj); - - // Accessors - inline bool checkProbs() const; -public: - //Data section - std::list alist; - -private: - //Data section - bool check_probs_; -}; - 
-//------------------------------------------------------------------------------ -// class NetworkAdapter -//------------------------------------------------------------------------------ - -class ObjectDetectionMatcher::NetworkAdapter { -public: - //Operations - virtual std::vector> score(InferenceEngine::CNNNetwork network, - std::shared_ptr ie, - const std::string& deviceName, - const std::map& config, - const std::vector& images_files_names, - bool with_reshape = false, - bool useExportImport = false) const = 0; - - //Destructor - virtual ~NetworkAdapter() = default; -}; - -//------------------------------------------------------------------------------ -// Implementation of methods of class ImageDescription -//------------------------------------------------------------------------------ - -inline bool ImageDescription::checkProbs() const { - return check_probs_; -} - -} // namespace matchers -} // namespace regression \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/optimized_network_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/optimized_network_matcher.hpp deleted file mode 100644 index a47d52b38848b2..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/optimized_network_matcher.hpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "custom_matcher.hpp" - -namespace Regression { namespace Matchers { - -using namespace InferenceEngine; - -class OptimizedNetworkMatcher : public CustomMatcher { - protected: - std::string path_to_reference_dump; - std::vector firmware; - InferenceEngine::ExecutableNetwork executableApi; - public: - - explicit OptimizedNetworkMatcher(const RegressionConfig &config) - : CustomMatcher(config, true) { - } - ~OptimizedNetworkMatcher() { - if (match_in_dctor) { - matchCustom(); - checkResult(); - //not allow base matcher to match one more time - match_in_dctor = false; - } - } - - void matchCustom(); - - void to(std::string path_to_reference_dump); - std::vector readDumpFromFile(std::string path); - void checkResult(); -}; - -class OptimizedNetworkDumper : public OptimizedNetworkMatcher { - public: - using OptimizedNetworkMatcher::OptimizedNetworkMatcher; - - ~OptimizedNetworkDumper() { - if (match_in_dctor) { - dump(); - //not allow base matcher to match one more time - match_in_dctor = false; - } - } - - void match() {} - - void dump(); - -}; - -} // namespace Regression -} // namespace Matchers diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/raw_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/raw_matcher.hpp deleted file mode 100644 index cfd60ed8d5bdbb..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/raw_matcher.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include "base_matcher.hpp" -#include - -namespace Regression { -namespace Matchers { - -class RawMatcher : public BaseMatcher { - InferenceEngine::BlobMap outputBlobs; -public: - RawMatcher(const RegressionConfig &config) - : BaseMatcher(config) { - } - - virtual void match(); - - void checkResult(const std::map> &allExpected); - - void to(const std::map> &allExpected) { - ASSERT_NO_FATAL_FAILURE(match()); - ASSERT_NO_FATAL_FAILURE(checkResult(allExpected)); - } - -}; - -} -} 
// namespace matchers diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_config.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/regression_config.hpp deleted file mode 100644 index 1fde58a1fdab17..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_config.hpp +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include -#include -#include - -namespace Regression { -using namespace std; - -class InferenceContext { - InferenceEngine::BlobMap _inputs; - std::vector _outputs; - std::vector _fileNames; - std::string _modelPath; - InferenceEngine::Precision _prec; - int _frameNumber = 0; - int _inputIndex = 0; - public: - std::string modelFilePath() const { - return _modelPath; - } - std::vector fileNames() const { - return _fileNames; - } - - void setModelPath(const std::string &path) { - _modelPath = path; - } - - void setModelPrecision(InferenceEngine::Precision prec) { - _prec = prec; - } - - InferenceEngine::Precision getModelPrecision() const { - return _prec; - } - - void setFileNames(const std::vector fileNames) { - _fileNames = fileNames; - } - - void setInput(std::string name, InferenceEngine::Blob::Ptr input) { - _inputs[name] = input; - } - - void setOutput(std::string name, InferenceEngine::Blob::Ptr output) { - - outputs()[name] = output; - } - - InferenceEngine::Blob::Ptr getOutput(std::string name) { - return outputs()[name]; - } - - const InferenceEngine::BlobMap& inputs() const { - return _inputs; - } - - const InferenceEngine::BlobMap& outputs() const { - return const_cast(this)->outputs(); - } - - std::vector& allOutputs() { - return _outputs; - } - - InferenceEngine::BlobMap& outputs() { - if (_outputs.empty()) { - _outputs.push_back(InferenceEngine::BlobMap()); - } - return _outputs.front(); - } - - InferenceEngine::BlobMap& newOutputs() { - _outputs.push_back(InferenceEngine::BlobMap()); - return _outputs.back(); - } - - void setFrameNumber(int num) { - _frameNumber = num; - } - - int getFrameNumber() const { - return _frameNumber; - } - - void setInputIdx(int num) { - _inputIndex = num; - } - - size_t getInputIdx() const { - return _inputIndex; - } - - std::string currentInputFile() const { - if (fileNames().empty()) { - return ""; - } - return fileNames()[std::min(getInputIdx(), fileNames().size()-1)]; - } - - const InferenceEngine::Blob::Ptr currentInputs() const { - auto input = _inputs.begin(); - std::advance(input, getInputIdx()); - return input->second; - } - -}; - -struct RegressionConfig { - struct InputFetcherResult { - bool reset = false; - bool fetchMore = false; - bool fetched = true; - int frameNumber = 0; - bool hasResult = true; - InputFetcherResult() = default; - InputFetcherResult(bool reset, bool fetchMore=false, bool fetched=true, int frameNumber = 0, bool hasResult = true) - : reset(reset), fetchMore(fetchMore), fetched(fetched), frameNumber(frameNumber), hasResult(hasResult) {} - }; - using input_fetcher = std::function; - using model_maker = std::function; - using result_fetcher = std::function; - - std::vector fetch_input; - result_fetcher fetch_result; - model_maker make_model; - string _path_to_models; - string _path_to_aot_model; - vector _paths_to_images; - string _device_name; - string _firmware; - string _tmp_firmware; - vector labels; - double nearValue = 0.0; - double nearAvgValue = 0.0; - double maxRelativeError = 0.0; - 
double meanRelativeError = 0.0; - bool batchMode = false; - bool compactMode = true; - bool int8Mode = false; - bool isAsync = false; - int batchSize = 1; - //number of async infer requests to create - int _nrequests = 1; - int topKNumbers = -1; - int _numNetworks = 1; - - bool useDynamicBatching = false; - int dynBatch = -1; - bool print = false; - bool useExportImport = false; - std::size_t printNum = 0; - - vector referenceOutput; - vector referenceBin; - - InferenceEngine::Blob::Ptr outputBlob; - std::string outputLayer; - InferenceEngine::Precision _inputPrecision; - InferenceEngine::Precision modelPrecision; - InferenceEngine::Precision _outputPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::map _outputBlobPrecision; - std::map* perfInfoPtr = nullptr; - std::map plugin_config; - std::map deviceMapping; - - std::shared_ptr ie_core; - - bool _reshape = false; -}; - -enum InputFormat { - RGB = 0, - BGR = 1 -}; - -} diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_reference.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/regression_reference.hpp deleted file mode 100644 index d743ad713727d9..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_reference.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include "label_probability.hpp" - -namespace Regression { -namespace Reference { - -struct ClassificationScoringResultsForTests : public LabelProbability{ - ClassificationScoringResultsForTests(float prob, const std::string & label) - : LabelProbability(0, prob, label ){ - } -}; - -extern std::map> values; - -} // namespace Reference -} // namespace Regression diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_tests.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/regression_tests.hpp deleted file mode 100644 index fc8db0deb2a71b..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_tests.hpp +++ /dev/null @@ -1,678 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include "regression_reference.hpp" -#include "regression_config.hpp" - -#include "net_model.hpp" -#include "segmentation_matcher.hpp" -#include "custom_matcher.hpp" -#include "raw_matcher.hpp" -#include "classification_matcher.hpp" -#include "object_detection_matcher.hpp" -#include "optimized_network_matcher.hpp" - -#include "functional_test_utils/plugin_cache.hpp" - -#ifdef near -#undef near -#endif - - -namespace Regression { -using namespace Matchers; - -/** - * @brief wether to reset plugin after feeding this input, default is false. 
- */ - -#define afterReset(ACTOR) setCustomInput([&](const Regression::InferenceContext & _) -> \ - Regression::RegressionConfig::InputFetcherResult{return {true, false, false};}) - -#define withCustomInput(ACTOR) setCustomInput([&](const Regression::InferenceContext & _) -> \ - Regression::RegressionConfig::InputFetcherResult{ACTOR; return {};}) - -#define withCustomOutput(ACTOR) setCustomOutput([&](const Regression::InferenceContext & _){ACTOR;}) -#define withCustomModel(ACTOR) setCustomModel([&](const Regression::InferenceContext & _){ACTOR;}) - - -enum EMean { - eNo, - eValues, - eImage -}; - -static std::string format_mean(EMean isMean) { - switch (isMean) { - case eNo:return "_no_mean"; - - case eImage:return "_mf"; - - case eValues:return ""; - } - return nullptr; -} - -inline std::ostream &operator<<(std::ostream &os, EMean mean) { - return os << format_mean(mean); -} - -template -class ModelSelector { - - template - friend class ModelSelector; // every B is a friend of A - - - enum EPrecision { - eq78, efp32, efp16, ei16, ei8 - }; - - enum EGroup { - eNoGroup, eGroup - }; - - - static std::string format_precision(EPrecision precision) { - switch (precision) { - case efp32:return "fp32"; - - case eq78:return "q78"; - - case efp16:return "fp16"; - - case ei16:return "i16"; - - case ei8: return "i8"; - } - return nullptr; - } - - static std::string format_group(EGroup isGroup) { - switch (isGroup) { - case eNoGroup:return ""; - - case eGroup:return "_group"; - } - return nullptr; - } - - friend std::ostream &operator<<(std::ostream &os, EPrecision precision) { - return os << format_precision(precision); - } - - friend std::ostream &operator<<(std::ostream &os, EGroup group) { - return os << format_group(group); - } - - - Model model; - RegressionConfig config; - EMean isMean = eValues; - EPrecision precision = eq78; - EGroup isGroup = eNoGroup; - - private: - std::string prepareModelMatching() { - std::stringstream path_to_input; - path_to_input << TestDataHelpers::get_data_path(); - path_to_input << kPathSeparator - << model.resolution() << kPathSeparator; - for (auto & fileName : config._paths_to_images) { - fileName = path_to_input.str() + fileName; - } - - if (model.folderName().empty() || model.fileName().empty()) { - return ""; - } - ModelsPath path_to_model; - std::stringstream prc; - path_to_model << kPathSeparator - << model.folderName() << kPathSeparator - << model.fileName() << "_" << precision << isMean << isGroup << "." 
<< model.extension(); - - return path_to_model.str(); - } - - ModelSelector() = default; - - std::string getReferenceResultsLabel() { - std::stringstream ss; - for (auto&& v: config.ie_core->GetVersions(config._device_name)) { - const InferenceEngine::Version& version = v.second; - if (nullptr != version.description) { - ss << version.description; - break; - } - } - std::string pluginName = ss.str(); - if (pluginName.empty()) - std::cerr << "getReferenceResultsLabel() failed for device: \"" << config._device_name << "\"" << std::endl; - - return pluginName + "_" + model.folderName() + format_mean(isMean) - + "_" + format_precision(precision) + format_group(isGroup); - } - - bool loadBlobFile(const char* fname, std::vector& outData) - { - if (!fname) - return false; - FILE *f = fopen(fname, "rb"); - if (!f) { - return false; - } - fseek(f, 0, SEEK_END); - int fsize = ftell(f); - fseek(f, 0, SEEK_SET); - outData.resize(fsize); - size_t bytesRead = fread(outData.data(), 1, fsize, f); - if (bytesRead != fsize) { - std::cout << "cannot read file" << std::endl; - return false; - } - fclose(f); - - return true; - } - public : - - explicit ModelSelector(const RegressionConfig& config) : config(config) {} - - template - explicit ModelSelector(T *oldSelector) { - config = oldSelector->config; - } - - ModelSelector &And(const std::string &fileName) { - config._paths_to_images.push_back(fileName); - return *this; - } - - ModelSelector &And(const std::vector &filesNamesVector) { - config._paths_to_images.insert(config._paths_to_images.end(), filesNamesVector.begin(), filesNamesVector.end()); - return *this; - } - - ModelSelector &on(const std::string &fileName) { - config._paths_to_images.push_back(fileName); - return *this; - } - - ModelSelector &print(const std::size_t printNum = 10) { - config.print = true; - config.printNum = printNum; - return *this; - } - - ModelSelector &useExportImport() { - config.useExportImport = true; - return *this; - } - - /// @breif - tile last batch - ModelSelector &onN_infers(int nTimesCopyInputImages) { - if (config._paths_to_images.size() != config.batchSize) { - IE_THROW() << "number of input images:" - << config._paths_to_images.size() << " not equal to batch size: " << config.batchSize; - } - auto first_image = config._paths_to_images.end(); - std::advance(first_image, -config.batchSize); - - std::vector data_for_last_infer(first_image, config._paths_to_images.end()); - - for (;nTimesCopyInputImages > 0; nTimesCopyInputImages--) { - config._paths_to_images.insert(config._paths_to_images.end(), data_for_last_infer.begin(), data_for_last_infer.end()); - } - return *this; - } - /** - * @brief - tile last input image - * @param nTimesCopyLastImagePlusOne = number of times last image will be tiled + 1 - * @deprecated - */ - ModelSelector ×(int nTimesCopyLastImagePlusOne) { - tile(nTimesCopyLastImagePlusOne - 1); - return *this; - } - /** - * @brief - tile last input image - * @param nTimesCopyLastImage = number of times last image will be tiled - * @deprecated - */ - ModelSelector &tile(int nTimesCopyLastImage) { - if (config._paths_to_images.empty()) { - return *this; - } - auto lastImage = config._paths_to_images.back(); - for (;nTimesCopyLastImage > 0; nTimesCopyLastImage--) { - config._paths_to_images.push_back(lastImage); - } - return *this; - } - - ModelSelector &onModel( - std::string _folderName, - std::string _fileName, - std::string _resolutionName) { - model = {_folderName, _fileName, _resolutionName}; - return *this; - } - - ModelSelector &onArkInput() { - 
model = {model.folderName(), model.fileName(), "ark"}; - return *this; - } - - ModelSelector &onFP32() { - precision = efp32; - config.modelPrecision = Precision::FP32; - return *this; - } - - ModelSelector &onI16() { - precision = ei16; - config.modelPrecision = Precision::I16; - return *this; - } - - ModelSelector &onFP16() { - precision = efp16; - config.modelPrecision = Precision::FP16; - return *this; - } - - ModelSelector &onQ78() { - precision = eq78; - config.modelPrecision = Precision::Q78; - return *this; - } - - ModelSelector& onI8() { - precision = ei8; - config.modelPrecision = Precision::I8; - return *this; - } - - ModelSelector &withInputPrecision(InferenceEngine::Precision p) { - config._inputPrecision = p; - return *this; - } - - ModelSelector &withOutputPrecision(InferenceEngine::Precision p) { - config._outputPrecision = p; - return *this; - } - - ModelSelector &withOutputPrecision(std::map p) { - static_assert(std::is_same::value, "Output precision per blob implemented only in RawMatcher"); - config._outputBlobPrecision = p; - return *this; - } - - template - typename enable_if::value, bool>::type - needInput() const { - return false; - } - - template - typename enable_if::value, bool>::type - needInput() const { - return true; - } - - ModelSelector &withBatch() { - config.batchMode = true; - return *this; - } - - ModelSelector &withBatch(int nBatchSize) { - config.batchSize = nBatchSize; - // assumption made that inputs already gets provided to matcher - if (config._paths_to_images.empty() && needInput()) { - IE_THROW() << "withBatch token should follow after setting up inputs"; - } - if (config._paths_to_images.size() < nBatchSize) { - tile(nBatchSize - config._paths_to_images.size()); - } - - return *this; - } - - ModelSelector &withDynBatch(int nLimit, int nBatchSize) { - config.batchMode = true; - config.useDynamicBatching = true; - config.batchSize = nLimit; - config.dynBatch = nBatchSize; - return *this; - } - - ModelSelector &withAsyncInferRequests(int nRequests) { - config._nrequests = nRequests; - return *this; - } - - ModelSelector &onMultipleNetworks(int nNetworks) { - config._numNetworks = nNetworks; - return *this; - } - - ModelSelector &setMean(EMean mean) { - isMean = mean; - return *this; - } - - ModelSelector &withoutMean() { - isMean = eNo; - return *this; - } - - ModelSelector &withMeanValues() { - isMean = eValues; - return *this; - } - - ModelSelector &withMeanImage() { - isMean = eImage; - return *this; - } - - ModelSelector &withGroup() { - isGroup = eGroup; - return *this; - } - - ModelSelector withTopK(int topKNumbers) { - config.topKNumbers = topKNumbers; - return *this; - } - - ModelSelector &withPluginConfig(const std::map & plugin_config) { - config.plugin_config = plugin_config; - return *this; - } - - ModelSelector &addPluginConfig(const std::map & plugin_config) { - config.plugin_config.insert(plugin_config.begin(), plugin_config.end()); - return *this; - } - - ModelSelector &withPluginConfigOption(std::string key, std::string value) { - config.plugin_config[key] = value; - return *this; - } - - ModelSelector & withImportedExecutableNetworkFrom(std::string location) { - config._path_to_aot_model = location; - return *this; - } - - template - ModelSelector &modifyConfig(const T & modifier) { - modifier(config); - return *this; - } - - ModelSelector & usingAsync() { - config.isAsync = true; - return *this; - } - - ModelSelector &fromLayer(const std::string & name) { - config.outputLayer = name; - return *this; - } - - ModelSelector& 
doReshape(bool reshape = true) { - config._reshape = reshape; - return *this; - } - - // type define when class in one of building method converted to new one or not -#define CUSTOM_TYPE\ - typename std::conditional::value,\ - ModelSelector&,\ - ModelSelector>::type - - private : - template - typename enable_if::value, CUSTOM_TYPE>::type modify_config(const A& action) { - action(config); - return *this; - } - - template - typename enable_if::value, CUSTOM_TYPE>::type modify_config(const A& action) { - ModelSelector newSelector(this); - action(newSelector.config); - return newSelector; - } - - public: - - template - CUSTOM_TYPE setCustomModel(const T& model_maker) { - return modify_config([&](RegressionConfig & this_config) { - this_config.make_model = model_maker; - }); - } - - template - CUSTOM_TYPE setCustomInput(const T & fetcher) { - return modify_config([&](RegressionConfig & this_config) { - this_config.fetch_input.push_back(fetcher); - }); - } - - template - CUSTOM_TYPE setCustomOutput(const T & fetcher) { - return modify_config([&](RegressionConfig & this_config) { - this_config.fetch_result = fetcher; - }); - } - - template - M equalsTo(const std::initializer_list & rhs) { - config.referenceOutput.insert(config.referenceOutput.end(), rhs.begin(), rhs.end()); - return near(0.0); - } - - template - M near(double nearValue, const TBlob & rhs) { - config.nearValue = nearValue; - for (const auto & v : rhs) { - config.referenceOutput.push_back(v); - } - config._path_to_models = prepareModelMatching(); - return M(config); - } - - M to(Blob::Ptr rhs) { - config.outputBlob = rhs; - config._path_to_models = prepareModelMatching(); - return M(config); - } - - - template - M near(double nearValue, const initializer_list> & rhs) { - config.nearValue = nearValue; - - for (auto && frame : rhs) { - for (auto && data : frame) { - config.referenceOutput.push_back(data); - } - } - config._path_to_models = prepareModelMatching(); - return M(config); - } - - template - M near_avg(double nearAvgValue, const TBlob & rhs) { - config.nearAvgValue = nearAvgValue; - return near(0.0, rhs); - } - - M near(double nearValue, double meanRelativeError = 0, double maxRelativeError = 0) { - config.nearValue = nearValue; - config.meanRelativeError = meanRelativeError; - config.maxRelativeError = maxRelativeError; - config._path_to_models = prepareModelMatching(); - return M(config); - } - - void equalToReferenceWithDelta(double nearValue) { - config.nearValue = nearValue; - config._path_to_models = prepareModelMatching(); - M(config).to(getReferenceResultsLabel()); - } - - template - M equalToReference(const TBlob & rhs) { - for (const auto & v : rhs) { - config.referenceOutput.push_back(v); - } - config._path_to_models = prepareModelMatching(); - return M(config, true); - } - - // place holder to run the matcher without providing any reference - void possible() { - config._path_to_models = prepareModelMatching(); - auto tmp = M(config); - ASSERT_NO_FATAL_FAILURE(tmp.match()); - } -}; - -/** - * @class PluginVersion - * @brief A PluginVersion class stores plugin version and initialization status - */ -struct PluginVersion : public InferenceEngine::Version { - bool initialized = false; - - explicit PluginVersion(const InferenceEngine::Version *ver) { - if (nullptr == ver) { - return; - } - InferenceEngine::Version::operator=(*ver); - initialized = true; - } - - operator bool() const noexcept { - return initialized; - } -}; - -class Builder { -private: - std::shared_ptr ie; - RegressionConfig config; - -public: - 
Builder(std::shared_ptr _ie) : ie(_ie) { - config.ie_core = ie; - -#ifndef NDEBUG - auto devices = ie->GetAvailableDevices(); - std::cout << "Available devices (" << devices.size() << "):" << std::endl; - for (auto&& d : devices) { - std::cout << "Device: " << d << std::endl; - for (auto&& v : ie->GetVersions(d)) - std::cout << "\t" << v.first << " : " << PluginVersion(&v.second) << std::endl; - } -#endif - } - - Builder & usingDevice(const std::string & device_name) { - config._device_name = device_name; - return *this; - } - - Builder& setPerfInfo(std::map& map) { - config.perfInfoPtr = ↦ - config.plugin_config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES); - return *this; - } - - Builder& setDeviceMapping(const std::map &deviceMapping) { - config.deviceMapping = deviceMapping; - return *this; - } - - ModelSelector classificationResults() { - return ModelSelector(config); - } - - ModelSelector classificationResultsFor(const std::vector & input) { - return ModelSelector(config).And(input); - } - - ModelSelector dumpedOptimizedNetwork() { - return ModelSelector(config); - } - - ModelSelector dumpOptimizedNetworkTo(const std::string & file) { - config._path_to_aot_model = file; - return ModelSelector(config); - } - - ModelSelector classificationResultsFor(const std::string &input = { }) { - auto selector = ModelSelector(config); - if (!input.empty()) { - selector.And(input); - } - return selector; - } - - ModelSelector segmentationResultsFor(const std::string &fileName) { - return ModelSelector(config).And(fileName); - } - ModelSelector rawResultsFor(const std::string &fileName) { - return ModelSelector(config).And(fileName); - } - ModelSelector objectDetectionResultsFor(const std::string &fileName) { - return ModelSelector(config).And(fileName); - } - ModelSelector objectDetectionResults() { - return ModelSelector(config); - } - ModelSelector objectDetectionResultsFor(const vector &filesNamesVector) { - return ModelSelector(config).And(filesNamesVector); - } -}; - -class RegressionTests : public TestsCommon { -public: - // to force overload - virtual std::string getDeviceName() const = 0; - - Builder please() { - std::shared_ptr ie = PluginCache::get().ie(getDeviceName()); - Builder b(ie); - b.usingDevice(getDeviceName()); - return b; - } -}; - -} - -#define assertThat() SCOPED_TRACE("");please() -#define saveAfterInfer() SCOPED_TRACE("");please() diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/segmentation_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/segmentation_matcher.hpp deleted file mode 100644 index fdc86455ede157..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/include/segmentation_matcher.hpp +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include -#include -#include "base_matcher.hpp" - -/** - * @class Color - * @brief A Color class stores channels of a given color - */ -class Color { -private: - unsigned char _r; - unsigned char _g; - unsigned char _b; - -public: - /** - * A default constructor. 
- * @param r - value for red channel - * @param g - value for green channel - * @param b - value for blue channel - */ - Color(unsigned char r, - unsigned char g, - unsigned char b) : _r(r), _g(g), _b(b) {} - - inline unsigned char red() { - return _r; - } - - inline unsigned char blue() { - return _b; - } - - inline unsigned char green() { - return _g; - } -}; - -namespace Regression { namespace Matchers { - -class SegmentationMatcher : public BaseMatcher { - private: - InferenceEngine::TBlob::Ptr output; - std::vector> outArray; - size_t C = -1; - - public: - SegmentationMatcher (const RegressionConfig & config) - : BaseMatcher(config) { - } - - virtual void match(); - - static float compareOutputBmp(std::vector> data, size_t classesNum, const std::string& inFileName); - - void checkResult(std::string imageFileName); - - SegmentationMatcher& to(std::string imageFileName) { - match(); - checkResult(imageFileName); - return *this; - } -}; - -} } // namespace matchers diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/base_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/base_matcher.cpp deleted file mode 100644 index 9be0afecb4cc46..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/base_matcher.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "base_matcher.hpp" -#include -#include - -namespace Regression { namespace Matchers { - -using namespace InferenceEngine; - -void loadImage(const std::string &imageFilename, InferenceEngine::Blob::Ptr &blob, bool bgr, int batchNumber) { - TensorDesc tensDesc = blob->getTensorDesc(); - if (tensDesc.getPrecision() != InferenceEngine::Precision::FP16 - && tensDesc.getPrecision() != InferenceEngine::Precision::FP32 - && tensDesc.getPrecision()!= InferenceEngine::Precision::U8 - && tensDesc.getPrecision()!= InferenceEngine::Precision::I16) { - IE_THROW() << "loadImage error: Input must have FP16, FP32 or U8 precision"; - } - - if (tensDesc.getLayout() != NHWC && tensDesc.getLayout() != NCHW) { - IE_THROW() << "loadImage error: Input must have NHWC or NHWC layout"; - } - - FormatReader::ReaderPtr reader(imageFilename.c_str()); - if (reader.get() == nullptr) { - IE_THROW() << "loadImage error: image " << imageFilename << " cannot be read!"; - } - - size_t w = tensDesc.getDims()[3]; - size_t h = tensDesc.getDims()[2]; - if (reader->width() != w || reader->height() != h) { - IE_THROW() << "loadImage error: Input sizes mismatch, got " << reader->width() << "x" << reader->height() - << " expecting " << w << "x" << h; - } - - auto numBlobChannels = tensDesc.getDims()[1]; - size_t numImageChannels = reader->size() / (reader->width() * reader->height()); - if (numBlobChannels != numImageChannels && numBlobChannels != 1) { - IE_THROW() << "loadImage error: Input channels mismatch: image channels " << numImageChannels << ", " - << "network channels " << numBlobChannels << ", expecting count of image channels are equal " - << "to count if network channels or count of network channels are equal to 1"; - } - - auto nPixels = w * h; - uint8_t *BGR8 = reader->getData().get(); - for (unsigned int i = 0; i < nPixels; i++) { - for (unsigned int j = 0; j < numBlobChannels; j++) { - uint8_t val = bgr ? BGR8[i * numImageChannels + j] : BGR8[i * numBlobChannels + (numBlobChannels - j - 1)]; - size_t idx = tensDesc.getLayout() == NHWC ? 
(i * numBlobChannels + j) : (j * nPixels + i) - + nPixels * numBlobChannels * batchNumber; - auto buf = blob->buffer(); - switch (blob->getTensorDesc().getPrecision()) { - case Precision::U8: - { - auto inputDataPtr = buf.as(); - inputDataPtr[idx] = val; - break; - } - case Precision::I16: - { - auto *inputDataPtr = buf.as(); - inputDataPtr[idx] = val; - break; - } - case Precision::FP16: - { - ie_fp16 *inputDataPtr = buf.as(); - inputDataPtr[idx] = InferenceEngine::PrecisionUtils::f32tof16(static_cast(val)); - break; - } - case Precision::FP32: - { - auto inputDataPtr = buf.as(); - inputDataPtr[idx] = static_cast(val); - break; - } - default: - IE_THROW() << "Unsupported precision!"; - } - } - } -} - -void BaseMatcher::checkImgNumber(int dynBatch) { - InferenceEngine::Core ieCore; - CNNNetwork net = ieCore.ReadNetwork(config._path_to_models); - auto numInputs = net.getInputsInfo().size(); - - int batch = dynBatch > 0 ? dynBatch : config.batchSize; - - if ((numInputs * batch) > config._paths_to_images.size()) { - - auto readImagesSize = config._paths_to_images.size(); - size_t diff = (numInputs * batch) / readImagesSize; - - for (size_t i = 1; i < diff; i++) { - for (size_t j = 0; j < readImagesSize; j++) { - config._paths_to_images.push_back(config._paths_to_images[j]); - } - } - if (readImagesSize * diff != (numInputs * batch)) { - for (size_t j = 0; j < (numInputs * batch) - readImagesSize * diff; j++) { - config._paths_to_images.push_back(config._paths_to_images.at(j)); - } - } - } else if ((numInputs * batch) < config._paths_to_images.size()) { - while (config._paths_to_images.size() != batch * numInputs) { - auto name = config._paths_to_images.back(); - std::cout << "[WARNING]: Image " << name << " skipped!" << std::endl; - config._paths_to_images.pop_back(); - } - } -} - -} // namepspace Matchers -} // namespace Regression diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/classification_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/classification_matcher.cpp deleted file mode 100644 index 7147b1b08797f5..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/classification_matcher.cpp +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "classification_matcher.hpp" -#include -#include -#include - -using namespace Regression ; -using namespace Regression :: Matchers ; - -ClassificationMatcher::ClassificationMatcher(RegressionConfig &config) - : BaseMatcher(config) { - // Get file names for files with weights and labels - std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin"; - - auto cnnNetwork = config.ie_core->ReadNetwork(config._path_to_models, binFileName); - - std::string labelFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".labels"; - - // Try to read labels file - readLabels(labelFileName); - - if (config._reshape) { - auto inputShapes = cnnNetwork.getInputShapes(); - inputShapes.begin()->second[0] = config.batchSize; - - cnnNetwork.reshape(inputShapes); - } else if (config.batchSize != 1) { - cnnNetwork.setBatchSize(config.batchSize); - } - - _inputsInfo = cnnNetwork.getInputsInfo(); - _outputsInfo = cnnNetwork.getOutputsInfo(); - for (auto &info : _inputsInfo) { - if (config._inputPrecision != InferenceEngine::Precision::UNSPECIFIED) { - info.second->setPrecision(config._inputPrecision); - } - } - - for (auto &info : _outputsInfo) { - if (config._outputPrecision 
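// Side note on the index expression in loadImage() above: + binds tighter
// than ?:, so the trailing "+ nPixels * numBlobChannels * batchNumber" term
// attaches to the NCHW alternative only, and NHWC writes ignore batchNumber.
// A layout-neutral sketch (blobIndex is a hypothetical helper) that applies
// the batch offset in both cases:
#include <cstddef>

inline size_t blobIndex(bool isNHWC, size_t pixel, size_t channel,
                        size_t nPixels, size_t nChannels, size_t batch) {
    const size_t batchOffset = nPixels * nChannels * batch;      // whole images before this one
    return batchOffset + (isNHWC ? pixel * nChannels + channel   // NHWC: interleaved channels
                                 : channel * nPixels + pixel);   // NCHW: planar channels
}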
!= Precision::UNSPECIFIED) { - info.second->setPrecision(config._outputPrecision); - } else { - info.second->setPrecision(config.modelPrecision); - } - } - - if (config.useDynamicBatching) { - config.plugin_config[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES; - cnnNetwork.setBatchSize(config.batchSize); - } - - for (int i=0; i < config._numNetworks; i++) { - auto loadedExecutableNetwork = config.ie_core->LoadNetwork(cnnNetwork, config._device_name, config.plugin_config); - InferenceEngine::ExecutableNetwork executableNetwork; - if (config.useExportImport) { - std::stringstream stream; - loadedExecutableNetwork.Export(stream); - executableNetwork = config.ie_core->ImportNetwork(stream); - } else { - executableNetwork = loadedExecutableNetwork; - } - _executableNetworks.push_back(executableNetwork); - } - - top = (-1 == config.topKNumbers) ? 5 : config.topKNumbers; -} - -void ClassificationMatcher::to(const std::vector &expected) { - checkResultNumber = 0; - match(std::min(top, expected.size())); - checkResult(checkResultNumber, expected); - checkResultNumber++; -} - -void ClassificationMatcher::to(std::string modelType) { - auto batchSize = config.batchSize; - - if (config.useDynamicBatching) { - batchSize = config.dynBatch; - } - - checkImgNumber(batchSize); - ASSERT_NO_FATAL_FAILURE(match(10)); // This method produces top-10 reference results. - for (size_t i = 0; i < config._paths_to_images.size(); i++) { - const size_t last_slash_idx = config._paths_to_images[i].find_last_of(kPathSeparator); - if (std::string::npos != last_slash_idx) { - config._paths_to_images[i].erase(0, last_slash_idx + 1); - } - if (Regression::Reference::values.find(modelType + "_" + config._paths_to_images[i]) == - Regression::Reference::values.end()) { - FAIL() << "Reference result for " << modelType + "_" + config._paths_to_images[i] << " cannot be found"; - } - ASSERT_NO_FATAL_FAILURE(checkResult(i, Regression::Reference::values[modelType + "_" + config._paths_to_images[i]])); - } - checkResultNumber++; -} - - -void ClassificationMatcher::readLabels(std::string labelFilePath) { - std::fstream fs(labelFilePath, std::ios_base::in); - if (fs.is_open()) { - std::string line; - while (getline(fs, line)) { - config.labels.push_back(TestsCommon::trim(line)); - } - } else { - IE_THROW() << "cannot open label file: " << labelFilePath; - - } -} - -int ClassificationMatcher::getIndexByLabel(const std::string &label) { - auto result = std::find(begin(config.labels), end(config.labels), label); - if (result == config.labels.end()) { - IE_THROW() << "cannot locate index for label : " << label; - } - return static_cast(std::distance(begin(config.labels), result)); -} - -std::string ClassificationMatcher::getLabel(unsigned int index) { - if (config.labels.empty()) { - return "label #" + std::to_string(index); - } - if (index >= config.labels.size()) { - IE_THROW() << "index out of labels file: " << index; - } - - return config.labels[index]; -} - -void ClassificationMatcher::checkResult(size_t checkNumber, - const std::vector &expected) { - if (checkNumber >= _results.size()) { - FAIL() << "Expected number of results(" << checkNumber << ") is more than real number of results: " - << _results.size(); - } - auto result = _results.at(checkNumber); - - std::map expected_map; - int expectedSize = expected.size(); - int resultSize = result.size(); - - if (config.topKNumbers != -1) { - expectedSize = config.topKNumbers; - resultSize = config.topKNumbers; - } - - for (int i = 0; i < expectedSize; ++i) { - 
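// Sketch of the export/import round trip taken when config.useExportImport
// is set (2021-era InferenceEngine API; a ready Core and CNNNetwork are
// assumed): the compiled network is serialized to a stream and re-imported,
// so all inference afterwards runs on the deserialized copy.
#include <sstream>
#include <string>
#include <ie_core.hpp>

InferenceEngine::ExecutableNetwork roundTrip(InferenceEngine::Core& core,
                                             InferenceEngine::CNNNetwork& net,
                                             const std::string& device) {
    auto loaded = core.LoadNetwork(net, device);
    std::stringstream stream;
    loaded.Export(stream);              // serialize the compiled network
    return core.ImportNetwork(stream);  // rebuild it from the stream
}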
expected_map[expected[i].getLabel()] = expected[i].getProbability(); - } - - for (int i = 0; i < resultSize; ++i) { - if (expected_map.count(result[i].getLabel())) { - ASSERT_NEAR(result[i].getProbability(), expected_map[result[i].getLabel()], config.nearValue) - << "Failed for label \"" << result[i].getLabel() << "\" index " << i; - expected_map.erase(result[i].getLabel()); - } else { - // Label which not in expected list can be below last expected element - ASSERT_LE(result[i].getProbability(), expected.back().getProbability() + config.nearValue) - << "Label \"" << result[i].getLabel() << "\" not found or cannot be in expected list"; - } - } - - if (expected_map.size() != 0) { - for (auto & elem: expected_map) { - std::cout << "Label \"" << elem.first << "\" with probability=" - << elem.second << " not found in result list" << std::endl; - } - FAIL(); - } -} - -void ClassificationMatcher::match(size_t top) { - for (int i = 0; i != _executableNetworks.size(); i++) { - match_n(top, i); - } -} - -namespace { - -template -inline void TopResults(unsigned int n, TBlob& input, std::vector& output) { - SizeVector dims = input.getTensorDesc().getDims(); - size_t input_rank = dims.size(); - if (!input_rank || !dims[0]) IE_THROW() << "Input blob has incorrect dimensions!"; - size_t batchSize = dims[0]; - std::vector indexes(input.size() / batchSize); - - n = static_cast(std::min((size_t)n, input.size())); - - output.resize(n * batchSize); - - for (size_t i = 0; i < batchSize; i++) { - size_t offset = i * (input.size() / batchSize); - T* batchData = input.data(); - batchData += offset; - - std::iota(std::begin(indexes), std::end(indexes), 0); - std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes), - [&batchData](unsigned l, unsigned r) { - return batchData[l] > batchData[r]; - }); - for (unsigned j = 0; j < n; j++) { - output.at(i * n + j) = indexes.at(j); - } - } -} - -} - -void ClassificationMatcher::match_n(size_t top, int index) { - try { - auto inferRequest = _executableNetworks[index].CreateInferRequest(); - std::string prevImageName = ""; - - auto batchSize = config.batchSize; - - if (config.useDynamicBatching) { - batchSize = config.dynBatch; - inferRequest.SetBatch(batchSize); - } - - if (config._paths_to_images.size() % batchSize != 0) { - IE_THROW() << "Can not process all input images("<< config._paths_to_images.size() - <<") using given batch size of " << batchSize; - } - // loading images in batches - for (int i = 0; i < config._paths_to_images.size(); i += batchSize) { - - // has same image names - bool areImagesSame = false; - if (i > 0) { - areImagesSame = true; - for (int j = i;j != i + batchSize; j++) { - if (config._paths_to_images[j] != config._paths_to_images[j - batchSize]) { - areImagesSame = false; - break; - } - } - } - if (!areImagesSame) { - for (int j = 0; j != batchSize; j++) { - const auto & imageName = config._paths_to_images[i + j]; - - auto inputBlob = inferRequest.GetBlob(_inputsInfo.begin()->first.c_str()); - loadImage(imageName, inputBlob, true, j); - } - } - - inferRequest.Infer(); - - auto outputBlobPtr = inferRequest.GetBlob(_outputsInfo.begin()->first.c_str()); - - InferenceEngine::TBlob::Ptr outputFP32; - if (outputBlobPtr->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) { - TensorDesc desc = { InferenceEngine::Precision::FP32, outputBlobPtr->getTensorDesc().getDims(), - outputBlobPtr->getTensorDesc().getLayout() }; - outputFP32 = make_shared_blob(desc); - outputFP32->allocate(); - 
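// A self-contained restatement of the per-batch top-N selection implemented
// by TopResults() above: sort indices rather than values, and keep only the
// first n (the helper name and flat-pointer interface are assumptions).
#include <algorithm>
#include <cstddef>
#include <numeric>
#include <vector>

template <typename T>
std::vector<unsigned> topN(const T* data, size_t count, unsigned n) {
    std::vector<unsigned> indexes(count);
    std::iota(indexes.begin(), indexes.end(), 0u);   // 0, 1, ..., count-1
    n = static_cast<unsigned>(std::min<size_t>(n, count));
    std::partial_sort(indexes.begin(), indexes.begin() + n, indexes.end(),
                      [data](unsigned l, unsigned r) { return data[l] > data[r]; });
    indexes.resize(n);                               // indices of the n largest values
    return indexes;
}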
PrecisionUtils::f16tof32Arrays(outputFP32->buffer().as(), outputBlobPtr->cbuffer().as(), outputBlobPtr->size()); - } else if (outputBlobPtr->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) { - outputFP32 = dynamic_pointer_cast>(outputBlobPtr); - } else { - IE_THROW() << "Unsupported output format for test. Supported FP16, FP32"; - } - - vector topClassesIndexes; - TopResults(top, *outputFP32, topClassesIndexes); - std::vector probabilities(outputFP32->buffer().as(), - outputFP32->buffer().as() + outputFP32->size()); - - saveResults(topClassesIndexes, probabilities, top); - } - } catch (InferenceEngine::Exception &e) { - FAIL() << e.what(); - } catch (std::exception &e) { - FAIL() << e.what(); - } -} - -void ClassificationMatcher::saveResults(const std::vector &topIndexes, const std::vector &probs, size_t top) { - - for(auto idx = topIndexes.begin(); idx != topIndexes.end();) { - std::vector topResults; - for (int i = 0; i != top; i++) { - Reference::LabelProbability labelProb(*idx, probs[*idx], getLabel(*idx)); - std::cout << "index=" << labelProb.getLabelIndex() << ", probability=" << labelProb.getProbability() - << ", class=" << labelProb.getLabel() << "\n"; - topResults.push_back(labelProb); - idx++; - } - _results.push_back(topResults); - } -} diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp deleted file mode 100644 index 6ce4b3d0f4e0d2..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp +++ /dev/null @@ -1,430 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include "custom_matcher.hpp" - -using namespace InferenceEngine; - -InferenceEngine::ExecutableNetwork Regression::Matchers::CustomMatcher::createExecutableNetworkFromAOT() { - ExecutableNetwork executableApi; - try { - ctx.setFileNames(config._paths_to_images); - ctx.setModelPrecision(config.modelPrecision); - - executableApi = config.ie_core->ImportNetwork(config._path_to_aot_model, config._device_name, config.plugin_config); - } - catch (std::exception &e) { - GTEST_MESSAGE_(e.what(), ::testing::TestPartResult::kFatalFailure); - } - - return executableApi; - -} - -InferenceEngine::ExecutableNetwork Regression::Matchers::CustomMatcher::createExecutableNetworkFromIR(){ - ExecutableNetwork executableApi; - try { - ctx.setFileNames(config._paths_to_images); - ctx.setModelPrecision(config.modelPrecision); - - if (config.make_model) { - ctx.setModelPath(config._path_to_models); - config.make_model(ctx); - } - - std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin"; - network = config.ie_core->ReadNetwork(config._path_to_models, binFileName); - - // Change batch size if it is not equal 1 - auto inputs = network.getInputsInfo(); - - if (config._inputPrecision) { - for (auto && input : inputs) { - input.second->setPrecision(config._inputPrecision); - // NC is a proper layout for 2d blob if different is not specified, like CN - auto layout = input.second->getTensorDesc().getDims().size() == 4 ? NCHW : NC; - input.second->getInputData()->setLayout(layout); - } - } - - //TODO: why this need - if (inputs.begin()->second->getTensorDesc().getDims().at(0) != 1) { - std::cerr << "[WARNING]: Batch size will be equal to 1." 
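// Sketch of the FP16 -> FP32 widening used before comparison above: reference
// checks always run in FP32, so half-precision outputs are converted first.
// (ie_fp16 and PrecisionUtils ship with the IE headers; the helper name and
// header path are assumptions of this sketch.)
#include <cstddef>
#include <vector>
#include <precision_utils.h>

std::vector<float> toFloat(const ie_fp16* src, size_t n) {
    std::vector<float> dst(n, 0.f);
    InferenceEngine::PrecisionUtils::f16tof32Arrays(dst.data(), src, n);
    return dst;
}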
<< std::endl; - network.setBatchSize(1); - } - - if (config.batchSize != 1) { - network.setBatchSize(config.batchSize); - } - - if (!config.outputLayer.empty()) { - network.addOutput(config.outputLayer); - } - - if (config.useDynamicBatching) { - config.plugin_config[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES; - } - - auto outInfo = network.getOutputsInfo(); - - auto loadedExecutableNetwork = config.ie_core->LoadNetwork(network, config._device_name, config.plugin_config); - if (config.useExportImport) { - std::stringstream stream; - loadedExecutableNetwork.Export(stream); - executableApi = config.ie_core->ImportNetwork(stream); - } else { - executableApi = loadedExecutableNetwork; - } - - } - catch (std::exception &e) { - GTEST_MESSAGE_(e.what(), ::testing::TestPartResult::kFatalFailure); - } - - return executableApi; -} - -void Regression::Matchers::CustomMatcher::matchCustom() { - try { - ExecutableNetwork executableApi; - std::vector inferRequests; - ConstInputsDataMap constInputs; - ConstOutputsDataMap constOutInfo; - ResponseDesc dsc; - StatusCode sts = OK; - - if (!config._path_to_aot_model.empty()) { - ASSERT_NO_FATAL_FAILURE(executableApi = createExecutableNetworkFromAOT()); - } else { - ASSERT_NO_FATAL_FAILURE(executableApi = createExecutableNetworkFromIR()); - } - - if (executableApi) { - for (int i=0; i != config._nrequests; i++ ) { - inferRequests.push_back(executableApi.CreateInferRequest()); - } - } - - if (config.useDynamicBatching) { - for (auto && req : inferRequests) { - req.SetBatch(config.dynBatch); - } - } - - auto make_unified_endpoints = [&] () { - if (executableApi) { - return std::make_pair(executableApi.GetInputsInfo(), executableApi.GetOutputsInfo()); - } - auto inputs2 = network.getInputsInfo(); - ConstInputsDataMap constInputs2; - for (const auto & input : inputs2) { - constInputs2[input.first] = input.second; - } - auto output2 = network.getOutputsInfo(); - ConstOutputsDataMap constOutInfo2; - for (const auto & output : output2) { - constOutInfo2[output.first] = output.second; - } - return std::make_pair(constInputs2, constOutInfo2); - }; - - auto endpoints = make_unified_endpoints(); - - for (auto && fetch_input : config.fetch_input) { - // each fetcher can be used multiple times - for (;;) { - // load new input - reset if necessary - decltype(fetch_input(ctx)) fetchResult; - - int requestProcessed = 0; - for (int i = 0; i != config._nrequests; i++) { - int inputId = 0; - for (auto input : endpoints.first) { - InferenceEngine::Blob::Ptr inputBlb; - inputBlb = inferRequests[i].GetBlob(input.first); - ctx.setInput(input.second->name(), inputBlb); - ctx.setInputIdx(inputId); - decltype(fetch_input(ctx)) fetchResultForInput; - ASSERT_NO_FATAL_FAILURE(fetchResultForInput = fetch_input(ctx)); - if (inputId != 0) { - ASSERT_EQ(fetchResult.fetched, fetchResultForInput.fetched); - ASSERT_EQ(fetchResult.fetchMore, fetchResultForInput.fetchMore); - ASSERT_EQ(fetchResult.frameNumber, fetchResultForInput.frameNumber); - ASSERT_EQ(fetchResult.reset, fetchResultForInput.reset); - } else { - fetchResult = fetchResultForInput; - } - inputId++; - } - - if (fetchResult.fetched) { - // number of requests to infer in parallel - requestProcessed++; - // increasing frame number this however might be done in input fetcher if CTX passed by non const reference - // value used in read_next_.. 
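// The fetch loop below drives everything through a small result contract
// returned by each input fetcher; a hypothetical sketch of the flags it
// consults (field names taken from the checks inside matchCustom()):
struct FetchResult {
    bool fetched;      // an input frame was written into the request blob
    bool fetchMore;    // the stream has further frames after this one
    bool hasResult;    // outputs of this inference take part in comparison
    bool reset;        // stateful networks must be reset before continuing
    int  frameNumber;  // used to assert that all inputs stay in sync
};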
fetchers family - ctx.setFrameNumber(ctx.getFrameNumber() + 1); - } - // cannot spawn more requests due to reset - if (fetchResult.reset) { - break; - } - // end of stream - if (!fetchResult.fetchMore) { - break; - } - } - - if (fetchResult.fetched) { - // Infer model - if (requestProcessed == 1) { - inferRequests.front().Infer(); - sts = OK; - } else { - for (int i = 0; i != requestProcessed; i++) { - inferRequests[i].StartAsync(); - } - for (int i = 0; i != requestProcessed; i++) { - inferRequests[i].Wait(InferRequest::RESULT_READY); - } - sts = OK; - } - - if (!fetchResult.hasResult) { - continue; - } - - // for infer request case will copy resulted blob - for (int i = 0; i != requestProcessed;i++) { - auto &outputs = ctx.newOutputs(); - for (auto output : endpoints.second) { - auto tblob = dynamic_pointer_cast>(inferRequests[i].GetBlob(output.second->getName())); - outputs[output.second->getName()] = make_shared_blob(*tblob); - } - } - } - - IE_SUPPRESS_DEPRECATED_START - if (fetchResult.reset) { - auto states = executableApi.QueryState(); - ASSERT_FALSE(states.empty()); - for(auto& state : states) { - state.Reset(); - } - // also store reset indicator for comparison routine - auto &outputs = ctx.newOutputs(); - outputs["reset"] = nullptr; - //continue; - } - IE_SUPPRESS_DEPRECATED_END - - //FAIL()<<"stop after one frame"; - - // Check errors - if (sts == GENERAL_ERROR) { - IE_THROW() << "Scoring failed! Critical error: " << dsc.msg; - } else if (sts == NOT_IMPLEMENTED) { - IE_THROW() << "Scoring failed! Input data is incorrect and not supported!"; - } else if (sts == NETWORK_NOT_LOADED) { - IE_THROW() << "Scoring failed! " << dsc.msg; - } - if (!fetchResult.fetchMore) break; - } - } - } - catch (std::exception &e) { - FAIL() << e.what(); - } -} - -void Regression::Matchers::CustomMatcher::checkResult() { - bool cmpNear = !isApproximatelyEqual(config.nearValue, 0.0); - bool cmpNearAvg = !isApproximatelyEqual(config.nearAvgValue, 0.0); - bool isSaveOutput = !!config.outputBlob; - - /** - * In case where external comparison is used - */ - if (isSaveOutput) { - if (!config.fetch_result) { - // calculating all outputs size - SizeVector dimsMerged; - for(auto && output : ctx.allOutputs()) { - auto outputBlobIt = config.outputLayer.empty() ? output.begin() : output.find(config.outputLayer); - auto outBlob = outputBlobIt->second; - - if (dimsMerged.empty()) { - dimsMerged = outBlob->getTensorDesc().getDims(); - } else { - ASSERT_EQ(dimsMerged.size(), outBlob->getTensorDesc().getDims().size()); - int added = 0; - std::transform(begin(dimsMerged), - end(dimsMerged), - begin(dimsMerged = outBlob->getTensorDesc().getDims()), - begin(dimsMerged), - [&added](size_t l, size_t r) { - added += l != r; - return added ? l + r : l; - }); - ASSERT_LE(added,1); - - if (added == 0 && !dimsMerged.empty()) { - dimsMerged.back() += outBlob->getTensorDesc().getDims().back(); - } - } - } - - config.outputBlob->deallocate(); - config.outputBlob->getTensorDesc() = TensorDesc(config.outputBlob->getTensorDesc().getPrecision(), - dimsMerged, - TensorDesc::getLayoutByDims(dimsMerged)); - config.outputBlob->allocate(); - float *buff = config.outputBlob->buffer(); - - // copying all output frames into allocated blob - for(auto && output : ctx.allOutputs()) { - - auto outputBlobIt = config.outputLayer.empty() ? 
output.begin() : output.find(config.outputLayer); - auto outBlob = dynamic_pointer_cast>(outputBlobIt->second); - - for (auto value : *outBlob) { - *(buff++) = value; - } - } - - } else { - auto outBlob = dynamic_pointer_cast>(config.fetch_result(ctx)); - - config.outputBlob->deallocate(); - config.outputBlob->getTensorDesc() = TensorDesc(outBlob->getTensorDesc().getPrecision(), - outBlob->getTensorDesc().getDims(), - TensorDesc::getLayoutByDims(outBlob->getTensorDesc().getDims())); - config.outputBlob->allocate(); - float *buff = config.outputBlob->buffer(); - - int i = 0; - for (auto value : *outBlob) { - buff[i++] = value; - } - } - return; - } - - if (cmpNear || cmpNearAvg) { - int idx = 0; - float avgDiff = 0.0f; - float maxDiff = 0.0f; - float maxAverageDiff = 0.0f; - float rms = 0.0f; - int nFrame = -1; - float avgFrames = 0.0f; - - if (!config.fetch_result) { - decltype(ctx.allOutputs().begin()) output; - for(;;) { - avgFrames++; - if (nFrame == -1) { - output = ctx.allOutputs().begin(); - nFrame = 0; - } else { - nFrame++; - ++output; - } - if (output == ctx.allOutputs().end()) { - break; - } - auto outputBlobIt = config.outputLayer.empty() ? output->begin() : output->find(config.outputLayer); - auto outBlob = dynamic_pointer_cast>(outputBlobIt->second); - - // fo reset case we are storing fake blob pointer - if (outBlob == nullptr) { - avgDiff = 0.0; - rms = 0.0; - nFrame--; - avgFrames = 0.0; - continue; - } - float rmsp = 0.0; - float avgDiffp = 0.0; - ASSERT_LE(outBlob->size(), config.referenceOutput.size()); - for (auto value : *outBlob) { - if (cmpNear) { - ASSERT_NEAR(value, config.referenceOutput[idx], config.nearValue) << " at " << idx; - } - auto diff = abs(value - config.referenceOutput[idx]); - avgDiffp += diff; - rmsp += diff*diff; - maxDiff = std::max(maxDiff, diff); - idx++; - } - - rmsp = sqrt(rmsp / outBlob->size()); - rms += rmsp; - avgDiffp /= outBlob->size(); - avgDiff += avgDiffp; - maxAverageDiff = std::max(maxAverageDiff, avgDiff / avgFrames); - - //TODO: add test_log parse from command line -// #define TEST_LOG -#ifdef TEST_LOG - auto threshold_similarity_max = config.nearValue - maxDiff; - auto threshold_similarity_avg = config.nearAvgValue - avgDiff / avgFrames; - - cout << "Frame # " << nFrame << "\n"; - cout << "MaxDiff : " << maxDiff << " (" - << std::fixed << std::setprecision(5) << threshold_similarity_max <<")" << "\n"; - cout << "RMSE : " << rmsp << "\n"; - cout << "AvgDiff/f : " << avgDiffp << "\n"; - cout << "MaxAvgDiff: " << maxAverageDiff - << std::fixed << std::setprecision(5) << " (" << threshold_similarity_avg <<")" << std::endl; -#endif - - if (cmpNearAvg) { - ASSERT_NEAR(avgDiff / avgFrames, 0, config.nearAvgValue); - } - } - } else { - auto ptr = dynamic_pointer_cast>(config.fetch_result(ctx)); - - for (auto value : *ptr) { - if (cmpNear) { - ASSERT_NEAR(value, config.referenceOutput[idx], config.nearValue) << " at " << idx; - } - if (cmpNearAvg) { - avgDiff += abs(value - config.referenceOutput[idx]); - } - idx++; - } - if (cmpNearAvg) { - avgDiff /= ptr->size(); - } - } - } else { - // for small expectations lets use string as a compare buddy - stringstream ss, ssr; - - if (!config.fetch_result) { - for (auto output : ctx.outputs()) { - auto outBlob = dynamic_pointer_cast>(output.second); - for (auto value : *outBlob) { - ss << setprecision(precision) << fixed << (float)value << "."; - } - } - } else { - auto ptr = dynamic_pointer_cast>(config.fetch_result(ctx)); - - for (auto value : *ptr) { - ss << setprecision(precision) << fixed << 
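// Condensed sketch of the per-frame error statistics accumulated in
// checkResult() above: mean absolute difference, RMSE, and the running
// maximum difference over one output frame (n > 0 is assumed; the struct
// and function names are stand-ins).
#include <algorithm>
#include <cmath>
#include <cstddef>

struct FrameStats { float avgDiff; float rmse; float maxDiff; };

FrameStats frameStats(const float* out, const float* ref, size_t n) {
    float sum = 0.f, sumSq = 0.f, maxDiff = 0.f;
    for (size_t i = 0; i < n; ++i) {
        float d = std::fabs(out[i] - ref[i]);
        sum += d;
        sumSq += d * d;
        maxDiff = std::max(maxDiff, d);
    }
    return { sum / n, std::sqrt(sumSq / n), maxDiff };
}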
(float)value << "."; - } - } - - for (auto value : config.referenceOutput) { - ssr << setprecision(precision) << fixed << (float)value << "."; - } - - ASSERT_STREQ(ssr.str().c_str(), ss.str().c_str()); - } -} diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/net_model.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/net_model.cpp deleted file mode 100644 index edea2ae0384f09..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/net_model.cpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "net_model.hpp" - -//------------------------------------------------------------------------------ -// Implementation of methods of class Model -//------------------------------------------------------------------------------ - -Model::Model(const std::string &folderName, - const std::string &fileName, - const std::string &resolution, - const std::string & extension) : - folderName_(folderName), - fileName_(fileName), - resolutionName_(resolution), - extensionName_(extension) { -}; diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/object_detection_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/object_detection_matcher.cpp deleted file mode 100644 index f1c7ab8046da84..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/object_detection_matcher.cpp +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "object_detection_matcher.hpp" -#include - -#include - -using namespace Regression::Matchers; - -namespace Regression { -namespace Matchers { - -using DetectedObject = ObjectDetectionMatcher::DetectedObject; -using ImageDescription = ObjectDetectionMatcher::ImageDescription; -using namespace InferenceEngine; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// ObjectDetectionMatcher::DetectedObject ////////////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -ObjectDetectionMatcher::DetectedObject::DetectedObject(int objectType, - float xmin, - float ymin, - float xmax, - float ymax, - float prob, - int) - : objectType(objectType), xmin(xmin), xmax(xmax), ymin(ymin), ymax(ymax), prob(prob) { -} - -ObjectDetectionMatcher::DetectedObject::DetectedObject(const DetectedObject &other) { - this->objectType = other.objectType; - this->xmin = other.xmin; - this->xmax = other.xmax; - this->ymin = other.ymin; - this->ymax = other.ymax; - this->prob = other.prob; -} - -float ObjectDetectionMatcher::DetectedObject::ioU(const DetectedObject &detected_object_1_, - const DetectedObject &detected_object_2_) { - // Add small space to eliminate empty squares - float epsilon = 1e-3; - - DetectedObject detectedObject1(detected_object_1_.objectType, - detected_object_1_.xmin - epsilon, - detected_object_1_.ymin - epsilon, - detected_object_1_.xmax + epsilon, - detected_object_1_.ymax + epsilon, detected_object_1_.prob); - DetectedObject detectedObject2(detected_object_2_.objectType, - detected_object_2_.xmin - epsilon, - detected_object_2_.ymin - epsilon, - detected_object_2_.xmax + epsilon, - detected_object_2_.ymax + epsilon, detected_object_2_.prob); - - if (detectedObject1.objectType != 
detectedObject2.objectType) { - // objects are different, so the result is 0 - return 0.0f; - } - - if (detectedObject1.xmax < detectedObject1.xmin) return 0.0; - if (detectedObject1.ymax < detectedObject1.ymin) return 0.0; - if (detectedObject2.xmax < detectedObject2.xmin) return 0.0; - if (detectedObject2.ymax < detectedObject2.ymin) return 0.0; - - float xmin = (std::max)(detectedObject1.xmin, detectedObject2.xmin); - float ymin = (std::max)(detectedObject1.ymin, detectedObject2.ymin); - float xmax = (std::min)(detectedObject1.xmax, detectedObject2.xmax); - float ymax = (std::min)(detectedObject1.ymax, detectedObject2.ymax); - // intersection - float intr; - - if ((xmax >= xmin) && (ymax >= ymin)) { - intr = (xmax - xmin) * (ymax - ymin); - } else { - intr = 0.0f; - } - - // union - float square1 = (detectedObject1.xmax - detectedObject1.xmin) * (detectedObject1.ymax - detectedObject1.ymin); - float square2 = (detectedObject2.xmax - detectedObject2.xmin) * (detectedObject2.ymax - detectedObject2.ymin); - - float unn = square1 + square2 - intr; - - return float(intr) / unn; -} - -void ObjectDetectionMatcher::DetectedObject::printObj() { - printf("[%p] objectType=%d, xmin=%f, xmax=%f, ymin=%f, ymax=%f, prob=%f\n", - this, - objectType, - xmin, - xmax, - ymin, - ymax, - prob); -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// ObjectDetectionMatcher::ImageDescription //////////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -ObjectDetectionMatcher::ImageDescription::ImageDescription(bool check_probs) : - check_probs_(check_probs) { -} - -ObjectDetectionMatcher::ImageDescription::ImageDescription(const std::list &alist, bool check_probs) - : alist(alist), check_probs_(check_probs) { -} - -ObjectDetectionMatcher::ImageDescription::ImageDescription(const ImageDescription &obj) : - check_probs_(obj.checkProbs()) { - this->alist = obj.alist; -} - -float ObjectDetectionMatcher::ImageDescription::ioUMultiple(const ImageDescription &detected_objects, - const ImageDescription &desired_objects) { - - const ImageDescription *detectedObjectsSmall, *detectedObjectsBig; - bool check_probs = desired_objects.checkProbs(); - - if (detected_objects.alist.size() < desired_objects.alist.size()) { - detectedObjectsSmall = &detected_objects; - detectedObjectsBig = &desired_objects; - } else { - detectedObjectsSmall = &desired_objects; - detectedObjectsBig = &detected_objects; - } - - std::list doS = detectedObjectsSmall->alist; - std::list doB = detectedObjectsBig->alist; - - float fullScore = 0.0f; - while (doS.size() > 0) { - float score = 0.0f; - std::list::iterator bestJ = doB.end(); - for (auto j = doB.begin(); j != doB.end(); j++) { - float curscore = DetectedObject::ioU(*doS.begin(), *j); - if (score < curscore) { - score = curscore; - bestJ = j; - } - } - - float coeff = 1.0; - if (check_probs) { - if (bestJ != doB.end()) { - DetectedObject test = *bestJ; - DetectedObject test1 = *doS.begin(); - float min = std::min((*bestJ).prob, (*doS.begin()).prob); - float max = std::max((*bestJ).prob, (*doS.begin()).prob); - - coeff = min / max; - } - } - - doS.pop_front(); - if (bestJ != doB.end()) doB.erase(bestJ); - fullScore += coeff * score; - } - fullScore /= detectedObjectsBig->alist.size(); - - return fullScore; -} - -void 
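// Compact sketch of the overlap metric computed by DetectedObject::ioU()
// above: intersection area over union area, zero for disjoint boxes or boxes
// of different classes (Box is a hypothetical stand-in type). ioUMultiple()
// above then greedily pairs each detection with its best-IoU counterpart
// and, when probabilities are checked, scales each pair's score by
// min(prob) / max(prob).
#include <algorithm>

struct Box { int cls; float xmin, ymin, xmax, ymax; };

inline float iou(const Box& a, const Box& b) {
    if (a.cls != b.cls) return 0.f;
    float ix = std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin);
    float iy = std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin);
    if (ix <= 0.f || iy <= 0.f) return 0.f;   // disjoint boxes
    float inter = ix * iy;
    float uni = (a.xmax - a.xmin) * (a.ymax - a.ymin)
              + (b.xmax - b.xmin) * (b.ymax - b.ymin) - inter;
    return inter / uni;
}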
ObjectDetectionMatcher::ImageDescription::addDetectedObject(const DetectedObject &detected_obj) { - alist.push_back(detected_obj); -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// ObjectDetectionMatcher::ObjectDetectionMatcher ////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -ObjectDetectionMatcher::ObjectDetectionMatcher(const RegressionConfig &config) - : BaseMatcher(config) { -} - -void ObjectDetectionMatcher::match(const ScoreFunction& score_function) { - // Read network - string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin"; - auto cnnNetwork = config.ie_core->ReadNetwork(config._path_to_models, binFileName); - - if (config._reshape) { - auto inputShapes = cnnNetwork.getInputShapes(); - for (auto & shape : inputShapes) { - shape.second[0] = config.batchSize; - } - - cnnNetwork.reshape(inputShapes); - } else if (config.batchSize != 1) { - cnnNetwork.setBatchSize(config.batchSize); - } - - res_desc_ = score_function(cnnNetwork); - - if (res_desc_.size() != config.batchSize) { - FAIL() << "[ERROR]: Result batch size is not equal to initial."; - } -} - -void ObjectDetectionMatcher::checkResult(const std::vector &desired) { - if ((desired.size() < config.batchSize) || (res_desc_.size() != config.batchSize)) { - FAIL() << "[ERROR]: Number of ImageDescription objects less then batch size or result batch size is not equal to initial.\n" - << "Batch size: " << config.batchSize << "; Expected outputs number: " << desired.size() - << "; Result number: " << res_desc_.size(); - } - string sError; - for (int i = 0; i < config.batchSize; i++) { - double iou = ImageDescription::ioUMultiple(*res_desc_[i], desired[i]); - double minimalScore = 1.0 - config.nearValue; - if (iou < minimalScore) { - sError += "[ERROR]: Batch #" + std::to_string(i) + ". Similarity is too low: " + std::to_string(iou) - + ". Expected " + std::to_string(minimalScore) + "\n"; - } else { - std::cout << "Batch #" << i << ". 
Similarity " << iou << " is above the expected value: " << minimalScore - << std::endl; - } - } - - if (!sError.empty()) { - FAIL() << sError; - } -} - -void ObjectDetectionMatcher::to(const ImageDescription &desired, const std::shared_ptr& adapter) { - std::vector desired_vector = {desired}; - ASSERT_NO_FATAL_FAILURE(to(desired_vector, adapter)); -} - -void ObjectDetectionMatcher::to(const std::vector &desired, - const std::shared_ptr& adapter) { - to(desired, [&](CNNNetwork & network) -> ImageDescriptionPtrVect { - return adapter->score(network, - config.ie_core, - config._device_name, - config.plugin_config, - config._paths_to_images, - config._reshape, - config.useExportImport); - }); -} - -void ObjectDetectionMatcher::to(const ImageDescription &desired, const NetworkAdapter& adapter) { - std::vector desired_vector = {desired}; - ASSERT_NO_FATAL_FAILURE(to(desired_vector, adapter)); -} - -void ObjectDetectionMatcher::to(const std::vector &desired, - const NetworkAdapter& adapter) { - to(desired, [&](CNNNetwork& network) -> ImageDescriptionPtrVect { - return adapter.score(network, - config.ie_core, - config._device_name, - config.plugin_config, - config._paths_to_images, - config._reshape); - }); -} - -void ObjectDetectionMatcher::to(const std::vector &desired, const ScoreFunction& score_function) { - // ASSERT_NO_FATAL_FAILURE(checkImgNumber()); - ASSERT_NO_FATAL_FAILURE(match(score_function)); - if (desired.size() < config.batchSize) { - std::cout << "Number of ImageDescription objects less then batch size" << std::endl; - std::vector newRef; - for (int i = 0; i < config.batchSize; i++) { - newRef.push_back(desired[0]); - } - ASSERT_NO_FATAL_FAILURE(checkResult(newRef)); - } else { - ASSERT_NO_FATAL_FAILURE(checkResult(desired)); - } -} - -} // namespace matchers -} // namespace regression \ No newline at end of file diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/optimized_network_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/optimized_network_matcher.cpp deleted file mode 100644 index 63bce010583428..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/optimized_network_matcher.cpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include "optimized_network_matcher.hpp" - -using namespace InferenceEngine; -using namespace InferenceEngine::PluginConfigParams; - - -void Regression :: Matchers :: OptimizedNetworkMatcher :: to(std::string path_to_reference_dump) { - ModelsPath path_to_firmware; - path_to_firmware << kPathSeparator << config._firmware << kPathSeparator; - - auto compact_token = config.compactMode ? 
"_compact" : ""; - - this->path_to_reference_dump = path_to_firmware + path_to_reference_dump + compact_token + "_firmware.bin"; -} - -void Regression :: Matchers :: OptimizedNetworkMatcher :: matchCustom () { - ASSERT_NO_FATAL_FAILURE(createExecutableNetworkFromIR()); - firmware = readDumpFromFile(config._tmp_firmware); - ASSERT_NE(firmware.size(), 0); -} - -std::vector Regression :: Matchers :: OptimizedNetworkMatcher :: readDumpFromFile(std::string path) { - std::ifstream file(path, std::ios::binary | std::ios::ate); - std::streamsize size = file.tellg(); - if (size <=0) { - return std::vector(); - } - file.seekg(0, std::ios::beg); - - std::vector buffer(size); - file.read((char*)buffer.data(), size); - - return buffer; -} - -void Regression :: Matchers :: OptimizedNetworkMatcher :: checkResult() { - auto refFirmware = readDumpFromFile(path_to_reference_dump); - - ASSERT_EQ(refFirmware.size(), firmware.size()) << "Reference: " << path_to_reference_dump; - - for (int i = 0; i < refFirmware.size(); ++i) { - ASSERT_EQ(refFirmware[i], firmware[i]) << "firmware mismatch at: " << i << " byte"; - } -} - -//////////////////////////// - -void Regression :: Matchers :: OptimizedNetworkDumper::dump() { - ExecutableNetwork executableApi; - ASSERT_NO_FATAL_FAILURE(executableApi = createExecutableNetworkFromIR()); - try { - executableApi.Export(config._path_to_aot_model); - } - catch (const std::exception &e) { - FAIL() << e.what(); - } - -} diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp deleted file mode 100644 index 59c65031d296a8..00000000000000 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include "raw_matcher.hpp" -#include - -namespace Regression { -namespace Matchers { - -void RawMatcher::match() { - try { - // Read network - std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin"; - std::cout << config._path_to_models << std::endl; - auto cnnNetwork = config.ie_core->ReadNetwork(config._path_to_models, binFileName); - - InferenceEngine::InputsDataMap networkInputs; - networkInputs = cnnNetwork.getInputsInfo(); - if (networkInputs.size() == 0) { - IE_THROW() << "No inputs detected."; - } - - if (config._paths_to_images.size() % ( config.batchSize * networkInputs.size()) != 0) { - std::cerr << "[WARNING]: Can not process all input images("<< config._paths_to_images.size() - <<") using given batch size of " << config.batchSize << ". Batch size will be equal 1." 
<< std::endl; - config.batchSize = 1; - } - - InferenceEngine::DataPtr inputData = cnnNetwork.getInputsInfo().begin()->second->getInputData(); - InferenceEngine::SizeVector inputDims = inputData->getTensorDesc().getDims(); - - if (config._reshape) { - auto inputShapes = cnnNetwork.getInputShapes(); - inputShapes.begin()->second[0] = config.batchSize; - - cnnNetwork.reshape(inputShapes); - } else if (config.batchSize != 1) { - cnnNetwork.setBatchSize(config.batchSize); - } - - // TODO(amalyshe) quick dirty solution which might not cover all topologies, - // but covers only networks having one input passing to one layer - CNNLayerPtr layer; - for (auto input : networkInputs) { - InputInfo::Ptr q = input.second; - if (config._inputPrecision) q->setPrecision(config._inputPrecision); - DataPtr p = q->getInputData(); - IE_SUPPRESS_DEPRECATED_START - layer = getInputTo(p).begin()->second; - IE_SUPPRESS_DEPRECATED_END - } - - { - // Set output precision - InferenceEngine::OutputsDataMap out; - out = cnnNetwork.getOutputsInfo(); - for (auto &&item : out) { - Blob::Ptr output; - auto outputName = item.first; - auto& outBlob = item.second; - if (config._outputPrecision) outBlob->setPrecision(config._outputPrecision); - if (config._outputBlobPrecision.count(outputName)) outBlob->setPrecision(config._outputBlobPrecision[outputName]); - } - } - - if (!config.deviceMapping.empty()) { - IE_SUPPRESS_DEPRECATED_START - CNNNetDFS(layer, [&](const CNNLayerPtr &layer) { - auto it = config.deviceMapping.find(layer->name); - if (it != config.deviceMapping.end()) { - layer->affinity = it->second; - } else { - layer->affinity = "CPU"; - } - }); - IE_SUPPRESS_DEPRECATED_END - } - - // Read image - std::vector> imagesData; - unsigned int actualNetSize = 0; - for (auto & imageName : config._paths_to_images) { - FormatReader::ReaderPtr reader(imageName.c_str()); - if (reader.get() == nullptr) { - IE_THROW() << "[ERROR]: Image " + imageName + " cannot be read!"; - } - actualNetSize += reader->size(); - // Store image data - - size_t width = 0, height = 0; - SizeVector dims = inputData->getTensorDesc().getDims(); - if (dims.size() == 3) { - height = dims.at(1); - width = dims.at(2); - } else if (dims.size() == 4) { - height = dims.at(2); - width = dims.at(3); - } else if (dims.size() == 5) { - height = dims.at(3); - width = dims.at(4); - } else { - IE_THROW() << inputData->getName() << " has unsupported layout " << inputData->getTensorDesc().getLayout(); - } - - std::shared_ptr data(reader->getData(width, height)); - if (data.get() != nullptr) { - imagesData.push_back(data); - } else { - IE_THROW() << "Invalid image '" << imageName << "'"; - } - } - - auto out2 = cnnNetwork.getOutputsInfo(); - for (auto &&item : out2) { - if (config._outputPrecision) item.second->setPrecision(config._outputPrecision); - if (config._outputBlobPrecision.count(item.first)) { - item.second->setPrecision(config._outputBlobPrecision[item.first]); - } - } - - auto loadedExecutableNetwork = config.ie_core->LoadNetwork(cnnNetwork, config._device_name, config.plugin_config); - InferenceEngine::ExecutableNetwork executableNetwork; - if (config.useExportImport) { - std::stringstream stream; - loadedExecutableNetwork.Export(stream); - executableNetwork = config.ie_core->ImportNetwork(stream); - } else { - executableNetwork = loadedExecutableNetwork; - } - auto inferRequest = executableNetwork.CreateInferRequest(); - - InferenceEngine::BlobMap inputBlobs; - - auto allocateBlob = [](const InferenceEngine::TensorDesc& desc) { - 
InferenceEngine::Blob::Ptr blob; - switch (desc.getPrecision()) { - case InferenceEngine::Precision::FP32 : - blob = InferenceEngine::make_shared_blob(desc); - break; - case InferenceEngine::Precision::FP16 : - case InferenceEngine::Precision::Q78 : - case InferenceEngine::Precision::I16 : - blob = InferenceEngine::make_shared_blob(desc); - break; - case InferenceEngine::Precision::U8 : - blob = InferenceEngine::make_shared_blob(desc); - break; - default: - IE_THROW() << "Unsupported blob precision: " << desc.getPrecision(); - } - blob->allocate(); - - return blob; - }; - - for(auto&& inputInfo : cnnNetwork.getInputsInfo()) { - std::string inputName = inputInfo.first; - - if (!inferRequest) { - // Allocate blobs - inputBlobs[inputName] = allocateBlob(inputInfo.second->getTensorDesc()); - } else { - inputBlobs[inputName] = inferRequest.GetBlob(inputName); - } - } - - { - InferenceEngine::OutputsDataMap out; - out = cnnNetwork.getOutputsInfo(); - for (auto &&item : out) { - Blob::Ptr output; - auto outputName = item.first; - if (!inferRequest) { - output = allocateBlob(item.second->getTensorDesc()); - } else { - // TODO(amalyshe): we need to return GetBlob eventually after the fix bug in mkldnnplugin - output = inferRequest.GetBlob(outputName); - // output = allocateBlob(item.second->getTensorDesc()); - // inferRequest.SetBlob(outputName, output); - } - outputBlobs[outputName] = output; - } - } - - // loading images in batches - for (int i = 0; i < config._paths_to_images.size(); i += config.batchSize * inputBlobs.size()) { - int k = 0; - for(auto&& input: inputBlobs) { - for (int j = 0; j != config.batchSize; j++) { - const auto & imageName = config._paths_to_images[i + j + k]; - loadImage(imageName, input.second, true, j); - } - k++; - } - - if (config.isAsync) { - inferRequest.StartAsync(); - inferRequest.Wait(InferRequest::WaitMode::RESULT_READY); - } else { - inferRequest.Infer(); - } - - // Get performance info - if (config.perfInfoPtr != nullptr) { - *config.perfInfoPtr = inferRequest.GetPerformanceCounts(); - } - } - } catch (Exception &e) { - FAIL() << e.what(); - } - catch (std::exception &e) { - FAIL() << e.what(); - } -} - -void RawMatcher::checkResult(const std::map> &allExpected) { - auto prepareResults = [&](const Blob::Ptr& output) { - std::vector tmp_buffer; - - if (output->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) { - tmp_buffer.resize(output->size(), 0.f); - PrecisionUtils::f16tof32Arrays(tmp_buffer.data(), - output->buffer().as(), - output->size()); - } else { - assert(output->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32); - tmp_buffer.resize(output->size(), 0.f); - std::copy_n(output->buffer().as(), output->size(), tmp_buffer.begin()); - } - - return tmp_buffer; - }; - if(config.print) { - std::cout << "{"; - for(auto&& out : outputBlobs) { - Blob::Ptr& output = out.second; - auto results = prepareResults(output); - std::cout << "{{\"" << out.first <<"\", {\n"; - for(std::size_t i = 0; i < output->size(); i += (output->size() + config.printNum - 1)/ config.printNum) { - std::cout << "{" << i <<", "<< results[i] << "},\n"; - } - std::cout << "}}},\n"; - } - std::cout << "};" << std::endl; - } else { - std::stringstream strm; - auto generateInfo = [&](const std::vector& results, const std::map& expected) { - double meanRelative = 0; - double maxAbsolute = 0; - double maxRelative = 0; - strm << std::endl << std::setw(15) << "Position" << std::setw(15) << - "Expected" << std::setw(15) << - "Actual" << std::setw(15) << - 
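// Typed restatement of the allocateBlob lambda above; the element types are
// an assumption of this sketch (FP16/Q78/I16 payloads kept as raw 16-bit
// words, U8 as 8-bit integers):
#include <cstdint>
#include <ie_core.hpp>

InferenceEngine::Blob::Ptr allocateBlob(const InferenceEngine::TensorDesc& desc) {
    using namespace InferenceEngine;
    Blob::Ptr blob;
    switch (desc.getPrecision()) {
    case Precision::FP32: blob = make_shared_blob<float>(desc);    break;
    case Precision::FP16:            // half floats stored as raw 16-bit words
    case Precision::Q78:
    case Precision::I16:  blob = make_shared_blob<int16_t>(desc);  break;
    case Precision::U8:   blob = make_shared_blob<uint8_t>(desc);  break;
    default: IE_THROW() << "Unsupported blob precision: " << desc.getPrecision();
    }
    blob->allocate();
    return blob;
}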
"Absolute" << std::setw(15) << - "Relative,%" << std::endl; - for (auto e : expected) { - double absolute = fabs(e.second - results[e.first]); - double relative = fabs(e.second - results[e.first]) / fabs(e.second); - - strm << std::setw(15) << e.first - << std::setw(15) << std::setprecision(6) << e.second - << std::setw(15) << std::setprecision(6) << results[e.first] - << std::setw(15) << std::setprecision(6) << absolute - << std::setw(15) << std::setprecision(6) << relative*100 << std::endl; - meanRelative += relative; - maxAbsolute = std::max(maxAbsolute, absolute); - maxRelative = std::max(maxRelative, relative); - } - strm << "Max Absolute = " << maxAbsolute - << " Mean Relative = " << meanRelative*100/expected.size() - << " Max Relative = " << maxRelative*100 << '\n'; - }; - - if(0 != config.nearValue) { - for(auto expectedPair : allExpected) { - Blob::Ptr output = outputBlobs[expectedPair.first]; - if (!output) { - FAIL() << "Was not able to find expected output " << expectedPair.first; - } - - auto results = prepareResults(output); - - const std::map &expected = expectedPair.second; - - for (auto e : expected) { - if (fabs(e.second - results[e.first]) > config.nearValue) { - strm << "In blob " << expectedPair.first - << " element at " << e.first << " index expected to be " << e.second << " but in fact it is " - << results[e.first] << - " Delta = " << (fabs(e.second - results[e.first])); - generateInfo(results, expected); - FAIL() << strm.str(); - } - } - } - } - if(0 != config.meanRelativeError) { - for(auto expectedPair : allExpected) { - Blob::Ptr output = outputBlobs[expectedPair.first]; - if (!output) { - FAIL() << "Was not able to find expected output " << expectedPair.first; - } - auto results = prepareResults(output); - - std::map& expected = expectedPair.second; - - double meanRelative = 0; - for (auto e : expected) { - double eps = fabs(e.second - results[e.first]) / fabs(e.second); - meanRelative += eps; - } - meanRelative /= expected.size(); - meanRelative *= 100; - - if (meanRelative > config.meanRelativeError) { - strm << "In blob " << expectedPair.first - << " Mean Relative Error = " << meanRelative - << " Expected Mean Relative Error = " << config.meanRelativeError; - generateInfo(results, expected); - FAIL() << strm.str(); - } - } - } - if(0 != config.maxRelativeError) { - for(auto expectedPair : allExpected) { - Blob::Ptr output = outputBlobs[expectedPair.first]; - if (!output) { - FAIL() << "Was not able to find expected output " << expectedPair.first; - } - auto results = prepareResults(output); - - std::map& expected = expectedPair.second; - - double maxRelative = 0; - std::size_t maxPos = 0; - for (auto e : expected) { - double eps = fabs(e.second - results[e.first]) / fabs(e.second); - if(eps > maxRelative) { - maxRelative = eps; - maxPos = e.first; - } - } - maxRelative *= 100; - - if (maxRelative > config.maxRelativeError) { - strm << "In blob " << expectedPair.first << " element at " << maxPos << " index" - << " expected to be " << expected[maxPos] << " but in fact it is " << results[maxPos] - << " Max Relative Error = " << maxRelative - << " Expected Max Relative Error = " << config.maxRelativeError; - generateInfo(results, expected); - FAIL() << strm.str(); - } - } - } - } -} - -} -} // namespace matchers diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp deleted file mode 100644 index 45afd5a28802d4..00000000000000 --- 
a/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include "segmentation_matcher.hpp" - -static std::vector> blobToImageOutputArray(InferenceEngine::TBlob::Ptr output, - size_t *pWidth, size_t *pHeight, - size_t *pChannels) { - std::vector> outArray; - size_t W = 0, C = 0, H = 0; - - auto outputDims = output->getTensorDesc().getDims(); - if (outputDims.size() == 3) { - C = outputDims.at(0); - H = outputDims.at(1); - W = outputDims.at(2); - } else if (outputDims.size() == 4) { - C = outputDims.at(1); - H = outputDims.at(2); - W = outputDims.at(3); - } else if (outputDims.size() == 5) { - C = outputDims.at(1); - H = outputDims.at(3); - W = outputDims.at(4); - } else { - IE_THROW() << "Output blob has unsupported layout " << output->getTensorDesc().getLayout(); - } - - // Get classes - const float *outData = output->data(); - for (unsigned h = 0; h < H; h++) { - std::vector row; - for (unsigned w = 0; w < W; w++) { - float max_value = outData[h * W + w]; - size_t index = 0; - for (size_t c = 1; c < C; c++) { - size_t dataIndex = c * H * W + h * W + w; - if (outData[dataIndex] > max_value) { - index = c; - max_value = outData[dataIndex]; - } - } - row.push_back(index); - } - outArray.push_back(row); - } - - if (pWidth != nullptr) *pWidth = W; - if (pHeight != nullptr) *pHeight = H; - if (pChannels != nullptr) *pChannels = C; - - return outArray; -} - -namespace Regression { namespace Matchers { - -void SegmentationMatcher::match() { - // Read network - std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin"; - auto network = config.ie_core->ReadNetwork(config._path_to_models, binFileName); - - // Change batch size if it is not equal 1 - InferenceEngine::InputsDataMap inputs; - inputs = network.getInputsInfo(); - ASSERT_EQ(inputs.size() ,1); - InferenceEngine::InputInfo::Ptr ii = inputs.begin()->second; - - InferenceEngine::SizeVector inputDims = ii->getTensorDesc().getDims(); - if (inputDims.at(0) != 1) { - std::cerr << "[WARNING]: Batch size will be equal 1." 
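// Self-contained sketch of the per-pixel argmax performed by
// blobToImageOutputArray() above: for every (h, w) position, pick the channel
// with the highest score in a planar CHW score map (function name assumed).
#include <cstddef>
#include <vector>

std::vector<std::vector<size_t>> argmaxImage(const float* data, size_t C, size_t H, size_t W) {
    std::vector<std::vector<size_t>> classes(H, std::vector<size_t>(W, 0));
    for (size_t h = 0; h < H; ++h)
        for (size_t w = 0; w < W; ++w) {
            float best = data[h * W + w];                // channel 0
            for (size_t c = 1; c < C; ++c) {
                float v = data[c * H * W + h * W + w];   // planar CHW indexing
                if (v > best) { best = v; classes[h][w] = c; }
            }
        }
    return classes;
}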
<< std::endl; - network.setBatchSize(1); - inputDims = ii->getTensorDesc().getDims(); - } - - InferenceEngine::OutputsDataMap outInfo; - outInfo = network.getOutputsInfo(); - ASSERT_EQ(outInfo.size(), 1); - ASSERT_NE(outInfo.begin()->second, nullptr); - - InferenceEngine::SizeVector outputDims = outInfo.begin()->second->getDims(); - - if (outputDims.size() != 4) { - IE_THROW() << "Incorrect output dimensions for Deconvolution model"; - } - - // Read image - FormatReader::ReaderPtr reader(config._paths_to_images[0].c_str()); - if (reader.get() == nullptr) { - IE_THROW() << "[ERROR]: Image " << config._paths_to_images[0] << " cannot be read!"; - } - - int inputNetworkSize = static_cast(std::accumulate( - inputDims.begin(), inputDims.end(), (size_t)1, std::multiplies())); - - if (reader->size() != inputNetworkSize) { - IE_THROW() << "[ERROR]: Input sizes mismatch, got " << reader->size() << " bytes, expecting " - << inputNetworkSize; - } - - // Allocate blobs - InferenceEngine::Blob::Ptr input; - switch (inputs.begin()->second->getPrecision()) { - case InferenceEngine::Precision::FP32 : - input = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::FP32, inputDims, NCHW }); - break; - case InferenceEngine::Precision::Q78 : - case InferenceEngine::Precision::I16 : - input = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::I16, inputDims, NCHW }); - break; - case InferenceEngine::Precision::U8 : - input = InferenceEngine::make_shared_blob({ InferenceEngine::Precision::U8, inputDims, NCHW }); - break; - default: - IE_THROW() << "Unsupported network precision: " << inputs.begin()->second->getPrecision(); - } - input->allocate(); - - output = InferenceEngine::make_shared_blob(outInfo.begin()->second->getTensorDesc()); - output->allocate(); - - // Load image to blob - ConvertImageToInput(reader->getData().get(), reader->size(), *input); - - auto loadedExecutableNetwork = config.ie_core->LoadNetwork(network, config._device_name, config.plugin_config); - InferenceEngine::ExecutableNetwork executableNetwork; - if (config.useExportImport) { - std::stringstream stream; - loadedExecutableNetwork.Export(stream); - executableNetwork = config.ie_core->ImportNetwork(stream); - } else { - executableNetwork = loadedExecutableNetwork; - } - - auto inferRequest = executableNetwork.CreateInferRequest(); - inferRequest.SetBlob(inputs.begin()->first.c_str(), input); - inferRequest.SetBlob(outInfo.begin()->first.c_str(), output); - - // Infer model - inferRequest.Infer(); - - // Convert output data and save it to image - outArray = blobToImageOutputArray(output, nullptr, nullptr, &C); -} - -float SegmentationMatcher::compareOutputBmp(std::vector> data, size_t classesNum, const std::string& inFileName) { - unsigned int seed = (unsigned int)time(NULL); - std::vector colors = { - {128, 64, 128}, - {232, 35, 244}, - {70, 70, 70}, - {156, 102, 102}, - {153, 153, 190}, - {153, 153, 153}, - {30, 170, 250}, - {0, 220, 220}, - {35, 142, 107}, - {152, 251, 152}, - {180, 130, 70}, - {60, 20, 220}, - {0, 0, 255}, - {142, 0, 0}, - {70, 0, 0}, - {100, 60, 0}, - {90, 0, 0}, - {230, 0, 0}, - {32, 11, 119}, - {0, 74, 111}, - {81, 0, 81} - }; - while (classesNum > colors.size()) { - static std::mt19937 rng(seed); - std::uniform_int_distribution dist(0, 255); - Color color(dist(rng), dist(rng), dist(rng)); - colors.push_back(color); - } - - - FormatReader::ReaderPtr rd(inFileName.c_str()); - if (rd.get() == nullptr) { - IE_THROW() << "[ERROR]: Image " << inFileName << " cannot be read!"; - } - - auto height = 
data.size(); - auto width = data.at(0).size(); - - if (rd.get()->width() != width || rd.get()->height() != height) { - return 0.0; - } - - float rate = 0.0; - - unsigned char* pixels = rd.get()->getData().get(); - - for (size_t y = 0; y < height; y++) { - for (size_t x = 0; x < width; x++) { - unsigned char pixel[3]; - size_t index = data.at(y).at(x); - pixel[0] = colors.at(index).red(); - pixel[1] = colors.at(index).green(); - pixel[2] = colors.at(index).blue(); - - unsigned char pixelR[3]; - pixelR[0] = pixels[(y*width + x)*3 + 0]; - pixelR[1] = pixels[(y*width + x)*3 + 1]; - pixelR[2] = pixels[(y*width + x)*3 + 2]; - - if (pixel[0] == pixelR[0] && - pixel[1] == pixelR[1] && - pixel[2] == pixelR[2]) { - - rate ++; - } - } - } - - rate /= (width * height); - return rate; -} - -void SegmentationMatcher::checkResult(std::string imageFileName) { - std::ifstream inFile; - - float rate = compareOutputBmp(outArray, C, TestDataHelpers::get_data_path() + "/test_results/" + imageFileName/*ifs*/); - - float dist = 1.0f - rate; - if (dist > config.nearValue) { - FAIL() << "Comparison distance " << dist << " is greater than " << config.nearValue; - } else { - std::cout << "Comparison distance " << dist << " is smaller than " << config.nearValue << std::endl; - } -} - -} } // namespace matchers diff --git a/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt b/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt index b812637b656a31..9ef991ffee33ae 100644 --- a/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt @@ -2,55 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME MklDnnFunctionalTests) +add_executable(MklDnnFunctionalTests dummy.cpp) -file(GLOB MKL_DNN_TEST_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/config_param_test/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/extensions_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/network_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/regression_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/common_single_layer_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/graph_tools/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/inference_engine_regression_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/input_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/io_blob_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/lstm/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/snippet_test/*.cpp - ) - -list(APPEND MKL_DNN_LIBS - IESharedTests - ${Boost_REGEX_LIBRARY}) - -list(APPEND TEST_SRC ${MKL_DNN_TEST_SOURCES}) -list(APPEND LIBRARIES ${MKL_DNN_LIBS}) - -list(APPEND DEPENDENCIES - MKLDNNPlugin) - -source_group("src" FILES ${TEST_SRC}) -source_group("include" FILES ${TEST_INCLUDE}) - -add_executable(${TARGET_NAME} - ${TEST_SRC} - ${REGRESSION_TESTS} - ${TEST_INCLUDE}) - -target_compile_definitions(${TARGET_NAME} - PUBLIC ${ARGV} - DATA_PATH=\"${DATA_PATH}\" - MODELS_PATH=\"${MODELS_PATH}\" PARENT_SCOPE) - -target_include_directories(${TARGET_NAME} PRIVATE - ${IE_MAIN_SOURCE_DIR}/src/mkldnn_plugin) - -target_link_libraries(${TARGET_NAME} PRIVATE ${LIBRARIES}) - -set_ie_threading_interface_for(${TARGET_NAME}) - -add_dependencies(${TARGET_NAME} ${DEPENDENCIES}) - -add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME}) -set_property(TEST ${TARGET_NAME} PROPERTY LABELS CPU) 
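With this change the test target builds a single dummy.cpp and links gtest_main, so the executable needs no hand-written entry point: gtest_main supplies main(), and a binary with no registered tests simply exits successfully. As a sketch of what gtest_main provides internally (not code from this patch), the equivalent explicit entry point would be:

    #include <gtest/gtest.h>

    int main(int argc, char** argv) {
        ::testing::InitGoogleTest(&argc, argv);  // consumes --gtest_* command-line flags
        return RUN_ALL_TESTS();                  // returns 0 when no test fails (or none exist)
    }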
+target_link_libraries(MklDnnFunctionalTests PRIVATE gtest_main) diff --git a/inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp b/inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp index ffe853f7697581..ace1c25a6d1721 100644 --- a/inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp +++ b/inference-engine/tests_deprecated/functional/mkldnn/dummy.cpp @@ -1,4 +1,3 @@ // Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // - diff --git a/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt b/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt index f31d579ba85b57..a5ec4ed85abb3c 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt @@ -8,8 +8,9 @@ disable_deprecated_warnings() list(APPEND SHARED_LIBRARIES ${NGRAPH_LIBRARIES} - ie_tests + commonTestUtils ngraphFunctions + ieTestHelpers ) file(GLOB SHARED_TESTS_SRC @@ -28,11 +29,6 @@ ie_faster_build(${TARGET_NAME} PCH PRIVATE "precomp.hpp" ) -if(ENABLE_MKL_DNN) - add_dependencies(${TARGET_NAME} MKLDNNPlugin) - target_compile_definitions(${TARGET_NAME} PUBLIC ENABLE_MKL_DNN) -endif() - # Find OpenCV components if exist find_package(OpenCV COMPONENTS core imgproc QUIET) if(OpenCV_FOUND) @@ -44,11 +40,9 @@ endif() target_include_directories(${TARGET_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/io_blob_tests ${CMAKE_CURRENT_SOURCE_DIR}/input_tests - ${CMAKE_CURRENT_SOURCE_DIR}/inference_engine_regression_tests ${CMAKE_CURRENT_SOURCE_DIR}/lstm ${CMAKE_CURRENT_SOURCE_DIR}/common_single_layer_tests ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests - ${CMAKE_CURRENT_SOURCE_DIR}/graph_tools $ ) diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.hpp index 6584b08cbe0b71..c2cab4f96a7925 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.hpp @@ -4,8 +4,9 @@ #pragma once +#include #include -#include +#include #include #include "common_test_utils/common_layers_params.hpp" diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp index 92976e9cbc11ac..6cc9b0abc6c9c0 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp @@ -15,7 +15,9 @@ #include "deconv_ref.hpp" #include "def_conv_ref.hpp" #include "pool_ref.hpp" +#include "single_layer_common.hpp" #include "common_test_utils/common_layers_params.hpp" +#include using namespace InferenceEngine; diff --git a/inference-engine/tests_deprecated/functional/shared_tests/graph_tools/graph_tools_functional_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/graph_tools/graph_tools_functional_tests.hpp deleted file mode 100644 index 5ce1b544c59526..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/graph_tools/graph_tools_functional_tests.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include - -using namespace testing; -using namespace InferenceEngine::details; -using namespace InferenceEngine; -using namespace std; - -class GraphToolsFncTest : public ::testing::Test { -public: - template - static void checkSort(const T &sorted) { - for (int i = 0; i < sorted.size(); i++) { - //check that all input already visited: - for (auto &inputs : sorted[i]->insData) { - auto inputName = getCreatorLayer(inputs.lock()).lock()->name; - - bool bFound = false; - for (int j = 0; j < i; j++) { - if (sorted[j]->name == inputName) { - bFound = true; - break; - } - } - ASSERT_TRUE(bFound) << "order is not correct, layer " << sorted[i]->name << " has missed input: " - << inputName; - } - } - } -}; diff --git a/inference-engine/tests_deprecated/functional/shared_tests/inference_engine_regression_tests/common_dyn_batch_regression.hpp b/inference-engine/tests_deprecated/functional/shared_tests/inference_engine_regression_tests/common_dyn_batch_regression.hpp deleted file mode 100644 index c7c0d361b4e84b..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/inference_engine_regression_tests/common_dyn_batch_regression.hpp +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -using namespace ::testing; -using namespace InferenceEngine; - -class CommonDynBatchFuncTestParams { -public: - std::string deviceName; - double nearValue; - int batch_limit; - int cur_batch; - - CommonDynBatchFuncTestParams(const std::string &_deviceName, - int blimit, - int batch, - double _nearValue = 0.01f) : - deviceName(_deviceName), - batch_limit(blimit), - cur_batch(batch), - nearValue(_nearValue) {} -}; - -template -class TestNoRegressionDynBatch : public Regression::RegressionTests, - public WithParamInterface { - void SetUp() override { -// PluginCache::; - } - - std::string getDeviceName() const override { - return GetParam().deviceName; - } - -public: - int get_batch_limit() { - return GetParam().batch_limit; - } - - int get_cur_batch() { - return GetParam().cur_batch; - } -}; - -using TestNoRegressionDynBatchFP32 = TestNoRegressionDynBatch; - -TEST_P(TestNoRegressionDynBatchFP32, dynBatch) { - int bl = get_batch_limit(); - int bsz = get_cur_batch(); - auto fnPtr = ngraph::builder::subgraph::makeSingleConv({static_cast(bl), 3, 24, 24}); - - CNNNetwork net(fnPtr); - auto ieCore = PluginCache::get().ie(); - InferenceEngine::ExecutableNetwork exeNet = ieCore->LoadNetwork(net, GetParam().deviceName, - {{PluginConfigParams::KEY_DYN_BATCH_ENABLED, - PluginConfigParams::YES}}); - InferenceEngine::InferRequest inferRequest = exeNet.CreateInferRequest(); - - auto blob = FuncTestUtils::createAndFillBlob(net.getInputsInfo().begin()->second->getTensorDesc()); - - inferRequest.SetBatch(bsz); - inferRequest.SetBlob(net.getInputsInfo().begin()->first, blob); - inferRequest.Infer(); - auto *outRawData = inferRequest.GetBlob(net.getOutputsInfo().begin()->first)->cbuffer().as(); - const auto blobSize = blob->byteSize(); - const auto inBlobBuf = blob->cbuffer().as(); - std::vector inData(inBlobBuf, inBlobBuf + blobSize); - auto refOutData = ngraph::helpers::interpreterFunction(fnPtr, {inData}); - - float thr1, thr2; - FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32, thr1, thr2); - - std::vector inShapeLimited{size_t(bsz), 4, 20, 20}; - size_t outElementsCount = 
std::accumulate(begin(inShapeLimited), end(inShapeLimited), 1, std::multiplies()); - FuncTestUtils::compareRawBuffers(outRawData, reinterpret_cast(refOutData[0].data()), - outElementsCount, outElementsCount, - FuncTestUtils::CompareType::ABS_AND_REL, - thr1, thr2); -} - -std::string getTestCaseName(TestParamInfo obj) { - return obj.param.deviceName + "_" + std::to_string(obj.param.batch_limit) - + "_" + std::to_string(obj.param.cur_batch); -} diff --git a/inference-engine/tests_deprecated/functional/shared_tests/input_tests/parser_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/input_tests/parser_tests.hpp index 0222e8678fcd7e..0b948654fc8cf7 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/input_tests/parser_tests.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/input_tests/parser_tests.hpp @@ -7,7 +7,6 @@ #include #include #include - #include "common_test_utils/xml_net_builder/xml_net_builder.hpp" struct layer_params { diff --git a/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/cropResize_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/cropResize_tests.hpp index 8de15d34b17cc7..7000bedee2596b 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/cropResize_tests.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/cropResize_tests.hpp @@ -11,7 +11,6 @@ #include #include "tests_common.hpp" -#include "tests_common_func.hpp" #include "format_reader_ptr.h" #include "single_layer_common.hpp" diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp deleted file mode 100644 index ca1d9fcac16291..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "plg_test.hpp" - -#include -#include -#include - -// library taken from https://github.com/llohse/libnpy -#include "npy.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct ModelInfo { - std::string dir, xml, bin; -}; - -class LSTM_IR_Test : public PlgTest { -protected: - virtual void SetUp() { - PlgTest::SetUp(); - auto p = param(); - } -}; - -TEST_P(LSTM_IR_Test, canParseLSTM) { - auto fn_ptr = ngraph::builder::subgraph::makeTIwithLSTMcell(); - CNNNetwork net(fn_ptr); - - Core ie; - auto exec_net = ie.LoadNetwork(net, device_name); - auto inf_req = exec_net.CreateInferRequest(); - - auto _load_from_npy = [&](std::string name) { - std::replace(name.begin(), name.end(), '\\', '_'); - std::replace(name.begin(), name.end(), '/', '_'); - auto file_path = name + ".npy"; - - std::ifstream npy_file(file_path); - std::vector npy_shape; - std::vector npy_data; - if (npy_file.good()) - npy::LoadArrayFromNumpy(file_path, npy_shape, npy_data); - - return npy_data; - }; - - // auto _save_to_npy = [&](std::string name, - // const std::vector& npy_shape, - // const std::vector& npy_data) { - // std::replace(name.begin(), name.end(), '\\', '_'); - // std::replace(name.begin(), name.end(), '/', '_'); - // auto file_path = name + ".npy"; - - // npy::SaveArrayAsNumpy(file_path, false, (unsigned int)(npy_shape.size()), npy_shape.data(), npy_data); - // }; - - for (auto &info: net.getInputsInfo()) { - auto blob = inf_req.GetBlob(info.first); - auto npy = 
_load_from_npy(info.first); - - if (!npy.empty()) - std::copy_n(npy.data(), npy.size(), blob->buffer().as()); - } - - inf_req.Infer(); - - for (auto &info : net.getOutputsInfo()) { - auto blob = inf_req.GetBlob(info.first); - auto npy = _load_from_npy(info.first); - - if (!npy.empty()) - TestsCommon::compare(blob->buffer().as(), npy.data(), npy.size()); - - /* auto dims = blob->dims(); - - std::vector shape; - for (auto d : dims) shape.push_back(d); - - std::vector npy_data(blob->buffer().as(), blob->buffer().as() + blob->size()); - _save_to_npy(plugin_name + "_" + info.first, shape, npy_data); */ - } -} - -static std::vector workload = { -/* Directory | XML name | Bin name */ -{"Basic_LSTM_S/FP32", "Basic_LSTM_S"}, -}; diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp deleted file mode 100644 index 170378f66475e8..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp +++ /dev/null @@ -1,425 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include -#include - -using namespace ::testing; -using namespace InferenceEngine; -using std::vector; - -struct bin_conv_base_params { - vector in_dims; - vector kernel; - vector strides; - vector pads_begin; - vector pads_end; - vector dilations; - - size_t out_c; - size_t grp_c; - - vector out_dims; - - float pad_value; -}; - -struct bin_conv_test_params : bin_conv_base_params { - std::string device_name; - - bin_conv_test_params(std::string name, bin_conv_base_params params) : - bin_conv_base_params(params), device_name(name) {} - -}; - -class BinaryConvolutionOnlyTest : public TestsCommon, - public WithParamInterface { - - std::string model_t_4D = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - _IN_ - _OC_ - _OH_ - _OW_ - - - - - - - - -)V0G0N"; - -protected: - - static void fill_data_bin(float *data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = sinf((float)i) > 0.f ? 1.f : -1.f; - } - } - - static void fill_data_bin_packed(int8_t *data, size_t size) { - int nbits = 8; - for (size_t i = 0; i < div_up(size, nbits); i++) { - data[i] = static_cast(i % 255); - } - } - - size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) { - return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu; - } - - void createBlobs(const bin_conv_test_params &p, TBlob::Ptr &src, TBlob::Ptr &dst, TBlob::Ptr &dst_ref) { - auto in_size = p.in_dims.size(); - auto out_size = p.out_dims.size(); - SizeVector dims_src; - for (int i = in_size; i > 0; i--) { - dims_src.insert(dims_src.begin(), p.in_dims[i - 1]); - } - - SizeVector dims_dst = { - 1lu, - p.out_c, - p.out_dims[out_size - 2] == 0 ? calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2], - p.out_dims[out_size - 1] == 0 ? calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1] - }; - - Layout layout = NCHW; - if (in_size == 5) { - layout = NCDHW; - - dims_dst.insert(dims_dst.begin() + 3, - p.out_dims.size() > 2 ? - (p.out_dims[out_size - 3] == 0 ? 
- calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu); - } - - src = make_shared_blob({Precision::FP32, dims_src, layout}); - src->allocate(); - - dst = make_shared_blob({Precision::FP32, dims_dst, layout}); - dst->allocate(); - - dst_ref = make_shared_blob({Precision::FP32, dims_dst, layout}); - dst_ref->allocate(); - } - - TBlob::Ptr fillWeights(const bin_conv_test_params &p) { - auto KZ = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu; - TBlob *weights_ptr = new TBlob({Precision::BIN, - {(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c + p.out_c)}, - Layout::C}); - weights_ptr->allocate(); - fill_data_bin_packed(weights_ptr->buffer(), weights_ptr->size()); - return TBlob::Ptr(weights_ptr); - } - - - struct bin_conv_common_params { - InferenceEngine::PropertyVector stride; - InferenceEngine::PropertyVector kernel; - InferenceEngine::PropertyVector pads_begin; - InferenceEngine::PropertyVector pads_end; - InferenceEngine::PropertyVector dilation; - std::string auto_pad; - size_t group; - size_t out_c; - float pad_value; - }; - - void ref_bin_conv_common(const Blob& src, - Blob& dst, - const uint8_t* weights_data, - const bin_conv_common_params& prm) { - if (src.getTensorDesc().getLayout() != Layout::NCHW && - dst.getTensorDesc().getLayout() != Layout::NCDHW) - IE_THROW() << "Reference FP32 convolution supports NCHW and NCDHW layouts only"; - size_t KW = prm.kernel[X_AXIS]; - size_t KH = prm.kernel[Y_AXIS]; - size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu; - - size_t SW = prm.stride[X_AXIS]; - size_t SH = prm.stride[Y_AXIS]; - size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu; - - size_t DW = prm.dilation[X_AXIS]; - size_t DH = prm.dilation[Y_AXIS]; - size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu; - - size_t PW = prm.pads_begin[X_AXIS]; - size_t PH = prm.pads_begin[Y_AXIS]; - size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu; - - size_t GC = prm.group; - - auto src_dims = src.getTensorDesc().getDims(); - size_t IW, IH, ID, IC = src_dims[1]; - - if (src_dims.size() == 5lu) { - IW = src_dims[4]; - IH = src_dims[3]; - ID = src_dims[2]; - } else { - IW = src_dims[3]; - IH = src_dims[2]; - ID = 1lu; - } - - auto dst_dims = dst.getTensorDesc().getDims(); - size_t OW, OH, OD; - size_t OC = prm.out_c; - - if (dst_dims.size() == 5lu) { - OW = dst_dims[4]; - OH = dst_dims[3]; - OD = dst_dims[2]; - } - else { - OW = dst_dims[3]; - OH = dst_dims[2]; - OD = 1lu; - } - - const auto* src_data = src.cbuffer().as(); - auto* dst_data = dst.buffer().as(); - - int nbits = 8; - - auto extract_weights = [](uint8_t val, uint8_t bit) -> float { - return (uint8_t)((val >> bit) & 0x0001) == 1 ? 
1.f : -1.f; - }; - - for (uint32_t g = 0; g < GC; g++) { - for (uint32_t oc = 0; oc < OC / GC; oc++) { - for (uint32_t od = 0; od < OD; od++) { - for (uint32_t oh = 0; oh < OH; oh++) { - for (uint32_t ow = 0; ow < OW; ow++) { - size_t oidx = g * OC / GC * OD * OH * OW - + oc * OD * OH * OW - + od * OH * OW - + oh * OW - + ow; - - dst_data[oidx] = 0.f; - - for (size_t ic = 0; ic < IC / GC; ic++) { - for (size_t kd = 0; kd < KD; kd++) { - for (size_t kh = 0; kh < KH; kh++) { - for (size_t kw = 0; kw < KW; kw++) { - size_t widx = g * OC / GC * IC / GC * KD * KH * KW - + oc * IC / GC * KD * KH * KW - + ic * KD * KH * KW - + kd * KH * KW - + kh * KW - + kw; - float w = extract_weights(weights_data[widx/nbits], (uint8_t)(widx % nbits)); - - float s; - - int32_t iw = ow * SW - PW + kw * DW; - int32_t ih = oh * SH - PH + kh * DH; - int32_t id = od * SD - PD + kd * DD; - if (iw < 0 || iw >= (int32_t) IW || - ih < 0 || ih >= (int32_t) IH || - id < 0 || id >= (int32_t) ID) { - s = prm.pad_value; - } else { - size_t iidx = g * IC / GC * ID * IH * IW - + ic * ID * IH * IW - + id * IH * IW - + ih * IW - + iw; - s = src_data[iidx]; - } - - dst_data[oidx] += s * w; - } - } - } - } - } - } - } - } - } - } - - void calculateRef(const TBlob::Ptr &weights, const bin_conv_test_params &p, const TBlob::Ptr &src, - TBlob::Ptr &dst_ref) { - const uint8_t *weights_data = (const uint8_t *)weights->buffer(); - size_t bias_size = p.out_c; - bin_conv_common_params params; - for (int i = 0; i < p.kernel.size(); i++) - params.kernel.insert(i, p.kernel[i]); - for (int i = 0; i < p.strides.size(); i++) - params.stride.insert(i, p.strides[i]); - for (int i = 0; i < p.pads_begin.size(); i++) - params.pads_begin.insert(i, p.pads_begin[i]); - for (int i = 0; i < p.dilations.size(); i++) - params.dilation.insert(i, p.dilations[i]); - params.group = p.grp_c; - params.out_c = p.out_c; - params.pad_value = p.pad_value; - - ref_bin_conv_common(*src.get(), *dst_ref.get(), weights_data, params); - } - - CNNNetwork getNetwork(const TBlob::Ptr &weights, const bin_conv_test_params &p) { - Core ie; - return ie.ReadNetwork(getModel(p), weights); - } - - virtual void - infer(CNNNetwork &network, const bin_conv_test_params &p, TBlob::Ptr &src, TBlob::Ptr &dst) { - Core ie; - ExecutableNetwork executable_network = ie.LoadNetwork(network, p.device_name); - InferRequest inferRequest = executable_network.CreateInferRequest(); - - InputsDataMap inputInfo(network.getInputsInfo()); - inferRequest.SetBlob(inputInfo.begin()->first, src); - - OutputsDataMap outputInfo(network.getOutputsInfo()); - inferRequest.SetBlob(outputInfo.begin()->first, dst); - - inferRequest.Infer(); - } - - void SetUp() override { - try { - auto p = ::testing::WithParamInterface::GetParam(); - TBlob::Ptr src, dst, dst_ref; - - createBlobs(p, src, dst, dst_ref); - fill_data_bin(src->data(), src->size()); - - auto weights = fillWeights(p); - calculateRef(weights, p, src, dst_ref); - - CNNNetwork network = getNetwork(weights, p); - infer(network, p, src, dst); - - compare(*dst, *dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } - - virtual std::string getModel(bin_conv_test_params p) { - std::string model; - auto in_dims_size = p.in_dims.size(); - model = model_t_4D; - - REPLACE_WITH_NUM(model, "_IW_", p.in_dims[in_dims_size - 1]); - REPLACE_WITH_NUM(model, "_IH_", p.in_dims[in_dims_size - 2]); - REPLACE_WITH_NUM(model, "_ID_", p.in_dims[in_dims_size - 3]); - REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]); - REPLACE_WITH_NUM(model, "_IN_", 
p.in_dims[0]); - - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end); - REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations); - - auto out_dims_size = p.out_dims.size(); - REPLACE_WITH_NUM(model, "_GC_", p.grp_c); - REPLACE_WITH_NUM(model, "_OC_", p.out_c); - REPLACE_WITH_NUM(model, "_OD_", out_dims_size > 2 ? - (p.out_dims[out_dims_size - 3] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_dims_size - 3]) : - 1lu); - REPLACE_WITH_NUM(model, "_OH_", p.out_dims[out_dims_size - 2] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_dims_size - 2]); - REPLACE_WITH_NUM(model, "_OW_", p.out_dims[out_dims_size - 1] == 0 ? - calculateOutDim(p.in_dims[in_dims_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_dims_size - 1]); - - size_t KD = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu; - - int nbits = 8; - size_t w_data_size = div_up(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KD * p.out_c * p.in_dims[1] / p.grp_c, nbits); - REPLACE_WITH_NUM(model, "_S1_", w_data_size); - - REPLACE_WITH_NUM(model, "_PV_", p.pad_value); - REPLACE_WITH_STR(model, "_M_", "xnor-popcount"); - - return model; - } -}; - -#define case_1 bin_conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, -1.f}) -#define case_2 bin_conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 0.f}) -#define case_3 bin_conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, -1.f}) -#define case_4 bin_conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 0.f}) -#define case_5 bin_conv_base_params({{1lu, 9lu, 32lu, 16lu}, {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 1.f}) -#define case_6 bin_conv_base_params({{1lu, 3lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 2lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 20lu, 1lu, {0lu, 0lu}, 0.f}) -#define case_7 bin_conv_base_params({{1lu, 9lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, -1.f}) -#define case_8 bin_conv_base_params({{1lu, 9lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 0.f}) -#define case_9 bin_conv_base_params({{1lu, 9lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 1.f}) -#define case_10 bin_conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 16lu, 16lu, {0lu, 0lu}, 0.f}) -#define case_11 bin_conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 32lu, 32lu, {0lu, 0lu}, 0.f}) -#define case_12 bin_conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {9lu, 9lu}, 16lu, 16lu, {0lu, 0lu}, 0.f}) -#define case_13 bin_conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 32lu, 32lu, {0lu, 0lu}, 0.f}) -#define case_14 bin_conv_base_params({{1lu, 19lu, 16lu, 32lu}, {3lu, 3lu}, 
{1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, 21lu, 1lu, {0lu, 0lu}, -1.f}) -#define case_15 bin_conv_base_params({{1lu, 17lu, 16lu, 32lu}, {3lu, 3lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, 19lu, 1lu, {0lu, 0lu}, 0.f}) -#define case_16 bin_conv_base_params({{1lu, 21lu, 16lu, 32lu}, {3lu, 3lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, 33lu, 1lu, {0lu, 0lu}, 1.f}) - -TEST_P(BinaryConvolutionOnlyTest, TestsBinaryConvolution) { -} - -std::string getTestCaseName(testing::TestParamInfo obj) { - auto in_dims_size = obj.param.in_dims.size(); - return obj.param.device_name + - "_w" + std::to_string(obj.param.in_dims[in_dims_size - 1]) + - "_h" + std::to_string(obj.param.in_dims[in_dims_size - 2]) + - (obj.param.in_dims.size() > 4 ? "_d" + std::to_string(obj.param.in_dims[in_dims_size - 3]) : "") + - "_c" + std::to_string(obj.param.in_dims[1]) + - "_kw" + std::to_string(obj.param.kernel[X_AXIS]) + - "_kh" + std::to_string(obj.param.kernel[Y_AXIS]) + - (obj.param.kernel.size() > Z_AXIS ? "_kd" + std::to_string(obj.param.kernel[Z_AXIS]) : "") + - "_sw" + std::to_string(obj.param.strides[X_AXIS]) + - "_sh" + std::to_string(obj.param.strides[Y_AXIS]) + - (obj.param.strides.size() > Z_AXIS ? "_sd" + std::to_string(obj.param.strides[Z_AXIS]) : "") + - "_dilw" + std::to_string(obj.param.dilations[X_AXIS]) + - "_dilh" + std::to_string(obj.param.dilations[Y_AXIS]) + - (obj.param.dilations.size() > Z_AXIS ? "_dild" + std::to_string(obj.param.dilations[Z_AXIS]) : "") + - "_grpc" + std::to_string(obj.param.grp_c) + - "_pad_v" + std::to_string(obj.param.pad_value); -} diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/deformable_psroi_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/deformable_psroi_tests.hpp deleted file mode 100644 index 16f18da5fb97f7..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/deformable_psroi_tests.hpp +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "common_test_utils/data_utils.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct deformable_psroi_test_params { - std::string device_name; - - std::vector src_dims; - std::vector bbox_dims; - std::vector out_dims; - float spatial_scale; - size_t output_dim; - size_t group_size; - size_t pooled_height; - size_t pooled_width; - int part_size; - int sample_per_part; - bool no_trans; - float trans_std; - std::vector trans_dims; -}; - -inline float bilinear_interp(const float* data, const float x, const float y, const int width, const int height) { - int x1 = static_cast(std::floor(x)); - int x2 = static_cast(std::ceil(x)); - int y1 = static_cast(std::floor(y)); - int y2 = static_cast(std::ceil(y)); - float dist_x = x - x1; - float dist_y = y - y1; - float value11 = data[y1 * width + x1]; - float value12 = data[y2 * width + x1]; - float value21 = data[y1 * width + x2]; - float value22 = data[y2 * width + x2]; - float value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 - + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; - return value; -} - -static void ref_deformable_psroi(const std::vector &srcs, std::vector &dsts, deformable_psroi_test_params prm) { - float* dst_data = dsts[0]->buffer(); - const float *bottom_data_beginning = srcs[1]->buffer(); - const float 
*bottom_rois_beginning = srcs[0]->buffer(); - - SizeVector inDims = srcs[1]->getTensorDesc().getDims(); - int channels = static_cast(inDims[1]); - int height = static_cast(inDims[2]); - int width = static_cast(inDims[3]); - - SizeVector outDims = dsts[0]->getTensorDesc().getDims(); - int nn = static_cast(outDims[0]); - int nc = static_cast(outDims[1]); - int nh = static_cast(outDims[2]); - int nw = static_cast(outDims[3]); - - int real_rois = 0; - for (; real_rois < nn; real_rois++) { - const float *bottom_rois = bottom_rois_beginning + real_rois * 5; - int roi_batch_ind = static_cast(bottom_rois[0]); - if (roi_batch_ind == -1) { - break; - } - } - - float *bottom_trans = nullptr; - int num_classes = 1; - int channels_each_class = prm.output_dim; - if (srcs.size() == 3) { - bottom_trans = srcs[2]->buffer(); - num_classes = static_cast(srcs[2]->getTensorDesc().getDims()[1]) / 2; - channels_each_class /= num_classes; - } - - for (int n = 0; n < real_rois; n++) { - const float *bottom_rois = bottom_rois_beginning + n * 5; - int roi_batch_ind = static_cast(bottom_rois[0]); - float roi_start_w = static_cast(round(bottom_rois[1])) * prm.spatial_scale - 0.5; - float roi_start_h = static_cast(round(bottom_rois[2])) * prm.spatial_scale - 0.5; - float roi_end_w = static_cast(round(bottom_rois[3]) + 1.0) * prm.spatial_scale - 0.5; - float roi_end_h = static_cast(round(bottom_rois[4]) + 1.0) * prm.spatial_scale - 0.5; - float roi_width = std::max(static_cast(roi_end_w - roi_start_w), 0.1); - float roi_height = std::max(static_cast(roi_end_h - roi_start_h), 0.1); - - for (int c = 0; c < nc; c++) { - for (int h = 0; h < nh; h++) { - for (int w = 0; w < nw; w++) { - size_t index = n*nc*nh*nw + c*nh*nw + h*nw + w; - dst_data[index] = 0.0f; - - float bin_size_h = roi_height / static_cast(prm.pooled_height); - float bin_size_w = roi_width / static_cast(prm.pooled_width); - - float sub_bin_size_h = bin_size_h / static_cast(prm.sample_per_part); - float sub_bin_size_w = bin_size_w / static_cast(prm.sample_per_part); - - int part_h = static_cast(std::floor(static_cast(h) / prm.pooled_height * prm.part_size)); - int part_w = static_cast(std::floor(static_cast(w) / prm.pooled_width * prm.part_size)); - - int class_id = c / channels_each_class; - float trans_x = prm.no_trans ? 0 : - bottom_trans[(((n * num_classes + class_id) * 2) * prm.part_size + part_h) - * prm.part_size + part_w] * prm.trans_std; - float trans_y = prm.no_trans ? 
0 : - bottom_trans[(((n * num_classes + class_id) * 2 + 1) * prm.part_size + part_h) - * prm.part_size + part_w] * prm.trans_std; - - float wstart = w * bin_size_w + roi_start_w + trans_x * roi_width; - float hstart = h * bin_size_h + roi_start_h + trans_y * roi_height; - - float sum = 0; - int count = 0; - int gw = (static_cast(w) * prm.group_size / prm.pooled_width ); - int gh = (static_cast(h) * prm.group_size / prm.pooled_height ); - gw = std::min(std::max(gw, 0), static_cast(prm.group_size - 1)); - gh = std::min(std::max(gh, 0), static_cast(prm.group_size - 1)); - - const float* offset_bottom_data = bottom_data_beginning + (roi_batch_ind * channels) * height * width; - for (size_t ih = 0; ih < prm.sample_per_part; ih++) { - for (size_t iw = 0; iw < prm.sample_per_part; iw++) { - float w1 = wstart + iw * sub_bin_size_w; - float h1 = hstart + ih * sub_bin_size_h; - // bilinear interpolation - if (w1 < -0.5 || w1 > width - 0.5 || h1 < -0.5 || h1 > height - 0.5) - continue; - w1 = std::min(std::max(w1, 0.0f), width - 1.0f); - h1 = std::min(std::max(h1, 0.0f), height - 1.0f); - int c1 = static_cast((c * prm.group_size + gh) * prm.group_size + gw); - float val = bilinear_interp(offset_bottom_data + c1 * height * width, w1, h1, width, height); - sum += val; - count++; - } - } - dst_data[index] = count == 0 ? 0 : sum / count; - } - } - } - } - for (int n = real_rois; n < nn; n++) { - for (int c = 0; c < nc; c++) { - for (int h = 0; h < nh; h++) { - for (int w = 0; w < nw; w++) { - int index = n * nc * nh * nw + c * nh * nw + h * nw + w; - dst_data[index] = 0.0f; - } - } - } - } -} - -class DeformablePSROIOnlyTest : public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - __SRC_DIMS__ - - - - - - __BBOX_DIMS__ - - - __TRANS__ - - - - __SRC_DIMS__ - - __BBOX_DIMS__ - __TRANS_DIMS__ - - - __OUT_DIMS__ - - - - - - - __EDGE_TRANS__ - - -)V0G0N"; - - std::string getModel(deformable_psroi_test_params p) { - std::string model = model_t; - - std::string no_trans = "True"; - std::string trans = ""; - std::string trans_params = ""; - std::string trans_dims = ""; - std::string edge_trans = ""; - if (!p.no_trans) { - no_trans = "False"; - - trans = R"VOGON( - - __TRANS_DIMS__ - - )VOGON"; - - trans_params += " trans_std=\"" + std::to_string(p.trans_std) + "\""; - - trans_dims += "\n "; - for (auto &dim : p.trans_dims) { - trans_dims += "\n "; - trans_dims += std::to_string(dim) + ""; - } - trans_dims += "\n "; - - edge_trans = "\n "; - } - REPLACE_WITH_STR(model, "__TRANS__", trans); - REPLACE_WITH_STR(model, "__TRANS_PARAMS__", trans_params); - REPLACE_WITH_STR(model, "__TRANS_DIMS__", trans_dims); - REPLACE_WITH_STR(model, "__EDGE_TRANS__", edge_trans); - - std::string src_dims = ""; - for (auto &dim : p.src_dims) { - src_dims += "\n "; - src_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims); - std::string bbox_dims = ""; - for (auto &dim : p.bbox_dims) { - bbox_dims += "\n "; - bbox_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__BBOX_DIMS__", bbox_dims); - std::string out_dims = ""; - for (auto &dim : p.out_dims) { - out_dims += "\n "; - out_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model, "__OUT_DIMS__", out_dims); - - REPLACE_WITH_STR(model, "__NO_TRANS__", no_trans); - REPLACE_WITH_NUM(model, "__SPATIAL_SCALE__", p.spatial_scale); - REPLACE_WITH_NUM(model, "__OUTPUT_DIM__", p.output_dim); - REPLACE_WITH_NUM(model, "__PART_SIZE__", p.part_size); - REPLACE_WITH_NUM(model, "__GROUP_SIZE__", 
p.group_size); - REPLACE_WITH_NUM(model, "__POOLED_HEIGHT__", p.pooled_height); - REPLACE_WITH_NUM(model, "__POOLED_WIDTH__", p.pooled_width); - REPLACE_WITH_NUM(model, "__SAMPLE_PER_PART__", p.sample_per_part); - - return model; - } - -protected: - virtual void SetUp() { - try { - deformable_psroi_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr()); - ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name); - InferRequest inferRequest = executable_network.CreateInferRequest(); - - std::vector srcs_vec; - - InputsDataMap in_info_map = net.getInputsInfo(); - for (auto info : in_info_map) { - Blob::Ptr blob = make_shared_blob( - {Precision::FP32, info.second->getTensorDesc().getDims(), Layout::ANY}); - blob->allocate(); - if (info.second->name() == "data") { - CommonTestUtils::fill_data_sine(blob->buffer(), blob->size(), 1.0f, 5.0f, 0.1f); - } else if (info.second->name() == "bbox") { - CommonTestUtils::fill_data_bbox(blob->buffer(), blob->size(), p.src_dims[2], p.src_dims[3], 1.0f); - } else if (info.second->name() == "trans") { - CommonTestUtils::fill_data_sine(blob->buffer(), blob->size(), 0.0f, 10.0f, 1.0f); - } - - inferRequest.SetBlob(info.first, blob); - srcs_vec.push_back(blob); - } - - BlobMap dsts_map; - std::vector dsts_vec; - - OutputsDataMap out_info_map = net.getOutputsInfo(); - for (auto info : out_info_map) { - Blob::Ptr blob = make_shared_blob( - {Precision::FP32, info.second->getTensorDesc().getDims(), Layout::ANY}); - blob->allocate(); - inferRequest.SetBlob(info.first, blob); - dsts_map[info.first] = blob; - - Blob::Ptr blob_ref = make_shared_blob( - {Precision::FP32, info.second->getTensorDesc().getDims(), Layout::ANY}); - blob_ref->allocate(); - dsts_vec.push_back(blob_ref); - } - - ref_deformable_psroi(srcs_vec, dsts_vec, p); - - inferRequest.Infer(); - - TBlob* dstPtr = dynamic_cast*>(dsts_map.begin()->second.get()); - TBlob* dstrefPtr = dynamic_cast*>(dsts_vec[0].get()); - - compare(*dsts_map.begin()->second, *dsts_vec[0]); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(DeformablePSROIOnlyTest, TestsDeformable) {} - -/*** TBD ***/ - - - diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gemm_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gemm_tests.hpp deleted file mode 100644 index 8e431ee9326e83..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gemm_tests.hpp +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include -#include -#include - -using namespace InferenceEngine; - -struct gemm_base_params { - float alpha; - float beta; - bool transpose_A; - bool transpose_B; - SizeVector dims_A; - SizeVector dims_B; - SizeVector dims_C; - - gemm_base_params() = default; - gemm_base_params(float _alpha, - float _beta, - bool _transpose_A, - bool _transpose_B, - SizeVector _dims_A, - SizeVector _dims_B, - SizeVector _dims_C = {}) - : alpha(_alpha) - , beta(_beta) - , transpose_A(_transpose_A) - , transpose_B(_transpose_B) - , dims_A(_dims_A) - , dims_B(_dims_B) - , dims_C(_dims_C) - {} - - virtual void print(std::ostream& os) const { - os << "alpha: " << alpha << ", beta: " << beta - << ", trans A: " << 
transpose_A << ", trans B: " << transpose_B - << std::endl; - - auto print_dims = [&](std::string name, const SizeVector& dims) { - os << name << ": {"; - if (!dims.empty()) - os << dims[0]; - for (size_t i = 1; i < dims.size(); ++i) - os << ", " << dims[i]; - os << "}" << std::endl; - }; - - print_dims("A", dims_A); - print_dims("B", dims_B); - print_dims("C", dims_C); - } - - virtual SizeVector outDims() const { - size_t max_dims_num = std::max(dims_A.size(), dims_B.size()); - max_dims_num = std::max(dims_C.size(), max_dims_num); - - SizeVector dims_out(max_dims_num); - // Process batch dims in reverse for required alignment - for (size_t rbi = 0; rbi < max_dims_num - 2; ++rbi) { - size_t max_val = 1; - - if (rbi + 2 < dims_A.size()) { - auto bi_A = dims_A.size() - rbi - 3; - max_val = std::max(max_val, dims_A[bi_A]); - } - if (rbi + 2 < dims_B.size()) { - auto bi_B = dims_B.size() - rbi - 3; - max_val = std::max(max_val, dims_B[bi_B]); - } - if (rbi + 2 < dims_C.size()) { - auto bi_C = dims_C.size() - rbi - 3; - max_val = std::max(max_val, dims_C[bi_C]); - } - - auto bi_out = max_dims_num - rbi - 3; - dims_out[bi_out] = max_val; - } - - auto y_dim_A = transpose_A ? dims_A.size() - 1 : dims_A.size() - 2; - auto x_dim_B = transpose_B ? dims_B.size() - 2 : dims_B.size() - 1; - dims_out[dims_out.size() - 1] = dims_B[x_dim_B]; - dims_out[dims_out.size() - 2] = dims_A[y_dim_A]; - - return dims_out; - } -}; - - -std::vector ref_gemm(const gemm_base_params& params, - const std::vector& data_A, - const std::vector& data_B, - const std::vector& data_C) { - const auto& dims_A = params.dims_A; - const auto& dims_B = params.dims_B; - const auto& dims_C = params.dims_C; - - bool use_C = !dims_C.empty(); - - auto x_A = dims_A[dims_A.size() - 1]; - auto y_A = dims_A[dims_A.size() - 2]; - auto x_pitch_A = size_t(1); - auto y_pitch_A = x_A; - - auto x_B = dims_B[dims_B.size() - 1]; - auto y_B = dims_B[dims_B.size() - 2]; - auto x_pitch_B = size_t(1); - auto y_pitch_B = x_B; - - if (params.transpose_A) { - std::swap(x_A, y_A); - std::swap(x_pitch_A, y_pitch_A); - } - - if (params.transpose_B) { - std::swap(x_B, y_B); - std::swap(x_pitch_B, y_pitch_B); - } - - auto dims_out = params.outDims(); - - auto x_out = dims_out[dims_out.size() - 1]; - auto y_out = dims_out[dims_out.size() - 2]; - auto x_pitch_out = size_t(1); - auto y_pitch_out = x_out; - - auto out_batch_num = dims_out.size() - 2; - - // Calculates batch pitches in reverse order - auto calculate_batch_pitches = [out_batch_num](const SizeVector& dims) { - std::vector batch_pitches = { }; - batch_pitches.reserve(out_batch_num); - size_t real_pitch = dims[dims.size() - 2] * dims[dims.size() - 1]; - - for (size_t rbi = 0; rbi < out_batch_num; ++rbi) { - if (rbi + 2 < dims.size() && dims[dims.size() - rbi - 3] != 1) { - batch_pitches.push_back(real_pitch); - real_pitch *= dims[dims.size() - rbi - 3]; - } else { - // Set to zero for broadcasting - batch_pitches.push_back(0ul); - } - } - - return batch_pitches; - }; - - auto batch_pitches_A = calculate_batch_pitches(dims_A); - auto batch_pitches_B = calculate_batch_pitches(dims_B); - auto batch_pitches_C = use_C ? 
calculate_batch_pitches(dims_C) : std::vector(); - auto batch_pitches_out = calculate_batch_pitches(dims_out); - - auto k = x_A; - - auto total_out_size = std::accumulate(dims_out.begin(), dims_out.end(), 1ul, std::multiplies()); - std::vector data_out(total_out_size, 0.f); - - // Currently processed batch indices in reverse order - std::vector current_batch_indices(out_batch_num, 0ul); - auto get_current_batch_offset = [&](const std::vector& pitches) { - return std::inner_product(pitches.begin(), pitches.end(), current_batch_indices.begin(), 0ul); - }; - - do { - auto batch_offset_A = get_current_batch_offset(batch_pitches_A); - auto batch_offset_B = get_current_batch_offset(batch_pitches_B); - auto batch_offset_C = use_C ? get_current_batch_offset(batch_pitches_C) : 0ul; - auto batch_offset_out = get_current_batch_offset(batch_pitches_out); - - for (size_t yi = 0; yi < y_out; ++yi) { - for (size_t xi = 0; xi < x_out; ++xi) { - - float acc = 0.f; - if (params.alpha != 0.f) { - for (size_t ki = 0; ki < k; ++ki) { - auto idx_A = batch_offset_A + yi * y_pitch_A + ki * x_pitch_A; - auto idx_B = batch_offset_B + ki * y_pitch_B + xi * x_pitch_B; - - acc += data_A[idx_A] * data_B[idx_B]; - } - - acc *= params.alpha; - } - - if (use_C && params.beta != 0.f) { - auto idx_C = batch_offset_C + yi * y_pitch_out + xi * x_pitch_out; - acc += params.beta * data_C[idx_C]; - } - - auto idx_out = batch_offset_out + yi * y_pitch_out + xi * x_pitch_out; - data_out[idx_out] = acc; - } - } - - for (size_t i = 0; i < out_batch_num; ++i) { - current_batch_indices[i] += 1; - if (current_batch_indices[i] == dims_out[dims_out.size() - 3 - i] && - i != out_batch_num - 1) { // Don't reset last index as it signals end of calculations - current_batch_indices[i] = 0; - } else { - break; - } - } - } while (current_batch_indices.size() > 0 && - current_batch_indices[current_batch_indices.size() - 1] != dims_out[0]); - - return data_out; -} - -struct gemm_test_params : gemm_base_params { - std::string device_name; - std::string precision; - - gemm_test_params(std::string name, std::string _precision, gemm_base_params base) - : gemm_base_params(base) - , device_name(name) - , precision(_precision) - {} - - gemm_test_params(std::tuple wrapper) - : gemm_test_params(std::get<0>(wrapper), std::get<1>(wrapper), std::get<2>(wrapper)) - {} - - void print(std::ostream& os) const override { - os << "Device: " << device_name << ", precision: " << precision << std::endl; - gemm_base_params::print(os); - } -}; - -class GemmTestBase : public TestsCommon { - std::string model_t = R"V0G0N( - - - - - - _IN_A_DIMS_ - - - - - - - _IN_B_DIMS_ - - - - _IN_C_LAYER_ - - - - - _IN_A_DIMS_ - - - _IN_B_DIMS_ - - _IN_C_GEMM_PORT_ - - - - _OUT_DIMS_ - - - - - - - - _IN_C_EDGE_ - - -)V0G0N"; - -std::string in_C_layer = R"V0G0N( - - - - _IN_C_DIMS_ - - - -)V0G0N"; - -std::string in_C_port = R"V0G0N( - - _IN_C_DIMS_ - -)V0G0N"; - -std::string in_C_edge = R"V0G0N( - -)V0G0N"; - -protected: - virtual float getThreshold(const gemm_test_params& params) { - if (params.precision == "FP16") - return 0.02f; - else - return 0.01f; - } - - std::string getModel(const gemm_test_params& params) { - auto model = model_t; - - if (!params.dims_C.empty()) { - REPLACE_WITH_STR(model, "_IN_C_LAYER_", in_C_layer); - REPLACE_WITH_STR(model, "_IN_C_GEMM_PORT_", in_C_port); - REPLACE_WITH_STR(model, "_IN_C_EDGE_", in_C_edge); - } else { - REPLACE_WITH_STR(model, "_IN_C_LAYER_", ""); - REPLACE_WITH_STR(model, "_IN_C_GEMM_PORT_", ""); - REPLACE_WITH_STR(model, "_IN_C_EDGE_", 
""); - } - - REPLACE_WITH_STR(model, "_PRECISION_", params.precision); - - REPLACE_WITH_NUM(model, "_ALPHA_", params.alpha); - REPLACE_WITH_NUM(model, "_BETA_", params.beta); - REPLACE_WITH_NUM(model, "_TRANS_A_", params.transpose_A); - REPLACE_WITH_NUM(model, "_TRANS_B_", params.transpose_B); - - auto get_dims_str = [](const SizeVector& dims) { - std::string result; - for (const auto& d : dims) { - result += "" + std::to_string(d) + "\n"; - } - return result; - }; - - std::string in_A_dims = get_dims_str(params.dims_A); - std::string in_B_dims = get_dims_str(params.dims_B); - std::string in_C_dims = get_dims_str(params.dims_C); - std::string out_dims = get_dims_str(params.outDims()); - - REPLACE_WITH_STR(model, "_IN_A_DIMS_", in_A_dims); - REPLACE_WITH_STR(model, "_IN_B_DIMS_", in_B_dims); - REPLACE_WITH_STR(model, "_IN_C_DIMS_", in_C_dims); - REPLACE_WITH_STR(model, "_OUT_DIMS_", out_dims); - - return model; - } - - CNNNetwork getNetwork(Core & ie, const gemm_test_params& params) { - std::string model = getModel(params); - - CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr()); - - network.getInputsInfo().at("input_A")->setPrecision(Precision::FP32); - network.getInputsInfo().at("input_B")->setPrecision(Precision::FP32); - if (!params.dims_C.empty()) - network.getInputsInfo().at("input_C")->setPrecision(Precision::FP32); - - network.getOutputsInfo().at("gemm")->setPrecision(Precision::FP32); - - return network; - } - - void runTest(const gemm_test_params& test_params, - const std::vector& data_A, - const std::vector& data_B, - const std::vector& data_C, - const std::vector& ref_output) { - test_params.print(std::cout); - - Core ie; - auto network = getNetwork(ie, test_params); - auto exec = ie.LoadNetwork(network, test_params.device_name); - auto request = exec.CreateInferRequest(); - - auto fill_blob = [&](const char* name, const std::vector& data) { - Blob::Ptr blob = request.GetBlob(name); - - auto fill_size = std::min(blob->size(), data.size()); - auto buffer = blob->buffer().as(); - - for (size_t i = 0; i < fill_size; ++i) { - buffer[i] = data[i]; - } - }; - - fill_blob("input_A", data_A); - fill_blob("input_B", data_B); - if (!test_params.dims_C.empty()) { - fill_blob("input_C", data_C); - } - - request.Infer(); - - if (!ref_output.empty()) { - Blob::Ptr blob_out = request.GetBlob("gemm"); - ASSERT_EQ(blob_out->size(), ref_output.size()); - - auto buf_out = blob_out->buffer().as(); - compare(buf_out, ref_output.data(), blob_out->size(), getThreshold(test_params)); - } - } -}; - -using GemmRandomTestParam = std::tuple< - std::string, // plugin - std::string, // precision - gemm_base_params>; // gemm params - -class GemmRandomTest : public GemmTestBase, public testing::WithParamInterface {}; - -// Basic cases: all transposition combinations, 2D-5D -#define case1 gemm_base_params(1.2f, 3.f, false, false, {9ul, 11ul}, {11ul, 13ul} ) -#define case2 gemm_base_params(1.2f, 3.f, false, false, {9ul, 11ul}, {11ul, 13ul}, {9ul, 13ul} ) -#define case3 gemm_base_params(2.5f, 1.2f, false, false, {7ul, 9ul, 11ul}, {7ul, 11ul, 13ul} ) -#define case4 gemm_base_params(2.5f, 1.2f, false, false, {7ul, 9ul, 11ul}, {7ul, 11ul, 13ul}, {7ul, 9ul, 13ul} ) -#define case5 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}) -#define case6 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {3ul, 7ul, 9ul, 13ul} ) -#define case7 gemm_base_params(1.2f, -1.5f, false, false, {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 11ul, 13ul}) 
-#define case8 gemm_base_params(1.2f, -1.5f, false, false, {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 11ul, 13ul}, {2ul, 3ul, 7ul, 9ul, 13ul}) -#define case9 gemm_base_params(1.2f, 3.f, true, false, {11ul, 9ul}, {11ul, 13ul} ) -#define case10 gemm_base_params(1.2f, 3.f, true, false, {11ul, 9ul}, {11ul, 13ul}, {9ul, 13ul} ) -#define case11 gemm_base_params(2.5f, 1.2f, true, false, {7ul, 11ul, 9ul}, {7ul, 11ul, 13ul} ) -#define case12 gemm_base_params(2.5f, 1.2f, true, false, {7ul, 11ul, 9ul}, {7ul, 11ul, 13ul}, {7ul, 9ul, 13ul} ) -#define case13 gemm_base_params(1.2f, -1.5f, true, false, {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 11ul, 13ul}) -#define case14 gemm_base_params(1.2f, -1.5f, true, false, {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 11ul, 13ul}, {3ul, 7ul, 9ul, 13ul} ) -#define case15 gemm_base_params(1.2f, -1.5f, true, false, {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 11ul, 13ul}) -#define case16 gemm_base_params(1.2f, -1.5f, true, false, {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 11ul, 13ul}, {2ul, 3ul, 7ul, 9ul, 13ul}) -#define case17 gemm_base_params(1.2f, 3.f, false, true, {9ul, 11ul}, {13ul, 11ul} ) -#define case18 gemm_base_params(1.2f, 3.f, false, true, {9ul, 11ul}, {13ul, 11ul}, {9ul, 13ul} ) -#define case19 gemm_base_params(2.5f, 1.2f, false, true, {7ul, 9ul, 11ul}, {7ul, 13ul, 11ul} ) -#define case20 gemm_base_params(2.5f, 1.2f, false, true, {7ul, 9ul, 11ul}, {7ul, 13ul, 11ul}, {7ul, 9ul, 13ul} ) -#define case21 gemm_base_params(1.2f, -1.5f, false, true, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 13ul, 11ul}) -#define case22 gemm_base_params(1.2f, -1.5f, false, true, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 13ul, 11ul}, {3ul, 7ul, 9ul, 13ul} ) -#define case23 gemm_base_params(1.2f, -1.5f, false, true, {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 13ul, 11ul}) -#define case24 gemm_base_params(1.2f, -1.5f, false, true, {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 13ul, 11ul}, {2ul, 3ul, 7ul, 9ul, 13ul}) -#define case25 gemm_base_params(1.2f, 3.f, true, true, {11ul, 9ul}, {13ul, 11ul} ) -#define case26 gemm_base_params(1.2f, 3.f, true, true, {11ul, 9ul}, {13ul, 11ul}, {9ul, 13ul} ) -#define case27 gemm_base_params(2.5f, 1.2f, true, true, {7ul, 11ul, 9ul}, {7ul, 13ul, 11ul} ) -#define case28 gemm_base_params(2.5f, 1.2f, true, true, {7ul, 11ul, 9ul}, {7ul, 13ul, 11ul}, {7ul, 9ul, 13ul} ) -#define case29 gemm_base_params(1.2f, -1.5f, true, true, {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 13ul, 11ul}) -#define case30 gemm_base_params(1.2f, -1.5f, true, true, {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 13ul, 11ul}, {3ul, 7ul, 9ul, 13ul} ) -#define case31 gemm_base_params(1.2f, -1.5f, true, true, {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 13ul, 11ul}) -#define case32 gemm_base_params(1.2f, -1.5f, true, true, {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 13ul, 11ul}, {2ul, 3ul, 7ul, 9ul, 13ul}) - -// Broadcasting/dimension inference cases -#define case33 gemm_base_params(1.2f, -1.5f, false, false, {1ul, 1ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}) -#define case34 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {1ul, 1ul, 11ul, 13ul}) -#define case35 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {1ul, 1ul, 9ul, 13ul}) -#define case36 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 1ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}) -#define case37 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 1ul, 11ul, 13ul}) -#define case38 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {3ul, 1ul, 9ul, 13ul}) -#define case39 
gemm_base_params(1.2f, -1.5f, false, false, {9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}) -#define case40 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {11ul, 13ul}) -#define case41 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {9ul, 13ul}) -#define case42 gemm_base_params(1.2f, -1.5f, false, false, {7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}) -#define case43 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {7ul, 11ul, 13ul}) -#define case44 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {7ul, 9ul, 13ul}) -#define case45 gemm_base_params(1.2f, -1.5f, false, false, {7ul, 9ul, 11ul}, {3ul, 1ul, 11ul, 13ul}) -#define case46 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 1ul, 9ul, 11ul}, {7ul, 11ul, 13ul}) -#define case47 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 1ul, 9ul, 11ul}, {3ul, 1ul, 11ul, 13ul}, {7ul, 9ul, 13ul}) - -#define all_cases \ - case1, case2, case3, case4, case5, case6, case7, case8, \ - case9, case10, case11, case12, case13, case14, case15, case16, \ - case17, case18, case19, case20, case21, case22, case23, case24, \ - case25, case26, case27, case28, case29, case30, case31, case32, \ - case33, case34, case35, case36, case37, case38, \ - case39, case40, case41, case42, case43, case44, \ - case45, case46, case47 - -TEST_P(GemmRandomTest, smoke_randomInput) { - gemm_test_params params = GetParam(); - - auto size_A = std::accumulate(params.dims_A.begin(), params.dims_A.end(), size_t(1), std::multiplies()); - auto size_B = std::accumulate(params.dims_B.begin(), params.dims_B.end(), size_t(1), std::multiplies()); - auto size_C = std::accumulate(params.dims_C.begin(), params.dims_C.end(), size_t(1), std::multiplies()); - - std::vector data_A(size_A); - std::vector data_B(size_B); - std::vector data_C(size_C); - - fill_data(data_A.data(), size_A); - fill_data(data_B.data(), size_B); - fill_data(data_C.data(), size_C); - - auto ref_output = ref_gemm(params, data_A, data_B, data_C); - - runTest(params, data_A, data_B, data_C, ref_output); -}; diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/one_hot_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/one_hot_tests.hpp deleted file mode 100644 index 0c62b519b3da78..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/one_hot_tests.hpp +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include - -using namespace ::testing; -using namespace InferenceEngine; -using namespace std; - -struct one_hot_base_params { - std::vector in; - std::vector out; - int axis; - unsigned int depth; - float on, off; -}; - -struct one_hot_test_params : one_hot_base_params { - std::string device_name; - - one_hot_test_params(std::string name, one_hot_base_params params) : - one_hot_base_params(params), device_name(name) {} -}; - -class OneHotOnlyTestShared: public TestsCommon, - public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IN_ - - - - - - - - - - _IN_ - - - - - _OUT_ - - - - - l - - - -)V0G0N"; - - std::string getModel(one_hot_test_params p) { - std::string model = model_t; - - std::string in_shape; - std::string out_shape; - - for (size_t i = 0; i < p.in.size(); i++) { - in_shape += ""; - in_shape += std::to_string(p.in[i]) + 
"\n"; - } - for (size_t i = 0; i < p.out.size(); i++) { - out_shape += ""; - out_shape += std::to_string(p.out[i]) + "\n"; - } - - - REPLACE_WITH_STR(model, "_IN_", in_shape); - REPLACE_WITH_STR(model, "_OUT_", out_shape); - - REPLACE_WITH_NUM(model, "_AXIS_", p.axis); - REPLACE_WITH_NUM(model, "_DEPTH_", p.depth); - - return model; - } - - void ref_one_hot_4d(Blob &src, Blob &dst, one_hot_test_params p) - { - float *src_ptr = src.buffer().as(); - std::size_t src_size = src.size(); - float *dst_ptr = dst.buffer().as(); - std::size_t dst_size = dst.size(); - - int out_n = (p.out.size() >= 1) ? p.out[0] : 1; - int out_c = (p.out.size() >= 2) ? p.out[1] : 1; - int out_d = (p.out.size() == 5) ? p.out[2] : 1; - int out_h = (p.out.size() >= 3 && p.out.size() < 5) ? p.out[2] : (p.out.size() == 5) ? p.out[3] : 1; - int out_w = (p.out.size() >= 4 && p.out.size() < 5) ? p.out[3] : (p.out.size() == 5) ? p.out[4] : 1; - - int hot_axis = (p.axis == - 1) ? p.in.size() : p.axis; - - for (int ob = 0; ob < out_n; ob++) { - for (int oc = 0; oc < out_c; oc++) { - for (int od = 0; od < out_d; od++) { - for (int oh = 0; oh < out_h; oh++) { - for (int ow = 0; ow < out_w; ow++) { - std::size_t dst_offset = ow + out_w * oh + out_w * out_h * od + out_w * out_h * out_d * oc + out_w * out_h * out_d * out_c * ob; - std::size_t src_offset = 0; - - std::vector out_dims = {ob, oc, oh, ow}; - if (p.out.size() == 5) - out_dims.insert(out_dims.begin() + 2, od); - std::vector in_dims(out_dims.begin(), out_dims.end()); - in_dims.erase(in_dims.begin() + hot_axis); - - for (int i = 0; i < p.in.size(); i++) { - int mul = 1; - if (i == p.in.size() - 1) { - src_offset += in_dims[i]; - break; - } - for (int j = i; j < p.in.size(); j++) { - if (j == i) - mul *= in_dims[j]; - else - mul *= p.in[j]; - } - src_offset += mul; - } - - if (out_dims[hot_axis] == src_ptr[src_offset]) - dst_ptr[dst_offset] = p.on; - else - dst_ptr[dst_offset] = p.off; - } - } - } - } - } - } -protected: - virtual void SetUp() { - try { - TestsCommon::SetUp(); - one_hot_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr()); - ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name); - InferRequest inferRequest = executable_network.CreateInferRequest(); - - // Output Data - OutputsDataMap out = net.getOutputsInfo(); - std::pair item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - inferRequest.SetBlob(item.first, output); - - // Output Reference - TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - Blob::Ptr src; - src = make_shared_blob({ Precision::FP32, p.in, TensorDesc::getLayoutByDims(p.in) }); - src->allocate(); - float* s = src->buffer().as(); - for (int i = 0; i < src->size(); ++i) - s[i] = -1; - s[0] = 1; - s[1] = 1; - inferRequest.SetBlob("input", src); - - inferRequest.Infer(); - - // Check results - ref_one_hot_4d(*src, dst_ref, p); - - compare(*output, dst_ref); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -#define case_2d_0 one_hot_base_params({{3}, {3, 6},-1, 6, 1.0f, 0.0f }) -#define case_2d_1 one_hot_base_params({{3}, {6, 3}, 0, 6, 1.0f, 0.0f }) -#define case_2d_2 one_hot_base_params({{3}, {3, 6}, 1, 6, 1.0f, 0.0f }) -#define case_3d_0 one_hot_base_params({{3, 2}, {3, 2, 4},-1, 4, 1.0f, 0.0f }) -#define case_3d_1 one_hot_base_params({{3, 2}, {4, 3, 2}, 0, 4, 1.0f, 0.0f }) -#define case_3d_2 
one_hot_base_params({{3, 2}, {3, 4, 2}, 1, 4, 1.0f, 0.0f }) -#define case_4d_0 one_hot_base_params({ {1, 3, 2}, {1, 3, 2, 4},-1, 4, 1.0f, 0.0f }) -#define case_4d_1 one_hot_base_params({ {1, 3, 2}, {4, 1, 3, 2}, 0, 4, 1.0f, 0.0f }) -#define case_4d_2 one_hot_base_params({ {1, 3, 2}, {1, 4, 3, 2}, 1, 4, 1.0f, 0.0f }) -#define case_4d_3 one_hot_base_params({ {1, 3, 2}, {1, 3, 4, 2}, 2, 4, 1.0f, 0.0f }) -#define case_5d_0 one_hot_base_params({ {1, 3, 2, 3}, {4, 1, 3, 2, 3}, 0, 4, 1.0f, 0.0f }) -#define case_5d_1 one_hot_base_params({ {1, 3, 2, 3}, {1, 4, 3, 2, 3}, 1, 4, 1.0f, 0.0f }) -#define case_5d_2 one_hot_base_params({ {1, 3, 2, 3}, {1, 3, 4, 2, 3}, 2, 4, 1.0f, 0.0f }) -#define case_5d_3 one_hot_base_params({ {1, 3, 2, 3}, {1, 3, 2, 4, 3}, 3, 4, 1.0f, 0.0f }) -#define case_5d_4 one_hot_base_params({ {1, 3, 2, 3}, {1, 3, 2, 3, 4}, 4, 4, 1.0f, 0.0f }) - -TEST_P(OneHotOnlyTestShared, TestsOneHot) {} diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/permute_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/permute_tests.hpp deleted file mode 100644 index 361a6f29f463f9..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/permute_tests.hpp +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace std; - - -struct permute_base_params { - SizeVector dims; - SizeVector order; -}; - -struct permute_test_params { - std::string device_name; - permute_base_params base; - permute_test_params(std::string name, permute_base_params params) : device_name(name), base(params) {} -}; - -template -void ref_permute(const TBlob &src, TBlob &dst, permute_base_params prm) { - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - SizeVector orderedDims; - for (auto ord : prm.order) { - orderedDims.push_back(src.getTensorDesc().getDims()[ord]); - } - TensorDesc desc(Precision::FP32, src.getTensorDesc().getDims(), {orderedDims, prm.order}); - - for (int i=0; i < src.size(); i++) { - dst_data[desc.offset(i)] = src_data[src.getTensorDesc().offset(i)]; - } -} - -class PermuteOnlyTests: public TestsCommon, - public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - __DIMS__ - - - - - - - - __DIMS__ - - - - - __DST_DIMS__ - - - - - - - - -)V0G0N"; - -protected: - std::string getModel(permute_base_params p) { - std::string model = model_t; - std::string dims; - std::string dst_dims; - for (auto& dim : p.dims) { - dims += ""; - dims += std::to_string(dim) + "\n"; - } - - std::string order; - for (auto& ord : p.order) { - if (!order.empty()) - order += ","; - order += std::to_string(ord); - dst_dims += ""; - dst_dims += std::to_string(p.dims[ord]) + "\n"; - } - - REPLACE_WITH_STR(model, "__DIMS__", dims); - REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims); - REPLACE_WITH_STR(model, "_ORDER_", order); - - return model; - } - - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - permute_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p.base); - - Core ie; - CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr()); - ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name); - InferRequest inferRequest = 
executable_network.CreateInferRequest(); - - Blob::Ptr src = make_shared_blob({Precision::FP32, p.base.dims, - TensorDesc::getLayoutByDims(p.base.dims)}); - src->allocate(); - fill_data(src->buffer(), src->size()); - - auto* srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - inferRequest.SetBlob("in1", src); - - OutputsDataMap out = net.getOutputsInfo(); - auto item = *out.begin(); - - TBlob::Ptr output; - output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - inferRequest.SetBlob(item.first, output); - inferRequest.Infer(); - - TensorDesc td(Precision::FP32, p.base.dims, - TensorDesc::getLayoutByDims(p.base.dims)); - TBlob dst_ref(td); - dst_ref.allocate(); - - ref_permute(*srcPtr, dst_ref, p.base); - - compare(*output, dst_ref); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(PermuteOnlyTests, TestsPermute) {} - -#define case_1 permute_base_params{{2, 3, 4, 5}, {0, 1, 2, 3}} -#define case_2 permute_base_params{{2, 3, 4, 5}, {0, 2, 3, 1}} -#define case_3 permute_base_params{{2, 3, 4, 5}, {0, 2, 1, 3}} -#define case_4 permute_base_params{{2, 3, 4}, {0, 1, 2}} -#define case_5 permute_base_params{{2, 3, 4}, {0, 2, 1}} -#define case_6 permute_base_params{{2, 3}, {0, 1}} -#define case_7 permute_base_params{{2, 3, 4, 5, 6}, {0, 1, 2, 3, 4}} -#define case_8 permute_base_params{{2, 3, 4, 5, 6}, {0, 4, 2, 1, 3}} -#define case_9 permute_base_params{{2, 3, 4, 5, 6}, {0, 2, 4, 3, 1}} -#define case_10 permute_base_params{{2, 3, 4, 5, 6}, {0, 3, 2, 4, 1}} -#define case_11 permute_base_params{{2, 8, 2, 2, 4, 5}, {0, 1, 4, 2, 5, 3}} -#define case_12 permute_base_params{{2, 8, 3, 3, 4, 5}, {0, 1, 4, 2, 5, 3}} -#define case_13 permute_base_params{{2, 12, 9}, {0, 2, 1}} -#define case_14 permute_base_params{{2, 8, 3, 3, 4, 5}, {0, 3, 4, 1, 5, 2}} -#define case_15 permute_base_params{{2, 3, 4, 5}, {0, 1, 3, 2}} -#define case_16 permute_base_params{{2, 3, 4, 5, 7}, {0, 3, 1, 4, 2}} diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/quantize_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/quantize_tests.hpp deleted file mode 100644 index 23fcd962a75c58..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/quantize_tests.hpp +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "common_test_utils/data_utils.hpp" - -using namespace ::testing; -using namespace InferenceEngine; - -struct quantize_test_params { - std::string device_name; - - struct { - size_t n; - size_t c; - size_t h; - size_t w; - } in; - - size_t ic_const_blobs; - size_t levels; - bool reverse_out_vals; -}; - -template -void ref_quantize(const std::vector &srcs, std::vector &dsts, quantize_test_params prm) { - assert(dsts.size() == 1); - - const data_t* src_data = srcs[0]->buffer().as(); - const data_t* input_low_data = srcs[1]->buffer().as(); - const data_t* input_high_data = srcs[2]->buffer().as(); - const data_t* output_low_data = srcs[3]->buffer().as(); - const data_t* output_high_data = srcs[4]->buffer().as(); - - data_t* dst_data = dsts[0]->buffer().as(); - - size_t N = prm.in.n; - size_t C = prm.in.c; - size_t H = prm.in.h; - size_t W = prm.in.w; - size_t ICB = 
prm.ic_const_blobs; - - for (size_t n = 0; n < N; n++) { - for (size_t c = 0; c < C; c++) { - for (size_t h = 0; h < H; h++) { - for (size_t w = 0; w < W; w++) { - size_t idx = n*C*H*W + c*H*W + h*W + w; - - if (src_data[idx] <= input_low_data[c % ICB]) - dst_data[idx] = output_low_data[c % ICB]; - else if (src_data[idx] > input_high_data[c % ICB]) - dst_data[idx] = output_high_data[c % ICB]; - else - dst_data[idx] = roundf((src_data[idx] - input_low_data[c % ICB]) / - (input_high_data[c % ICB] - input_low_data[c % ICB]) * (prm.levels-1)) / - (prm.levels-1) * (output_high_data[c % ICB] - output_low_data[c % ICB]) + output_low_data[c % ICB]; - } - } - } - } -} - -class QuantizeOnlyTest : public TestsCommon, public WithParamInterface { - - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - 1 - _ICB_ - 1 - 1 - - - - - - - - - - 1 - _ICB_ - 1 - 1 - - - - - - - - - - 1 - _ICB_ - 1 - 1 - - - - - - - - - - 1 - _ICB_ - 1 - 1 - - - - - - - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - 1 - _ICB_ - 1 - 1 - - - 1 - _ICB_ - 1 - 1 - - - 1 - _ICB_ - 1 - 1 - - - 1 - _ICB_ - 1 - 1 - - - - - _IN_ - _IC_ - _IH_ - _IW_ - - - - - - - - - - - - -)V0G0N"; - - std::string getModel(quantize_test_params p) { - std::string model = model_t; - - REPLACE_WITH_NUM(model, "_IN_", p.in.n); - REPLACE_WITH_NUM(model, "_IC_", p.in.c); - REPLACE_WITH_NUM(model, "_IH_", p.in.h); - REPLACE_WITH_NUM(model, "_IW_", p.in.w); - REPLACE_WITH_NUM(model, "_L_", p.levels); - REPLACE_WITH_NUM(model, "_ICB_", p.ic_const_blobs); - - REPLACE_WITH_NUM(model, "_O1_", 0 * p.ic_const_blobs * sizeof(float)); - REPLACE_WITH_NUM(model, "_S1_", 1 * p.ic_const_blobs * sizeof(float)); - REPLACE_WITH_NUM(model, "_O2_", 1 * p.ic_const_blobs * sizeof(float)); - REPLACE_WITH_NUM(model, "_S2_", 1 * p.ic_const_blobs * sizeof(float)); - REPLACE_WITH_NUM(model, "_O3_", 2 * p.ic_const_blobs * sizeof(float)); - REPLACE_WITH_NUM(model, "_S3_", 1 * p.ic_const_blobs * sizeof(float)); - REPLACE_WITH_NUM(model, "_O4_", 3 * p.ic_const_blobs * sizeof(float)); - REPLACE_WITH_NUM(model, "_S4_", 1 * p.ic_const_blobs * sizeof(float)); - - return model; - } - -protected: - virtual void SetUp() { - - try { - quantize_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - std::vector srcs_vec; - Blob::Ptr blob_data = make_shared_blob({Precision::FP32, {p.in.n, p.in.c, p.in.h, p.in.w}, Layout::NCHW}); - blob_data->allocate(); - CommonTestUtils::fill_data_sine(blob_data->buffer().as(), blob_data->size(), 0.f, 2.f, 0.1f); - srcs_vec.push_back(blob_data); - - float low_center = p.levels == 2 ? 0.f : -1.f; - float high_center = p.levels == 2 ? 0.f : 1.f; - float low_val = p.reverse_out_vals ? 1.0f : -1.f; - float high_val = p.reverse_out_vals ? 
-1.0f : 1.f; - - Blob::Ptr input_low_data = make_shared_blob({Precision::FP32, {p.ic_const_blobs}, Layout::C}); - input_low_data->allocate(); - CommonTestUtils::fill_data_sine(input_low_data->buffer().as(), input_low_data->size(), low_center, 2.f, 0.2f); - srcs_vec.push_back(input_low_data); - - Blob::Ptr input_high_data = make_shared_blob({Precision::FP32, {p.ic_const_blobs}, Layout::C}); - input_high_data->allocate(); - CommonTestUtils::fill_data_sine(input_high_data->buffer().as(), input_high_data->size(), high_center, 2.f, 0.2f); - srcs_vec.push_back(input_high_data); - - Blob::Ptr output_low_data = make_shared_blob({Precision::FP32, { p.ic_const_blobs }, Layout::C}); - output_low_data->allocate(); - if (p.levels == 2) { - CommonTestUtils::fill_data_const(output_low_data, low_val); - } else { - CommonTestUtils::fill_data_sine(output_low_data->buffer().as(), output_low_data->size(), low_center, 2.f, 0.3f); - }; - srcs_vec.push_back(output_low_data); - - Blob::Ptr output_high_data = make_shared_blob({Precision::FP32, {p.ic_const_blobs}, Layout::C}); - output_high_data->allocate(); - if (p.levels == 2) { - CommonTestUtils::fill_data_const(output_high_data, high_val); - } else { - CommonTestUtils::fill_data_sine(output_high_data->buffer().as(), output_high_data->size(), high_center, 2.f, 0.3f); - }; - srcs_vec.push_back(output_high_data); - - TBlob *weights_ptr = new TBlob({Precision::U8, {4 * p.ic_const_blobs * sizeof(float)}, Layout::C}); - weights_ptr->allocate(); - - float* pwei = weights_ptr->buffer().as(); - int off = 0; - for (int i = 1; i < 5; i++) { - float* pdata = srcs_vec[i]->buffer(); - for (int j = 0; j < p.ic_const_blobs; j++) { - pwei[off++] = pdata[j]; - } - } - - Core ie; - CNNNetwork net = ie.ReadNetwork(model, TBlob::Ptr(weights_ptr)); - - std::map config = {{PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE, PluginConfigParams::NO}}; - ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name, config); - InferRequest inferRequest = executable_network.CreateInferRequest(); - inferRequest.SetBlob("data", blob_data); - - std::vector dsts_vec; - std::vector out_vec; - - OutputsDataMap out_info_map = net.getOutputsInfo(); - for (auto info : out_info_map) { - Blob::Ptr blob = make_shared_blob({Precision::FP32, info.second->getDims() , Layout::NCHW}); - blob->allocate(); - inferRequest.SetBlob(info.first, blob); - out_vec.push_back(blob); - - Blob::Ptr blob_ref = make_shared_blob({Precision::FP32, info.second->getDims(), Layout::NCHW}); - blob_ref->allocate(); - dsts_vec.push_back(blob_ref); - } - - ref_quantize(srcs_vec, dsts_vec, p); - - inferRequest.Infer(); - - compare(*out_vec[0], *dsts_vec[0]); - - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -// {N, C, H, W}, ic_const_blobs, quantization_levels, reverse_out_vals -#define case_1 {1, 8, 5, 5}, 1, 2, false -#define case_2 {1, 8, 5, 5}, 8, 2, false -#define case_3 {1, 8, 5, 5}, 1, 4, false -#define case_4 {1, 8, 5, 5}, 8, 4, false -#define case_5 {1, 8, 5, 4}, 1, 8, false -#define case_6 {1, 8, 5, 4}, 8, 8, false -#define case_7 {1, 17, 5, 5}, 1, 2, false -#define case_8 {1, 17, 5, 5}, 17, 2, false -#define case_9 {1, 17, 5, 5}, 1, 4, false -#define case_10 {1, 17, 5, 5}, 17, 4, false -#define case_11 {1, 17, 5, 4}, 1, 8, false -#define case_12 {1, 17, 5, 4}, 17, 8, false -#define case_13 {1, 8, 5, 5}, 1, 2, true -#define case_14 {1, 8, 5, 5}, 8, 2, true -#define case_15 {1, 8, 5, 5}, 1, 4, true -#define case_16 {1, 8, 5, 5}, 8, 4, true -#define case_17 {1, 8, 5, 4}, 
1, 8, true -#define case_18 {1, 8, 5, 4}, 8, 8, true -#define case_19 {1, 17, 5, 5}, 1, 2, true -#define case_20 {1, 17, 5, 5}, 17, 2, true -#define case_21 {1, 17, 5, 5}, 1, 4, true -#define case_22 {1, 17, 5, 5}, 17, 4, true -#define case_23 {1, 17, 5, 4}, 1, 8, true -#define case_24 {1, 17, 5, 4}, 17, 8, true - -TEST_P(QuantizeOnlyTest, TestsQuantize) {} diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/reduce_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/reduce_tests.hpp deleted file mode 100644 index 63a6eded90b7d8..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/reduce_tests.hpp +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" -#include "ie_memcpy.h" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace std; - -struct reduce_test_params { - std::string device_name; - std::string inIdxPrecision;; - std::string reduce_type; - bool keep_dims; - SizeVector in_shape; - std::vector input_tensor; - std::vector axes_for_reduction; - SizeVector out_shape; - std::vector reference; -}; - -template -void reduce( - const float* src_data, - SizeVector src_dims, - SizeVector srcStrides, - float* dst_data, - SizeVector dst_dims, - SizeVector dstStrides, - float init_value, - bool keep_dims, - SizeVector skip_dims, - F func -) { - size_t i, src_idx, dst_idx; - for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i) - dst_data[i] = init_value; - - SizeVector counters(src_dims.size(), 0); - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) { - if (keep_dims) - for (i = 0, dst_idx = 0; i < dst_dims.size(); ++i) - dst_idx += (counters[i] % dst_dims[i]) * dstStrides[i]; - else - for (i = 0, dst_idx = 0; i < dst_dims.size(); ++i) - dst_idx += counters[skip_dims[i]] * dstStrides[i]; - - dst_data[dst_idx] = func(dst_data[dst_idx], src_data[src_idx]); - for (int j = src_dims.size() - 1; j >= 0; j--) { - counters[j] = (counters[j] + 1) % src_dims[j]; - if (counters[j] != 0) break; - } - } -} - -void ref_reduce( - std::string reduce_type, - TBlob &src, - bool keep_dims, - std::vector axes_for_reduction, - TBlob &dst, - SizeVector &out_dims -) { - size_t i, src_idx, dst_idx; - const float* src_data = src.data(); - SizeVector src_dims = src.getTensorDesc().getDims(); - SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides(); - float* dst_data = dst.data(); - SizeVector dst_dims = dst.getTensorDesc().getDims(); - SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides(); - SizeVector skip_dims; - - if (!dst_dims.size()) - dst_dims = InferenceEngine::SizeVector(1, 1); - - if (!dstStrides.size()) - dstStrides = InferenceEngine::SizeVector(1, 1); - - if (axes_for_reduction.size() == 0) - FAIL() << " Index vector should be 1 dimension"; - - for (i = 0; i < axes_for_reduction.size(); i++) { - int32_t axis = axes_for_reduction[i]; - if (axis < 0) - axis += src_dims.size(); - - if (axis > src_dims.size()) - FAIL() << " Index to squeeze exceeds data tensor dimension"; - axes_for_reduction[i] = axis; - } - - for (size_t j = 0; j < src_dims.size(); j++) { - bool found = false; - for (size_t axis : axes_for_reduction) - if (j == axis) found = true; - - if (!found) { - out_dims.push_back(src_dims[j]); - if (!keep_dims) skip_dims.push_back(j); - } - else 
{ - if (keep_dims) out_dims.push_back(1); - } - } - - if (reduce_type == "ReduceAnd") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 1.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x && y; } ); - } else { - dst_data[0] = 1.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] = dst_data[0] && src_data[src_idx]; - } - } else if (reduce_type == "ReduceL1") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x + (std::abs)(y); } ); - } else { - dst_data[0] = 0.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += (std::abs)(src_data[src_idx]); - } - } else if (reduce_type == "ReduceL2") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x + y * y; } ); - - for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i) - dst_data[i] = (std::sqrt)(dst_data[i]); - } else { - dst_data[0] = 0.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += src_data[src_idx] * src_data[src_idx]; - dst_data[0] = sqrt(dst_data[0]); - } - } else if (reduce_type == "ReduceLogSum") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x + y; }); - - for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i) - dst_data[i] = logf(dst_data[i]); - } else { - dst_data[0] = 0.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += src_data[src_idx]; - dst_data[0] = logf(dst_data[0]); - } - } else if (reduce_type == "ReduceLogSumExp") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x + expf(y); }); - - for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i) - dst_data[i] = logf(dst_data[i]); - } else { - dst_data[0] = 0.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += expf(src_data[src_idx]); - dst_data[0] = logf(dst_data[0]); - } - } else if (reduce_type == "ReduceMax") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, FLT_MIN, keep_dims, skip_dims, - [](float x, float y)->float { return x > y ? x : y; }); - } else { - dst_data[0] = FLT_MIN; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] = dst_data[0] > src_data[src_idx] ? 
dst_data[0] : src_data[src_idx]; - } - } else if (reduce_type == "ReduceMean") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x + y; }); - float reduced_dims_work_amount = 1.f; - for (size_t axis : axes_for_reduction) { - reduced_dims_work_amount *= static_cast(src_dims[axis]); - } - for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i) - dst_data[i] /= reduced_dims_work_amount; - } else { - dst_data[0] = 0.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += src_data[src_idx]; - dst_data[0] /= static_cast(srcStrides[0] * src_dims[0]); - } - } else if (reduce_type == "ReduceMin") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, FLT_MAX, keep_dims, skip_dims, - [](float x, float y)->float { return x < y ? x : y; }); - } else { - dst_data[0] = FLT_MAX; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] = dst_data[0] < src_data[src_idx] ? dst_data[0] : src_data[src_idx]; - } - } else if (reduce_type == "ReduceOr") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x || y; }); - } else { - dst_data[0] = 0; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] = dst_data[0] || src_data[src_idx]; - } - } else if (reduce_type == "ReduceProd") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 1.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x * y; }); - } else { - dst_data[0] = 1.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] *= src_data[src_idx]; - } - } else if (reduce_type == "ReduceSum") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x + y; }); - } else { - dst_data[0] = 0.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += src_data[src_idx]; - } - } else if (reduce_type == "ReduceSumSquare") { - if (out_dims.size()) { - reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims, - [](float x, float y)->float { return x + y * y; }); - } else { - dst_data[0] = 0.0f; - for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) - dst_data[0] += src_data[src_idx] * src_data[src_idx]; - } - } -} - -class ReduceTestsShared : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - - - - - - - _DIM_SIZE_ - - - - - - - - - - - _IN_ - - - _DIM_SIZE_ - - - - - _OUT_ - - - - - - - - - -)V0G0N"; - - std::string getModel(reduce_test_params p) { - std::string model = model_t; - std::string in_shape; - std::string out_shape = ""; - - for (size_t i = 0; i < p.in_shape.size(); i++) { - in_shape += ""; - in_shape += std::to_string(p.in_shape[i]) + "\n"; - } - REPLACE_WITH_STR(model, "_IN_", in_shape); - REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.axes_for_reduction.size()); - REPLACE_WITH_STR(model, "_REDUCE_TYPE_", p.reduce_type); - REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision); - REPLACE_WITH_NUM(model, "_KEEP_DIMS_", p.keep_dims); - - for (size_t i = 0; i < p.out_shape.size(); i++) { - out_shape += ""; - out_shape += std::to_string(p.out_shape[i]) + "\n"; - } - 
REPLACE_WITH_STR(model, "_OUT_", out_shape); - - return model; - } - -protected: - virtual void TearDown() { - } - - static void fill_data_dbgval(float* data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = i + 1; - } - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - reduce_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - // std::cout << model << std::endl; - - TBlob * axes = nullptr; - if (p.inIdxPrecision == "I32") { - axes = new TBlob({Precision::U8, - {p.axes_for_reduction.size() * sizeof(int32_t)}, - Layout::C}); - axes->allocate(); - for (size_t i = 0; i < p.axes_for_reduction.size(); i++) { - ((int32_t *) axes->buffer())[i] = p.axes_for_reduction[i]; - } - } else { - axes = new TBlob({Precision::U8, - { p.axes_for_reduction.size() * sizeof(float) }, - Layout::C}); - axes->allocate(); - for (size_t i = 0; i < p.axes_for_reduction.size(); i++) { - ((float *) axes->buffer())[i] = p.axes_for_reduction[i]; - } - } - - Core ie; - auto net = ie.ReadNetwork(model, TBlob::Ptr(axes)); - OutputsDataMap out = net.getOutputsInfo(); - std::pair item = *out.begin(); - - ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name); - InferRequest inferRequest = executable_network.CreateInferRequest(); - - // Input Data - Blob::Ptr src; - src = make_shared_blob({ Precision::FP32, p.in_shape, TensorDesc::getLayoutByDims(p.in_shape) }); - src->allocate(); - if(p.input_tensor.size()) - ie_memcpy(src->buffer(), src->byteSize(), &p.input_tensor[0], sizeof(float)*p.input_tensor.size()); - else - fill_data_dbgval(src->buffer(), src->size()); - - auto* srcPtr = dynamic_cast*>(src.get()); - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - // Output Reference - TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - SizeVector out_dims; - ref_reduce(p.reduce_type, *srcPtr, p.keep_dims, p.axes_for_reduction, dst_ref, out_dims); - if (p.out_shape.size()>0 && out_dims.size() != p.out_shape.size()) - FAIL() << "Wrong out_shape size!"; - for (size_t i = 0; i < p.out_shape.size(); i++) { - if (out_dims[i] != p.out_shape[i]) - FAIL() << "Wrong out_shape dimensions!"; - } - if (p.reference.size()) - if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0) - FAIL() << "Wrong result with compare reference vector!"; - - // Output Data - auto output = make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - inferRequest.SetBlob(item.first, output); - - // Input - inferRequest.SetBlob("input", src); - inferRequest.Infer(); - - compare(*output, dst_ref); - } catch (const Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(ReduceTestsShared, SharedReduceTests) {} diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/resample_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/resample_tests.hpp deleted file mode 100644 index 2e9b53a3fab341..00000000000000 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/resample_tests.hpp +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "tests_common.hpp" -#include "single_layer_common.hpp" - -using namespace ::testing; -using namespace InferenceEngine; -using namespace std; - -struct resample_test_params { - std::string device_name; - InferenceEngine::SizeVector in_dims; - float factor; - 
std::string type; -}; - -static inline float triangleCoeff(float x) { - return std::max(0.0f, 1 - std::abs(x)); -} - -template -static void ref_resample(const InferenceEngine::TBlob &src, InferenceEngine::TBlob &dst, resample_test_params p) { - const data_t *src_data = src.readOnly(); - data_t *dst_data = dst.data(); - - size_t ndims = p.in_dims.size(); - - size_t N = p.in_dims[0]; - size_t C = p.in_dims[1]; - size_t ID = ndims == 5 ? p.in_dims[ndims - 3] : 1; - size_t IH = p.in_dims[ndims - 2]; - size_t IW = p.in_dims[ndims - 1]; - size_t OD = ndims == 5 ? static_cast(ID / p.factor) : 1; - size_t OH = static_cast(IH / p.factor); - size_t OW = static_cast(IW / p.factor); - - float fx = static_cast(IW) / static_cast(OW); - float fy = static_cast(IH) / static_cast(OH); - float fz = static_cast(ID) / static_cast(OD); - - if (p.type == "caffe.ResampleParameter.NEAREST") { - for (size_t b = 0; b < N; b++) { - for (size_t c = 0; c < C; c++) { - const float* in_ptr = src_data + IW * IH * ID * C * b + IW * IH * ID * c; - float* out_ptr = dst_data + OW * OH * OD * C * b + OW * OH * OD * c; - for (size_t oz = 0; oz < OD; oz++) { - for (size_t oy = 0; oy < OH; oy++) { - for (size_t ox = 0; ox < OW; ox++) { - float ix = ox * fx; - float iy = oy * fy; - float iz = oz * fz; - - size_t ix_r = static_cast(std::floor(ix)); - size_t iy_r = static_cast(std::floor(iy)); - size_t iz_r = static_cast(std::floor(iz)); - - out_ptr[oz * OH * OW + oy * OW + ox] = in_ptr[iz_r * IH * IW + iy_r * IW + ix_r]; - } - } - } - } - } - } else if (p.type == "caffe.ResampleParameter.LINEAR") { - size_t kernel_width = 2; - bool isDownsample = (fx > 1) || (fy > 1) || (fz > 1); - bool antialias = false; - - for (size_t b = 0; b < N; b++) { - for (size_t c = 0; c < C; c++) { - const float* in_ptr = src_data + IW * IH * ID * C * b + IW * IH * ID * c; - float* out_ptr = dst_data + OW * OH * OD * C * b + OW * OH * OD * c; - - for (size_t oz = 0; oz < OD; oz++) { - for (size_t oy = 0; oy < OH; oy++) { - for (size_t ox = 0; ox < OW; ox++) { - float ix = ox * fx + fy / 2.0f - 0.5f; - float iy = oy * fy + fx / 2.0f - 0.5f; - float iz = oz * fz + fz / 2.0f - 0.5f; - - int ix_r = static_cast(round(ix)); - int iy_r = static_cast(round(iy)); - int iz_r = static_cast(round(iz)); - - float sum = 0; - float wsum = 0; - - float ax = 1.0f / (antialias ? fx : 1.0f); - float ay = 1.0f / (antialias ? fy : 1.0f); - float az = 1.0f / (antialias ? fz : 1.0f); - - int rx = (fx < 1.0f) ? 2 : static_cast(ceil(static_cast(kernel_width) / ax)); - int ry = (fy < 1.0f) ? 2 : static_cast(ceil(static_cast(kernel_width) / ay)); - int rz = (fz < 1.0f) ? 2 : static_cast(ceil(static_cast(kernel_width) / az)); - - for (int z = iz_r - rz; z <= iz_r + rz; z++) { - for (int y = iy_r - ry; y <= iy_r + ry; y++) { - for (int x = ix_r - rx; x <= ix_r + rx; x++) { - if (z < 0 || y < 0 || x < 0 || z >= static_cast(ID) || y >= static_cast(IH) || x >= static_cast(IW)) - continue; - - float dx = ix - x; - float dy = iy - y; - float dz = iz - z; - - float w = ax * triangleCoeff(ax * dx) * ay * triangleCoeff(ay * dy) * az * triangleCoeff(az * dz); - - sum += w * in_ptr[z * IH * IW + y * IW + x]; - wsum += w; - } - } - } - out_ptr[oz * OH * OW + oy * OW + ox] = (!wsum) ? 
0 : (sum / wsum); - } - } - } - } - } - } else { - assert(!"Unsupported resample operation type"); - } -} - -class ResampleTests : public TestsCommon, public WithParamInterface { - std::string model_t = R"V0G0N( - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - - - - _IN_ - _IC_ - _ID_ - _IH_ - _IW_ - - - - - _IN_ - _IC_ - _OD_ - _OH_ - _OW_ - - - - - - - - -)V0G0N"; - - std::string getModel(resample_test_params p) { - std::string model = model_t; - std::string inDim; - - auto dims_size = p.in_dims.size(); - if (dims_size == 4) { - REMOVE_LINE(model, "_ID_"); - REMOVE_LINE(model, "_OD_"); - } - - REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]); - REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]); - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_ID_", p.in_dims[dims_size - 3]); - REPLACE_WITH_NUM(model, "_IH_", p.in_dims[dims_size - 2]); - REPLACE_WITH_NUM(model, "_IW_", p.in_dims[dims_size - 1]); - - if (dims_size == 5) - REPLACE_WITH_NUM(model, "_OD_", (int)(p.in_dims[dims_size - 3] / p.factor)); - REPLACE_WITH_NUM(model, "_OH_", (int)(p.in_dims[dims_size - 2] / p.factor)); - REPLACE_WITH_NUM(model, "_OW_", (int)(p.in_dims[dims_size - 1] / p.factor)); - - REPLACE_WITH_NUM(model, "_AN_", 0); - REPLACE_WITH_NUM(model, "_F_", p.factor); - REPLACE_WITH_STR(model, "_T_", p.type); - - return model; - } - -protected: - virtual void TearDown() { - } - - virtual void SetUp() { - try { - TestsCommon::SetUp(); - resample_test_params p = ::testing::WithParamInterface::GetParam(); - std::string model = getModel(p); - - Core ie; - CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr()); - InputsDataMap in_info_map = net.getInputsInfo(); - OutputsDataMap out_info_map = net.getOutputsInfo(); - - ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name); - InferRequest inferRequest = executable_network.CreateInferRequest(); - - InferenceEngine::Layout layout = InferenceEngine::ANY; - switch (p.in_dims.size()) { - case 4: layout = InferenceEngine::NCHW; break; - case 5: layout = InferenceEngine::NCDHW; break; - default: - FAIL() << "Input dims size not supported in this test."; - } - - InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob({InferenceEngine::Precision::FP32, p.in_dims, layout}); - src->allocate(); - fill_data(src->buffer(), src->size()); - for (size_t i = 0; i < src->size(); i++) { - src->buffer().as()[i] = static_cast(i); - } - - auto * srcPtr = dynamic_cast*>(src.get()); - - if (srcPtr == nullptr) - FAIL() << "Cannot cast blob to TBlob."; - - InferenceEngine::BlobMap srcs; - srcs.insert(std::pair("input", src)); - - InferenceEngine::OutputsDataMap out; - out = net.getOutputsInfo(); - InferenceEngine::BlobMap outputBlobs; - - std::pair item = *out.begin(); - - InferenceEngine::TBlob::Ptr output; - output = InferenceEngine::make_shared_blob(item.second->getTensorDesc()); - output->allocate(); - outputBlobs[item.first] = output; - - inferRequest.SetInput(srcs); - inferRequest.SetOutput(outputBlobs); - inferRequest.Infer(); - - InferenceEngine::TBlob dst_ref(item.second->getTensorDesc()); - dst_ref.allocate(); - - ref_resample(*srcPtr, dst_ref, p); - - compare(*output, dst_ref); - } catch (const InferenceEngine::Exception &e) { - FAIL() << e.what(); - } - } -}; - -TEST_P(ResampleTests, TestsResample) {} diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/ti_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/ti_tests.hpp index 706fdc1240f1bd..6fd48357cb8d40 100644 --- 
a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/ti_tests.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/ti_tests.hpp @@ -12,11 +12,9 @@ #include "plg_test.hpp" #include "single_layer_common.hpp" -#include "ir_gen_helper.hpp" using namespace ::testing; using namespace InferenceEngine; -using namespace single_layer_tests; struct ti_test_params { std::string device_name; diff --git a/inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt b/inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt index b3245238c30f2c..5374ecd4519cfd 100644 --- a/inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt +++ b/inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt @@ -37,6 +37,9 @@ addIeTarget( ${CMAKE_CURRENT_SOURCE_DIR}/common/regression/helpers LINK_LIBRARIES IESharedTests + ieTestHelpers + funcTestUtils + format_reader vpu_graph_transformer vpu_custom_kernels ) @@ -44,6 +47,7 @@ addIeTarget( target_include_directories(VPUCommonTests INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/vpu_base ${CMAKE_CURRENT_SOURCE_DIR}/common/regression/helpers + $ $ ) diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/reference_regression.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/reference_regression.cpp index 5330b3627b7876..b99a3bbca16da4 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/reference_regression.cpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/reference_regression.cpp @@ -1,15 +1,15 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// +// // Copyright (C) 2018-2021 Intel Corporation +// // SPDX-License-Identifier: Apache-2.0 +// // -#include "regression_reference.hpp" -#include "vpu_tests_config.hpp" +// #include "regression_reference.hpp" +// #include "vpu_tests_config.hpp" -namespace Regression { - namespace Reference { +// namespace Regression { +// namespace Reference { - std::map> values = { - }; +// std::map> values = { +// }; - } // namespace Reference -} // namespace Regression +// } // namespace Reference +// } // namespace Regression diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.hpp index 8047b9d848ed49..82217999d188f2 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.hpp @@ -7,16 +7,16 @@ #include #include #include -#include +#include #include #include +#include #include #include "vpu_case_params.hpp" #include "vpu_param_containers.hpp" using namespace ::testing; using namespace InferenceEngine; -using namespace Regression::Matchers; #define DISABLE_IF(expr) \ do { \ @@ -62,7 +62,7 @@ using PluginDevicePair = std::pair; // class VpuNoRegressionBase //------------------------------------------------------------------------------ -class VpuNoRegressionBase : public Regression::RegressionTests { +class VpuNoRegressionBase : public TestsCommon { public: //Operations static std::string getTestCaseName(PluginDevicePair, @@ -71,7 +71,7 @@ class VpuNoRegressionBase : public Regression::RegressionTests { DoReshape); // Accessors - std::string getDeviceName() const override; + std::string getDeviceName() const; 
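// Editor's note on the hunk above: the `override` specifier is dropped here
// because it is only valid when the base class declares a matching virtual
// function, and this patch rebases the class from Regression::RegressionTests
// onto TestsCommon. A minimal sketch of why the specifier must go; OldBase and
// NewBase are hypothetical stand-ins, assuming TestsCommon does not declare
// getDeviceName():

#include <string>

struct OldBase {                       // like Regression::RegressionTests
    virtual std::string getDeviceName() const = 0;
    virtual ~OldBase() = default;
};

struct NewBase {                       // like TestsCommon: no getDeviceName()
    virtual ~NewBase() = default;
};

struct WithOldBase : OldBase {
    // OK: there is a base-class virtual to override
    std::string getDeviceName() const override { return "VPU"; }
};

struct WithNewBase : NewBase {
    // Writing `override` here would not compile: NewBase declares nothing to
    // override, so the member becomes a plain, non-virtual accessor instead.
    std::string getDeviceName() const { return "VPU"; }
};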
protected: // Data section diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_params.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_params.hpp index 55ad08f4a32d05..4bdd0b55eab45e 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_params.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_params.hpp @@ -4,12 +4,12 @@ #pragma once -#include #include #include +#include +#include +#include -using namespace InferenceEngine; -using namespace Regression::Matchers; using String2StringMap = std::map; using AdditionCfgParamsFactory = std::function; @@ -53,23 +53,12 @@ class ClassificationSrcParam : public SourceParameterBase { inline ClassificationSrcParam( std::string model_name, std::string img_name, - double reference_delta, - Regression::EMean mean = Regression::EMean::eValues); - - // Accessors - inline Regression::EMean mean() const; - - // Operations - inline std::string name() const override; + double reference_delta); friend std::ostream& operator<<(std::ostream& os, const ClassificationSrcParam& param) { return os << param.modelName() << ", " << param.imageName() << - ", " << std::to_string(param.referenceDelta()) << ", " << format_mean(param.mean()); + ", " << std::to_string(param.referenceDelta()); } - -private: - //Data section - Regression::EMean mean_; }; //------------------------------------------------------------------------------ @@ -136,19 +125,8 @@ inline std::string SourceParameterBase::name() const { inline ClassificationSrcParam::ClassificationSrcParam( std::string model_name, std::string img_name, - double reference_delta, - Regression::EMean mean): - SourceParameterBase(model_name, img_name, reference_delta), - mean_(mean) { -} - -inline Regression::EMean ClassificationSrcParam::mean() const { - return mean_; -} - -inline std::string ClassificationSrcParam::name() const { - return SourceParameterBase::name() + - "_Mean=" + format_mean(mean_); + double reference_delta): + SourceParameterBase(model_name, img_name, reference_delta) { } //------------------------------------------------------------------------------ diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.cpp index dd767687613871..d4dcc61de90fd9 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.cpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.cpp @@ -3,6 +3,7 @@ // #include "vpu_classification_case.hpp" +#include "functional_test_utils/plugin_cache.hpp" //------------------------------------------------------------------------------ // Implementation of methods of class VpuNoClassificationRegression diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.hpp index 81dd646c459ff9..c62aab176b8709 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.hpp @@ -4,6 +4,7 @@ #pragma once +#include #include "vpu_case_common.hpp" using 
CompilationTestParam = WithParamInterface>; @@ -63,14 +64,14 @@ class VpuNoClassificationRegressionSpecific : public VpuNoRegressionBase, // class VpuNoRegressionWithCompilation //------------------------------------------------------------------------------ -class VpuNoRegressionWithCompilation : public Regression::RegressionTests, +class VpuNoRegressionWithCompilation : public TestsCommon, public CompilationTestParam { public: // Operations static std::string getTestCaseName(TestParamInfo param); // Accessors - std::string getDeviceName() const override; + std::string getDeviceName() const; protected: // Data section diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.cpp index 94893b8a59dc5e..b5d55ee08f7aa1 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.cpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.cpp @@ -4,6 +4,9 @@ #include "vpu_raw_results_case.hpp" +#include +#include + std::vector operator + (std::vector && l, const std::vector & r) { l.insert(l.end(), r.begin(), r.end()); diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp index 9ec8f77bd947eb..fa3e86da34548b 100644 --- a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp @@ -15,6 +15,7 @@ #include #include #include "myriad_layers_reference_functions.hpp" +#include "common_test_utils/xml_net_builder/xml_net_builder.hpp" #include "vpu_layers_tests.hpp" #include diff --git a/inference-engine/tests_deprecated/helpers/CMakeLists.txt b/inference-engine/tests_deprecated/helpers/CMakeLists.txt index 5b9c2258d3aa16..6a4b0a05be0c73 100644 --- a/inference-engine/tests_deprecated/helpers/CMakeLists.txt +++ b/inference-engine/tests_deprecated/helpers/CMakeLists.txt @@ -48,3 +48,8 @@ if (ENABLE_DATA) add_dependencies(${TARGET_NAME} data) add_dependencies(${TARGET_NAME}_s data) endif() + +# developer package + +openvino_developer_export_targets(COMPONENT inference_engine_tests + TARGETS ieTestHelpers ieTestHelpers_s) diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/ie_core_adapter.cpp b/inference-engine/tests_deprecated/helpers/ie_core_adapter.cpp similarity index 100% rename from inference-engine/tests_deprecated/functional/ie_tests/src/ie_core_adapter.cpp rename to inference-engine/tests_deprecated/helpers/ie_core_adapter.cpp diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/ie_core_adapter.hpp b/inference-engine/tests_deprecated/helpers/ie_core_adapter.hpp similarity index 100% rename from inference-engine/tests_deprecated/functional/ie_tests/include/ie_core_adapter.hpp rename to inference-engine/tests_deprecated/helpers/ie_core_adapter.hpp diff --git a/inference-engine/tests_deprecated/helpers/ir_gen_helper.cpp b/inference-engine/tests_deprecated/helpers/ir_gen_helper.cpp deleted file mode 100644 index e883b516e56dc6..00000000000000 --- a/inference-engine/tests_deprecated/helpers/ir_gen_helper.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "ir_gen_helper.hpp" - 
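// Editor's note: the deleted IRTemplateGenerator below builds IR XML by plain
// textual placeholder substitution, using the REPLACE_WITH_STR /
// REPLACE_WITH_NUM macros that come from single_layer_common.hpp (included by
// the deleted header). Those macros are not shown in this patch; the sketch
// below only illustrates the technique, with a hypothetical replace_all()
// standing in for them:

#include <iostream>
#include <string>
#include <vector>

// replace every occurrence of `marker` in `text` with `value`
static void replace_all(std::string& text, const std::string& marker, const std::string& value) {
    for (size_t pos = text.find(marker); pos != std::string::npos; pos = text.find(marker, pos)) {
        text.replace(pos, marker.size(), value);
        pos += value.size();  // continue searching after the inserted value
    }
}

int main() {
    // a tiny template in the style of model_input_t, with a __SRC_DIMS__ marker
    std::string tmpl = "<input>\n__SRC_DIMS__</input>";

    std::string dims;
    for (size_t d : std::vector<size_t>{1, 3, 224, 224})
        dims += "    <dim>" + std::to_string(d) + "</dim>\n";

    replace_all(tmpl, "__SRC_DIMS__", dims);
    std::cout << tmpl << '\n';  // prints the <input> block with four <dim> rows
}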
-namespace single_layer_tests { - - std::string IRTemplateGenerator::getIRTemplate(const std::string& name, - const std::vector& input_shape, - const std::string& precision, - const std::string& layers, - const std::string& edges, - const unsigned ir_version, - const std::string& metadata) { - const std::vector< std::vector> input_shape_vector = { input_shape }; - return getIRTemplate(name, input_shape_vector, precision, layers, edges, ir_version, metadata); - } - std::string IRTemplateGenerator::getIRTemplate(const std::string& name, - const std::vector>& input_shape, - const std::string& precision, - const std::string& layers, - const std::string& edges, - const unsigned ir_version, - const std::string& metadata) { - std::string model = model_t; - REPLACE_WITH_STR(model, "_NAME_", name); - REPLACE_WITH_NUM(model, "_IRv_", ir_version); - std::string input_layers; - for (int i = 0; i < input_shape.size(); i++) { - std::string model_input = model_input_t; - std::string s_dims; - for (auto& dim : input_shape[0]) { - s_dims += "\n\t "; - s_dims += std::to_string(dim) + ""; - } - REPLACE_WITH_STR(model_input, "_ID_", std::to_string(i)); - std::string input_name = "in" + std::to_string(i + 1); - REPLACE_WITH_STR(model_input, "_input_name_", input_name); - REPLACE_WITH_STR(model_input, "__SRC_DIMS__", s_dims); - input_layers += model_input; - } - REPLACE_WITH_STR(model, "__INPUT_LAYERS_", input_layers); - REPLACE_WITH_STR(model, "_PR_", precision); - REPLACE_WITH_STR(model, "_LAYERS_", layers); - REPLACE_WITH_STR(model, "_EDGES_", edges); - REPLACE_WITH_STR(model, "_META_DATA_", metadata); - - return model; - } - - std::string IRTemplateGenerator::model_input_t = R"V0G0N( - - - __SRC_DIMS__ - - - - - )V0G0N"; - - std::string IRTemplateGenerator::model_t = R"V0G0N( - - - __INPUT_LAYERS_ - _LAYERS_ - - - _EDGES_ - - - - _META_DATA_ - - - - )V0G0N"; -} // namespace single_layer_tests diff --git a/inference-engine/tests_deprecated/helpers/ir_gen_helper.hpp b/inference-engine/tests_deprecated/helpers/ir_gen_helper.hpp deleted file mode 100644 index e81aa5febdd8d9..00000000000000 --- a/inference-engine/tests_deprecated/helpers/ir_gen_helper.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#ifndef IR_GEN_HELPER_HPP -#define IR_GEN_HELPER_HPP - -#include "single_layer_common.hpp" - -namespace single_layer_tests { - - class IRTemplateGenerator { - IRTemplateGenerator() = default; - public: - static std::string model_t; - static std::string model_input_t; - - static std::string getIRTemplate(const std::string& name, - const std::vector& input_shape, - const std::string& precision, - const std::string& layers, - const std::string& edges, - const unsigned ir_version = 5u, - const std::string& metadata = ""); - - - - static std::string getIRTemplate(const std::string& name, - const std::vector>& input_shape, - const std::string& precision, - const std::string& layers, - const std::string& edges, - const unsigned ir_version = 5u, - const std::string& metadata = ""); - - }; - -} // namespace single_layer_tests -#endif /* IR_GEN_HELPER_HPP */ - diff --git a/inference-engine/tests_deprecated/helpers/tests_common_func.cpp b/inference-engine/tests_deprecated/helpers/tests_common_func.cpp deleted file mode 100644 index 5738cf75521863..00000000000000 --- a/inference-engine/tests_deprecated/helpers/tests_common_func.cpp +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// 
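// Editor's note: the deleted tests_common_func.cpp below reads BMP and MNIST
// ubyte images straight into dense NCHW float blobs. Its pixel indexing is
// ordinary NCHW offset arithmetic; a minimal sketch of that layout math (the
// nchw_offset() helper is hypothetical, not part of the deleted code):

#include <cassert>
#include <cstddef>

// offset of element (n, c, h, w) in a dense NCHW buffer of shape N x C x H x W
static size_t nchw_offset(size_t n, size_t c, size_t h, size_t w,
                          size_t C, size_t H, size_t W) {
    return ((n * C + c) * H + h) * W + w;
}

int main() {
    // equivalent to readBMP's "b_off + j + storeAt * width + plane * height * width",
    // where b_off = 3 * width * height * b, plane is the channel, storeAt the row
    const size_t C = 3, H = 4, W = 5;
    assert(nchw_offset(1, 2, 3, 4, C, H, W) == 1 * C * H * W + 2 * H * W + 3 * W + 4);
}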
- -#include -#include -#include -#include - -#include "tests_common_func.hpp" - -using namespace InferenceEngine; - -inline void bswap_32(char* ptr, size_t size) { - char* end = ptr + size; - char tmp; - for (; ptr < end; ptr+=4) { - tmp = ptr[0]; ptr[0] = ptr[3]; ptr[3] = tmp; - tmp = ptr[1]; ptr[1] = ptr[2]; ptr[2] = tmp; - } -} - -InferenceEngine::Blob::Ptr readBMP(std::string path, unsigned batch) { - - std::ifstream input(path, std::ios::binary); - if (!input) return nullptr; - - unsigned char bmpFileHeader[14]; - input.read((char*)bmpFileHeader, sizeof(bmpFileHeader)); - if(bmpFileHeader[0]!='B' || bmpFileHeader[1]!='M') return nullptr; - if(bmpFileHeader[11]!=0 || bmpFileHeader[12]!=0 || bmpFileHeader[13]!=0 ) return nullptr; - - unsigned char bmpInfoHeader[40]; - input.read((char*)bmpInfoHeader, sizeof(bmpInfoHeader)); - if(bmpInfoHeader[14]!=24) return nullptr; // bits per pixel - if(bmpInfoHeader[16]!=0) return nullptr; // compression is not supported - - bool rowsReversed = (*(int32_t*)(bmpInfoHeader + 8)) < 0; - uint32_t width = *(int32_t*)(bmpInfoHeader + 4); - uint32_t height = abs(*(int32_t*)(bmpInfoHeader + 8)); - - size_t padSize = width & 3; - char pad[3]; - - InferenceEngine::Blob::Ptr blob(new InferenceEngine::TBlob( - {InferenceEngine::Precision::FP32, {batch, 3, height, width}, InferenceEngine::Layout::NCHW})); - blob->allocate(); - float *blob_ptr = (float*)(void*)blob->buffer(); - - unsigned int offset = *(unsigned int *)(bmpFileHeader + 10); - for (int b = 0; b < batch; b++) { - int b_off = 3*width*height*b; - input.seekg(offset, std::ios::beg); - //reading by rows in invert vertically - for (uint32_t i = 0; i < height; i++) { - int storeAt = rowsReversed ? i : height - 1 - i; - - for (uint32_t j = 0; j < width; j++) { - unsigned char RGBA[3]; - input.read((char *) RGBA, sizeof(RGBA)); - - blob_ptr[b_off + j + storeAt * width] = RGBA[0]; - blob_ptr[b_off + j + storeAt * width + height * width * 1] = RGBA[1]; - blob_ptr[b_off + j + storeAt * width + height * width * 2] = RGBA[2]; - } - input.read(pad, padSize); - } - } - - return blob; -} - -InferenceEngine::Blob::Ptr readUbyte(std::string path, unsigned batch) { - - std::ifstream input(path, std::ios::binary); - struct { - uint32_t magic_number; - uint32_t n_images; - uint32_t n_rows; - uint32_t n_cols; - } hdr; - - input.read((char *) &hdr, sizeof(hdr)); - bswap_32((char *) &hdr, sizeof(hdr)); - if (hdr.magic_number != 2051) return nullptr; // Invalid MNIST image file - - InferenceEngine::Blob::Ptr blob(new InferenceEngine::TBlob({InferenceEngine::Precision::FP32, - {batch, hdr.n_images, hdr.n_rows, hdr.n_cols}, - InferenceEngine::NCHW})); - blob->allocate(); - float *blob_ptr = (float*)(void*)blob->buffer(); - for (int b = 0; b < batch; b++) { - input.seekg(sizeof(hdr), std::ios::beg); - int b_off = b*hdr.n_images*hdr.n_rows*hdr.n_cols; - for (uint32_t i = 0; i < hdr.n_images; ++i) { - for (uint32_t r = 0; r < hdr.n_rows; ++r) { - for (uint32_t c = 0; c < hdr.n_cols; ++c) { - unsigned char temp = 0; - input.read((char *) &temp, sizeof(temp)); - blob_ptr[b_off + i * hdr.n_rows * hdr.n_cols + r * hdr.n_cols + c] = temp; - } - } - } - } - return blob; -} - -InferenceEngine::Blob::Ptr TestsCommonFunc::readInput(std::string path, int batch) { - if ( path.substr(path.rfind('.') + 1) == "bmp" ) return readBMP(path, batch); - if ( path.substr(path.rfind('-') + 1) == "ubyte" ) return readUbyte(path, batch); - return nullptr; -} - -bool compareTopLikeObjDetection ( - InferenceEngine::Blob& blob, - std::vector> &ref_top, - 
-    int batch_to_compare = 0,
-    const bool compareRawValues = true) {
-    assert(blob.getTensorDesc().getDims().back() == 7);
-
-    const int box_info_size = 7;
-
-    int top_num = (int)ref_top.size();
-    float *data_ptr = blob.buffer().as<float*>();
-    const int data_size = blob.size();
-    if (data_size/box_info_size < top_num) {
-        EXPECT_TRUE(data_size/box_info_size >= top_num) << "Dst blob contains less data than expected";
-        return false;
-    }
-
-#ifdef DISPLAY_RESULTS
-    std::cout << "actual:" << std::endl;
-    for (int i = 0; i < top_num; i++) {
-        std::cout << "{" << data_ptr[i*box_info_size + 1] << ", " << data_ptr[i*box_info_size + 2] << "}" << std::endl;
-    }
-
-    std::cout << "reference:" << std::endl;
-    for (int i = 0; i < top_num; i++) {
-        std::cout << "{" << ref_top[i].first << ", " << ref_top[i].second << "}" << std::endl;
-    }
-#endif
-
-    for (int i = 0; i < top_num; i++) {
-        const int detected_class = (int)data_ptr[i*box_info_size + 1];
-        const float confidence = data_ptr[i*box_info_size + 2];
-        if (detected_class != ref_top[i].first) {
-            EXPECT_EQ(detected_class, ref_top[i].first);
-            return false;
-        }
-        if (compareRawValues) {
-            if (fabs(confidence - ref_top[i].second)/ref_top[i].second > 0.005) {
-                EXPECT_NEAR(confidence, ref_top[i].second, ref_top[i].second * 0.005);
-                return false;
-            }
-        }
-    }
-
-    return true;
-}
-
-bool compareTopLikeClassification(
-    InferenceEngine::Blob& blob,
-    std::vector<std::pair<int, float>> &ref_top,
-    int batch_to_compare = 0,
-    float threshold = 0.005f,
-    const size_t classesCanBeChangedIndex = 9999,
-    const bool compareRawValues = true) {
-    int top_num = (int)ref_top.size();
-
-    size_t data_size = blob.size();
-    float *data_ptr = (float*)(void*)blob.buffer();
-
-    int batch_size = blob.getTensorDesc().getDims()[0];
-    assert(batch_size > batch_to_compare);
-
-    const std::vector<size_t> dims = blob.getTensorDesc().getDims();
-    if ((dims.size() != 2ul) || (dims[1] != 1ul)) {
-        data_size /= batch_size;
-    }
-    data_ptr += data_size*batch_to_compare;
-
-    std::vector<int> top(data_size);
-
-    for (size_t i = 0; i < data_size; i++) top[i] = (int)i;
-    std::partial_sort (top.begin(), top.begin()+top_num, top.end(),
-                       [&](int l, int r) -> bool { return data_ptr[l] > data_ptr[r]; } );
-
-#ifdef DISPLAY_RESULTS
-    std::cout << "actual:" << std::endl;
-    for (int i = 0; i < top_num; i++) {
-        std::cout << "{" << top[i] << ", " << data_ptr[top[i]] << "}" << std::endl;
-    }
-
-    std::cout << "reference:" << std::endl;
-    for (int i = 0; i < top_num; i++) {
-        std::cout << "{" << ref_top[i].first << ", " << ref_top[i].second << "}" << std::endl;
-    }
-#endif
-
-    for (int i = 0 ; i < top_num; i++) {
-        if (top[i] != ref_top[i].first) {
-            if (i >= classesCanBeChangedIndex) {
-                bool wasFound = false;
-                for (int refIndex = 0; refIndex < top_num; refIndex++) {
-                    if (top[i] == ref_top[refIndex].first) {
-                        wasFound = true;
-                        break;
-                    }
-                }
-
-                if (!wasFound) {
-                    EXPECT_EQ(top[i], ref_top[i].first) << "class is different for element " << i << ": " << top[i] << ", reference: " << ref_top[i].first;
-                    return false;
-                }
-            } else {
-                EXPECT_EQ(top[i], ref_top[i].first) << "class is different for element " << i << ": " << top[i] << ", reference: " << ref_top[i].first;
-                return false;
-            }
-        }
-
-        if (compareRawValues && (fabs(data_ptr[top[i]] - ref_top[i].second)/ref_top[i].second > threshold)) {
-            EXPECT_NEAR(data_ptr[top[i]] , ref_top[i].second , ref_top[i].second * threshold);
-            return false;
-        }
-    }
-    return true;
-}
-
-bool TestsCommonFunc::compareTop(
-    InferenceEngine::Blob& blob,
-    std::vector<std::pair<int, float>> &ref_top,
-    int batch_to_compare,
-    float threshold,
-    const size_t classesCanBeChangedIndex,
-    const bool compareRawValues) {
-    if ((blob.getTensorDesc().getDims().size() == 2) && (blob.size() == 3)) {
-        if (ref_top.size() != blob.size()) {
-            return false;
-        }
-
-        const float* buffer = blob.buffer().as<const float*>();
-
-#ifdef DISPLAY_RESULTS
-        std::cout << "actual:" << std::endl;
-        for (int i = 0; i < ref_top.size(); i++) {
-            std::cout << "{" << buffer[i] << "}" << std::endl;
-        }
-
-        std::cout << "reference:" << std::endl;
-        for (int i = 0; i < ref_top.size(); i++) {
-            std::cout << "{" << ref_top[i].first << ", " << ref_top[i].second << "}" << std::endl;
-        }
-#endif
-
-        for (size_t i = 0; i < blob.size(); ++i) {
-            if (std::abs(ref_top[i].second - buffer[i]) > threshold) {
-                return false;
-            }
-        }
-        return true;
-    } else if (blob.getTensorDesc().getDims().back() == 7)
-        return compareTopLikeObjDetection(blob, ref_top, batch_to_compare, compareRawValues);
-    else
-        return compareTopLikeClassification(blob, ref_top, batch_to_compare, threshold, classesCanBeChangedIndex, compareRawValues);
-}
diff --git a/inference-engine/tests_deprecated/helpers/tests_common_func.hpp b/inference-engine/tests_deprecated/helpers/tests_common_func.hpp
deleted file mode 100644
index a66eaae9c7bdd8..00000000000000
--- a/inference-engine/tests_deprecated/helpers/tests_common_func.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <gtest/gtest.h>
-#include <string>
-#include <vector>
-#include <legacy/details/ie_cnn_network_tools.h>
-
-// use to display additional test info:
-// 1. low precision transformation parameters
-// 2. reference and actual outputs
-// #define DISPLAY_RESULTS
-
-using namespace InferenceEngine;
-
-IE_SUPPRESS_DEPRECATED_START
-
-class TestsCommonFunc {
-    static CNNLayerPtr getLayer(const CNNNetwork& network, const std::string& layerName) {
-        std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
-        for (CNNLayerPtr layer : layers) {
-            if (layer->name == layerName) {
-                return layer;
-            }
-        }
-
-        return nullptr;
-    }
-public:
-
-    InferenceEngine::Blob::Ptr readInput(std::string path, int batch = 1);
-
-    static void checkLayerOuputPrecision(
-            const CNNNetwork& network,
-            const std::vector<std::string>& layerNames,
-            const Precision expectedPrecision,
-            const std::string& type = "") {
-        for (const std::string& layerName : layerNames) {
-            if (!type.empty()) {
-                const CNNLayerPtr layer = getLayer(network, layerName);
-                if (layer == nullptr) {
-                    IE_THROW() << "layer was not found " << layerName;
-                }
-
-                if (layer->type != type) {
-                    IE_THROW() << "layer '" << layer->name << "' type '" << layer->type << "' is not correct, expected " << type;
-                }
-            }
-            checkLayerOuputPrecision(network, layerName, expectedPrecision);
-        }
-    }
-
-    static void checkLayerOuputPrecision(const CNNNetwork& network, const std::string& layerName, Precision expectedPrecision) {
-        CNNLayerPtr layer = getLayer(network, layerName);
-        if (layer == nullptr) {
-            IE_THROW() << "layer '" << layerName << "' was not found";
-        }
-        for (DataPtr data : layer->outData) {
-            ASSERT_EQ(expectedPrecision, data->getPrecision()) << " unexpected precision " << data->getPrecision() << " for layer " << layerName;
-        }
-    }
-
-    static void checkLayerOuputPrecision(const CNNNetwork& network, const std::string& layerName, std::vector<Precision> expectedPrecisions) {
-        CNNLayerPtr layer = getLayer(network, layerName);
-        if (layer == nullptr) {
-            IE_THROW() << "layer '" << layerName << "' was not found";
-        }
-        for (DataPtr data : layer->outData) {
-            ASSERT_TRUE(std::any_of(
-                expectedPrecisions.begin(),
-                expectedPrecisions.end(),
-                [&](const Precision precision) { return precision == data->getTensorDesc().getPrecision(); })) <<
-                " unexpected precision " << data->getPrecision() << " for layer " << layerName;
-        }
-    }
-
-    bool compareTop(
-        InferenceEngine::Blob& blob,
-        std::vector<std::pair<int, float>> &ref_top,
-        int batch_to_compare = 0,
-        float threshold = 0.005f,
-        const size_t classesCanBeChangedIndex = 9999,
-        const bool compareRawValues = true);
-};
-
-IE_SUPPRESS_DEPRECATED_END
diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp
deleted file mode 100644
index ffe853f7697581..00000000000000
--- a/inference-engine/tests_deprecated/unit/engines/mkldnn/dummy.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright (C) 2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-

From a13bd518fc43c0837ad51592bf11bc4890929068 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Wed, 12 May 2021 13:24:18 +0300
Subject: [PATCH 04/27] [IE CLDNN] Fixed CLDNN internal tests compilation
 (#5597)

---
 inference-engine/src/cldnn_engine/CMakeLists.txt     |  2 --
 inference-engine/thirdparty/clDNN/CMakeLists.txt     |  4 ++--
 inference-engine/thirdparty/clDNN/src/CMakeLists.txt |  4 ++++
 .../thirdparty/clDNN/src/include/cldnn_itt.h         |  2 +-
 .../clDNN/tests_core_internal/CMakeLists.txt         | 12 ++++++++----
 5 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/inference-engine/src/cldnn_engine/CMakeLists.txt b/inference-engine/src/cldnn_engine/CMakeLists.txt
index 1ba2bc9e98e277..c7ac932910bd07 100644
--- a/inference-engine/src/cldnn_engine/CMakeLists.txt
+++ b/inference-engine/src/cldnn_engine/CMakeLists.txt
@@ -40,8 +40,6 @@ target_include_directories(${TARGET_NAME} PRIVATE
 
 set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
 
-set_ie_threading_interface_for(clDNN_lib)
-
 # Failed because of OpenCL
 # ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME})
 
diff --git a/inference-engine/thirdparty/clDNN/CMakeLists.txt b/inference-engine/thirdparty/clDNN/CMakeLists.txt
index c9cc74e38f7b82..6c63f932ed1900 100644
--- a/inference-engine/thirdparty/clDNN/CMakeLists.txt
+++ b/inference-engine/thirdparty/clDNN/CMakeLists.txt
@@ -60,9 +60,9 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON)
 # ====================================== HELPER CONSTANT VARIABLES =====================================
 # ======================================================================================================
 # ======================================================================================================
-if("${CLDNN_THREADING}" MATCHES "SEQ")
+if(CLDNN_THREADING MATCHES "SEQ")
   add_definitions(-DCLDNN_THREADING=CLDNN_THREADING_SEQ)
-elseif("${CLDNN_THREADING}" MATCHES "TBB")
+elseif(CLDNN_THREADING MATCHES "TBB")
   add_definitions(-DCLDNN_THREADING=CLDNN_THREADING_TBB)
 else()
   add_definitions(-DCLDNN_THREADING=CLDNN_THREADING_THREADPOOL)
diff --git a/inference-engine/thirdparty/clDNN/src/CMakeLists.txt b/inference-engine/thirdparty/clDNN/src/CMakeLists.txt
index 160492150a437f..dd1fd4b300f4be 100644
--- a/inference-engine/thirdparty/clDNN/src/CMakeLists.txt
+++ b/inference-engine/thirdparty/clDNN/src/CMakeLists.txt
@@ -157,6 +157,10 @@ target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE
     openvino::itt
   )
 
+if(COMMAND set_ie_threading_interface_for)
+  set_ie_threading_interface_for("${CLDNN_BUILD__PROJ}")
+endif()
+
 if(WIN32)
   target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE setupapi)
 elseif((NOT ANDROID) AND (UNIX))
diff --git a/inference-engine/thirdparty/clDNN/src/include/cldnn_itt.h b/inference-engine/thirdparty/clDNN/src/include/cldnn_itt.h
index 979ba14aad0435..b28db96f29567f 100644
--- a/inference-engine/thirdparty/clDNN/src/include/cldnn_itt.h
+++ b/inference-engine/thirdparty/clDNN/src/include/cldnn_itt.h
@@ -14,7 +14,7 @@
 namespace cldnn {
 namespace itt {
 namespace domains {
-    OV_ITT_DOMAIN(CLDNN);
+    OV_ITT_DOMAIN(CLDNN)
 }
 }
 }
diff --git a/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt b/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt
index 2d270f7c79db04..663f50000cc599 100644
--- a/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt
+++ b/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt
@@ -107,18 +107,22 @@ add_executable("${CLDNN_BUILD__PROJ}"
     ${__CLDNN_AllSources}
   )
 
+if(COMMAND set_ie_threading_interface_for)
+  set_ie_threading_interface_for("${CLDNN_BUILD__PROJ}")
+endif()
+
 set_property(TARGET "${CLDNN_BUILD__PROJ}" PROPERTY PROJECT_LABEL "${CLDNN_BUILD__PROJ_LABEL}")
 set_property(TARGET "${CLDNN_BUILD__PROJ}" PROPERTY OUTPUT_NAME "${CLDNN_BUILD__PROJ_OUTPUT_NAME}")
 
 # Set library dependencies
-target_link_libraries("${CLDNN_BUILD__PROJ}" "${CLDNN_BUILD__PROJ__clDNN}")
+target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE "${CLDNN_BUILD__PROJ__clDNN}")
 
 if(WIN32)
-  target_link_libraries("${CLDNN_BUILD__PROJ}" setupapi)
+  target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE setupapi)
 elseif((NOT ANDROID) AND (UNIX))
-  target_link_libraries("${CLDNN_BUILD__PROJ}" pthread)
+  target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE pthread)
 endif()
-target_link_libraries("${CLDNN_BUILD__PROJ}" ${CLDNN__SYSTEM_LINK_LIBRARIES})
+target_link_libraries("${CLDNN_BUILD__PROJ}" PRIVATE ${CLDNN__SYSTEM_LINK_LIBRARIES})
 
 # =================================== Custom pre- and post-steps =======================================

From 6971303856bea7bc8a97f56701b3748f3afd277b Mon Sep 17 00:00:00 2001
From: Gabriele Galiero Casay
Date: Wed, 12 May 2021 12:47:50 +0200
Subject: [PATCH 05/27] Revise BatchNormInference Reference Implementation
 (#5468)

* Refactor backend tests
* Rewrite reference implementation
* Align ref impl signature with order of inputs in v5
* Remove legacy code for training and backprop
* BatchNormInference operation class review
* Use reference to const in helpers to validate input shapes
* Refactor type_prop tests to cover v0 and v5
* Add type_prop tests
* Add epsilon attribute test with invalid value
* Add test for invalid integer input types
* Add serialization single layer test
* Add attribute visitor test
* Fix arm plugin test failure with dynamic element type
* Remove CoordinateTransform index call
* Add attribute count to visitor test
---
 .../serialization/single_layer/batch_norm.cpp |  54 ++
 .../ngraph/runtime/reference/batch_norm.hpp   | 189 +---
 ngraph/core/src/op/batch_norm.cpp             |  12 +
 ngraph/core/src/validation_util.cpp           |  26 +-
 ngraph/test/CMakeLists.txt                    |   1 +
 ngraph/test/backend/batch_norm.in.cpp         | 556 +++---------
 ngraph/test/runtime/ie/unit_test.manifest     |   9 -
 .../runtime/interpreter/evaluates_map.cpp     |   4 +-
 ngraph/test/type_prop/batch_norm.cpp          | 843 ++++++++----------
 ngraph/test/visitors/op/batch_norm.cpp        |  56 ++
 10 files changed, 675 insertions(+), 1075 deletions(-)
 create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/batch_norm.cpp
 create mode 100644 ngraph/test/visitors/op/batch_norm.cpp

diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/batch_norm.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/batch_norm.cpp
new file mode 100644
index 00000000000000..04d878727b3e10
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/batch_norm.cpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "shared_test_classes/single_layer/batch_norm.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+TEST_P(BatchNormLayerTest, Serialize) {
+    Serialize();
+}
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<double> epsilon = {
+    1e-6,
+    1e-5,
+    1e-4
+};
+
+const std::vector<std::vector<size_t>> inputShapes = {
+    {1, 3},
+    {2, 5},
+    {1, 3, 10},
+    {1, 3, 1, 1},
+    {2, 5, 4, 4},
+};
+
+const auto batchNormParams = testing::Combine(
+    testing::ValuesIn(epsilon),
+    testing::ValuesIn(netPrecisions),
+    testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+    testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+    testing::Values(InferenceEngine::Layout::ANY),
+    testing::Values(InferenceEngine::Layout::ANY),
+    testing::ValuesIn(inputShapes),
+    testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_BatchNorm_Serialization,
+    BatchNormLayerTest,
+    batchNormParams,
+    BatchNormLayerTest::getTestCaseName
+);
+
+}  // namespace
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/batch_norm.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/batch_norm.hpp
index e2eeb1ba1cc34f..48d93baaf2316f 100644
--- a/ngraph/core/reference/include/ngraph/runtime/reference/batch_norm.hpp
+++ b/ngraph/core/reference/include/ngraph/runtime/reference/batch_norm.hpp
@@ -7,15 +7,8 @@
 #include <cmath>
 #include <cstddef>
 
-#include "ngraph/axis_vector.hpp"
 #include "ngraph/coordinate_transform.hpp"
-#include "ngraph/runtime/reference/add.hpp"
-#include "ngraph/runtime/reference/broadcast.hpp"
-#include "ngraph/runtime/reference/divide.hpp"
-#include "ngraph/runtime/reference/multiply.hpp"
-#include "ngraph/runtime/reference/sum.hpp"
 #include "ngraph/shape.hpp"
-#include "ngraph/util.hpp"
 
 namespace ngraph
 {
@@ -23,177 +16,37 @@ namespace ngraph
 {
     namespace reference
     {
+        template <typename T>
+        static inline T norm(T val, T mean, T var, T eps)
+        {
+            return ((val - mean) / static_cast<T>(std::sqrt(var + eps)));
+        }
+
         template <typename T>
         void batch_norm_inference(float eps,
+                                  const T* in,
                                   const T* gamma,
                                   const T* beta,
-                                  const T* input,
                                   const T* mean,
                                   const T* variance,
-                                  T* normed_input,
-                                  const Shape& input_shape)
-        {
-            auto eps_casted = static_cast<T>(eps);
-            CoordinateTransform input_transform(input_shape);
-
-            for (Coordinate input_coord : input_transform)
-            {
-                auto channel_num = input_coord[1];
-                auto channel_gamma = gamma[channel_num];
-                auto channel_beta = beta[channel_num];
-                auto channel_mean = mean[channel_num];
-                auto channel_var = variance[channel_num];
-
-                auto input_index = input_transform.index(input_coord);
-                auto normalized =
-                    (input[input_index] - channel_mean) / (std::sqrt(channel_var + eps_casted));
-                normed_input[input_index] = normalized * channel_gamma + channel_beta;
-            }
-        }
-
-        template <typename T>
-        void batch_norm_training(float eps,
-                                 const T* gamma,
-                                 const T* beta,
-                                 const T* input,
-                                 T* normed_input,
-                                 T* mean,
-                                 T* variance,
-                                 const Shape& input_shape)
+                                  T* out,
+                                  const Shape& in_shape)
         {
             auto eps_casted = static_cast<T>(eps);
-            auto channels = input_shape[1];
-
-            // We use these objects to iterate over the indices in a channel.
-            // The start and end points for the channel axis are modified in the loop.
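The rewritten reference above collapses inference to one per-element formula,
y = (x - mean[c]) / sqrt(var[c] + eps) * gamma[c] + beta[c], where c is the
channel coordinate. A minimal self-contained sketch of that arithmetic, with
values borrowed from the first 2d_f32 backend test case further down (this
snippet is illustrative only and not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main() {
        // x = 1.0, gamma = 2.0, beta = 0.0, mean = 0.0, var = 0.75, eps = 0.25
        const float eps = 0.25f, gamma = 2.0f, beta = 0.0f, mean = 0.0f, var = 0.75f;
        const float x = 1.0f;
        const float y = (x - mean) / std::sqrt(var + eps) * gamma + beta;
        std::printf("%f\n", y);  // prints 2.000000, the expected output value
        return 0;
    }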
-            Coordinate start_corner;
-            Coordinate end_corner;
-            for (size_t i = 0; i < input_shape.size(); i++)
-            {
-                start_corner.push_back(0);
-                end_corner.push_back(input_shape[i]);
-            }
-
-            for (size_t c = 0; c < channels; c++)
+            size_t in_idx = 0;
+            CoordinateTransform in_transform(in_shape);
+            for (Coordinate in_coord : in_transform)
             {
-                T channel_sum = 0;
-
-                start_corner[1] = c;
-                end_corner[1] = c + 1;
-
-                // Compute the mean
-                CoordinateTransform input_transform(input_shape, start_corner, end_corner);
-                for (Coordinate input_coord : input_transform)
-                {
-                    channel_sum += input[input_transform.index(input_coord)];
-                }
-                T channel_mean = channel_sum / (shape_size(input_shape) / channels);
-                mean[c] = channel_mean;
-
-                T channel_diff_square_sum = 0;
-                for (Coordinate input_coord : input_transform)
-                {
-                    auto centered = input[input_transform.index(input_coord)] - channel_mean;
-                    channel_diff_square_sum += centered * centered;
-                }
-                T channel_var = channel_diff_square_sum / (shape_size(input_shape) / channels);
-                variance[c] = channel_var;
-
-                auto channel_gamma = gamma[c];
-                auto channel_beta = beta[c];
-                T scale = channel_gamma / std::sqrt(channel_var + eps_casted);
-
-                // Compute the normalized output
-                for (Coordinate input_coord : input_transform)
-                {
-                    auto input_index = input_transform.index(input_coord);
-                    normed_input[input_index] =
-                        (input[input_index] - channel_mean) * scale + channel_beta;
-                }
-            }
-        }
-
-        template <typename T>
-        void batch_norm_backprop(float eps,
-                                 const T* gamma,
-                                 const T* /* beta */,
-                                 const T* input,
-                                 const T* mean,
-                                 const T* variance,
-                                 const T* delta_normed,
-                                 T* delta_input,
-                                 T* delta_gamma,
-                                 T* delta_beta,
-                                 const Shape& input_shape)
-        {
-            size_t channel_axis = 1;
-            auto num_channels = input_shape[channel_axis];
-            Shape moment_shape = Shape{num_channels};
-            auto input_num_elements = shape_size(input_shape);
-            auto elements_per_channel = input_num_elements / num_channels;
-
-            Coordinate start_corner;
-            Coordinate end_corner;
-            for (size_t i = 0; i < input_shape.size(); i++)
-            {
-                start_corner.push_back(0);
-                end_corner.push_back(input_shape[i]);
-            }
-
-            // The forward computation in gory detail
-            // input[., C, ...]
-            // gamma[C]
-            // beta[C]
-            // mu[c:C] = sum(input[., c, ...])/elements_per_channel
-            // var[c:C] = sum(input[., c, ...]^2 - mu[c])/elements_per_channel
-            // inv_sqrt[c:C] = 1/sqrt(var[c]+epsilon)
-            // gammad[c:C] = gamma[c]*inv_sqrt[c]
-            // normed[., c:C, ...] = (input[., c, ...]-mu)*gammad[c]+beta[c]
-
-            for (uint64_t c = 0; c < num_channels; ++c)
-            {
-                start_corner[channel_axis] = c;
-                end_corner[channel_axis] = c + 1;
-
-                CoordinateTransform input_transform(input_shape, start_corner, end_corner);
-                T delta_beta_sum = 0;
-                T var = variance[c];
-                T mu = mean[c];
-                T var_eps = var + static_cast<T>(eps);
-                T sqrt_var_eps = std::sqrt(var_eps);
-                T inv_sqrt_var_eps = 1 / sqrt_var_eps;
-                T gammad = gamma[c] * inv_sqrt_var_eps;
-                T delta_gammad = 0;
-                T delta_mu = 0;
-                for (Coordinate input_coord : input_transform)
-                {
-                    auto idx = input_transform.index(input_coord);
-                    auto delta_idx = delta_normed[idx];
-                    auto input_idx = input[idx];
-                    delta_beta_sum += delta_idx;
-                    delta_gammad += (input_idx - mu) * delta_idx;
-                    T delta_centered = gammad * delta_idx;
-                    delta_input[idx] = delta_centered;
-                    delta_mu -= delta_centered;
-                }
-                delta_beta[c] = delta_beta_sum;
-                delta_gamma[c] = delta_gammad * inv_sqrt_var_eps;
-                T delta_inv_sqrt = gamma[c] * delta_gammad;
-                // y = x^(-1/2)
-                // dy = -(1/2)x^(-3/2) = -y/(2x) dx
-                T delta_var = -delta_inv_sqrt * inv_sqrt_var_eps / (2 * var_eps);
-                T delta_two_var_sum = 2 * delta_var / elements_per_channel;
-                T delta_mu_over_n = delta_mu / elements_per_channel;
-                for (Coordinate input_coord : input_transform)
-                {
-                    // v = 1/N sum(x_i - mu)^2
-                    // dv = 2/N sum[(x_i - mu)dx_i] - 2/N sum[(x_i - mu) dmu]
-                    //    = 2/N sum[(x_i - mu)dx_i] - 2/N (N mu - N mu) dmu
-                    //    = 2/N sum[(x_i - mu)dx_i]
-                    auto idx = input_transform.index(input_coord);
-                    // These two values mostly cancel out so add them first
-                    auto val = delta_input[idx] + delta_mu_over_n;
-                    delta_input[idx] = val + (input[idx] - mu) * delta_two_var_sum;
-                }
+                auto ch_num = in_coord[1];
+                auto ch_gamma = gamma[ch_num];
+                auto ch_beta = beta[ch_num];
+                auto ch_mean = mean[ch_num];
+                auto ch_var = variance[ch_num];
+
+                auto normalized = norm(in[in_idx], ch_mean, ch_var, eps_casted);
+                out[in_idx] = normalized * ch_gamma + ch_beta;
+                in_idx++;
             }
         }
     }  // namespace reference
diff --git a/ngraph/core/src/op/batch_norm.cpp b/ngraph/core/src/op/batch_norm.cpp
index 8c38a07050d9bf..1f772cf67daa3a 100644
--- a/ngraph/core/src/op/batch_norm.cpp
+++ b/ngraph/core/src/op/batch_norm.cpp
@@ -40,6 +40,12 @@ void op::v0::BatchNormInference::validate_and_infer_types()
     PartialShape result_batch_shape;
     PartialShape result_channel_shape; // unused here
 
+    NODE_VALIDATION_CHECK(
+        this,
+        m_epsilon > 0,
+        "Attribute 'epsilon' must be a positive floating-point value. Got: ",
+        m_epsilon);
+
     set_output_size(1);
     std::tie(result_et, result_batch_shape, result_channel_shape) =
         infer_batch_norm_forward(this,
@@ -94,6 +100,12 @@ void op::v5::BatchNormInference::validate_and_infer_types()
     PartialShape result_batch_shape;
     PartialShape result_channel_shape; // unused here
 
+    NODE_VALIDATION_CHECK(
+        this,
+        m_epsilon > 0,
+        "Attribute 'epsilon' must be a positive floating-point value. Got: ",
+        m_epsilon);
+
     set_output_size(1);
     std::tie(result_et, result_batch_shape, result_channel_shape) =
         infer_batch_norm_forward(this,
diff --git a/ngraph/core/src/validation_util.cpp b/ngraph/core/src/validation_util.cpp
index 2abb3eaf0ec320..d41a4334f50c65 100644
--- a/ngraph/core/src/validation_util.cpp
+++ b/ngraph/core/src/validation_util.cpp
@@ -580,7 +580,7 @@ static std::tuple<element::Type, PartialShape, PartialShape> infer_batch_norm_fo
     // messages.
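    // Illustrative aside (simplified semantics, not part of this change): the
    // channel-shape validation further down relies on PartialShape::merge_into,
    // which refines a destination shape in place and reports incompatibility:
    //
    //   PartialShape channel_shape{PartialShape::dynamic()};
    //   PartialShape::merge_into(channel_shape, PartialShape{3});                    // true,  now {3}
    //   PartialShape::merge_into(channel_shape, PartialShape{Dimension::dynamic()}); // true,  stays {3}
    //   PartialShape::merge_into(channel_shape, PartialShape{4});                    // false, shapes clash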
     std::stringstream ss;
     bool first = true;
-    for (auto& inp : channel_shaped_inputs)
+    for (const auto& inp : channel_shaped_inputs)
     {
         if (!first)
         {
@@ -594,24 +594,30 @@
     // Infer output element type.
     element::Type et_result{input_element_type};
 
-    for (auto& inp : channel_shaped_inputs)
+    for (const auto& inp : channel_shaped_inputs)
     {
         NODE_VALIDATION_CHECK(node,
                               element::Type::merge(et_result, et_result, inp.m_element_type),
                               "Input element types do not match.");
     }
 
+    NODE_VALIDATION_CHECK(node,
+                          et_result.is_dynamic() || et_result.is_real(),
+                          "Input element types must be floating-point. Got: ",
+                          et_result);
+
     // Extract channel dimension from input shape.
     Dimension channel_dim{Dimension::dynamic()};
 
-    NODE_VALIDATION_CHECK(node,
-                          input_shape.is_dynamic() || input_shape.rank().get_length() >= 2,
-                          "Input argument must have rank of at least 2 (input argument shape: ",
-                          input_shape,
-                          ").");
-
-    if (input_shape.rank().is_static())
+    Rank input_rank = input_shape.rank();
+    if (input_rank.is_static())
     {
+        NODE_VALIDATION_CHECK(node,
+                              input_rank.get_length() >= 2,
+                              "Input argument must have rank of at least 2 (input argument shape: ",
+                              input_shape,
+                              ").");
+
         channel_dim = input_shape[1];
     }
 
@@ -619,7 +625,7 @@
     // "channel_dim".
     PartialShape channel_shape{PartialShape::dynamic()};
 
-    for (auto& inp : channel_shaped_inputs)
+    for (const auto& inp : channel_shaped_inputs)
     {
         NODE_VALIDATION_CHECK(node,
                               PartialShape::merge_into(channel_shape, inp.m_shape),
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index d4aabfccf8a884..ea68af8da2230f 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -210,6 +210,7 @@ set(SRC
     visitors/partial_shape.cpp
     visitors/user_op.cpp
     visitors/value_map.cpp
+    visitors/op/batch_norm.cpp
     visitors/op/broadcast.cpp
     visitors/op/bucketize.cpp
     visitors/op/constant.cpp
diff --git a/ngraph/test/backend/batch_norm.in.cpp b/ngraph/test/backend/batch_norm.in.cpp
index 663935352aa5c6..0e6419318d80a6 100644
--- a/ngraph/test/backend/batch_norm.in.cpp
+++ b/ngraph/test/backend/batch_norm.in.cpp
@@ -4,450 +4,154 @@
 
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
-#include "ngraph/runtime/tensor.hpp"
-#include "runtime/backend.hpp"
-#include "util/all_close.hpp"
-#include "util/all_close_f.hpp"
-#include "util/ndarray.hpp"
+#include "util/test_case.hpp"
+#include "util/engine/test_engines.hpp"
 #include "util/test_control.hpp"
-#include "util/test_tools.hpp"
 
 using namespace std;
 using namespace ngraph;
 
 static string s_manifest = "${MANIFEST}";
+using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 
-template <typename T>
-class BatchNormInferenceTester
+template <typename T>
+struct BatchNormTestParams
 {
-public:
-    BatchNormInferenceTester(const std::shared_ptr<runtime::Backend>& backend,
-                             const Shape& input_shape,
-                             element::Type etype,
-                             double epsilon)
-        : m_backend(backend)
-    {
-        Shape channel_shape{input_shape.at(1)};
-
-        auto Input = make_shared<op::Parameter>(etype, input_shape);
-        auto Gamma = make_shared<op::Parameter>(etype, channel_shape);
-        auto Beta = make_shared<op::Parameter>(etype, channel_shape);
-        auto Mean = make_shared<op::Parameter>(etype, channel_shape);
-        auto Variance = make_shared<op::Parameter>(etype, channel_shape);
-        auto BN =
-            make_shared<op::BatchNormInference>(Input, Gamma, Beta, Mean, Variance, epsilon);
-        m_function = make_shared<Function>(BN, ParameterVector{Input, Gamma, Beta, Mean, Variance});
-
-        m_input = backend->create_tensor(etype, input_shape);
-        m_gamma = backend->create_tensor(etype, channel_shape);
-        m_beta = backend->create_tensor(etype, channel_shape);
-        m_mean = backend->create_tensor(etype, channel_shape);
-        m_variance = backend->create_tensor(etype, channel_shape);
-        m_normed_input = backend->create_tensor(etype, input_shape);
-    }
-
-    bool call(const std::vector<T>& input,
-              const std::vector<T>& gamma,
-              const std::vector<T>& beta,
-              const std::vector<T>& mean,
-              const std::vector<T>& variance,
-              const std::vector<T>& normed_input)
-    {
-        copy_data(m_input, input);
-        copy_data(m_gamma, gamma);
-        copy_data(m_beta, beta);
-        copy_data(m_mean, mean);
-        copy_data(m_variance, variance);
-        auto handle = m_backend->compile(m_function);
-        handle->call_with_validate({m_normed_input},
-                                   {m_input, m_gamma, m_beta, m_mean, m_variance});
-        auto res_normed_input = read_vector<T>(m_normed_input);
-        return test::all_close(normed_input, res_normed_input);
-    }
-
-protected:
-    const std::shared_ptr<runtime::Backend>& m_backend;
-    std::shared_ptr<Function> m_function;
-    std::shared_ptr<runtime::Tensor> m_input;
-    std::shared_ptr<runtime::Tensor> m_gamma;
-    std::shared_ptr<runtime::Tensor> m_beta;
-    std::shared_ptr<runtime::Tensor> m_mean;
-    std::shared_ptr<runtime::Tensor> m_variance;
-    std::shared_ptr<runtime::Tensor> m_normed_input;
+    std::vector<T> in;
+    Shape in_shape;
+    std::vector<T> in_g;
+    std::vector<T> in_b;
+    std::vector<T> in_m;
+    std::vector<T> in_v;
+    float epsilon;
+    std::vector<T> out;
 };
 
 template <typename T>
-class BatchNormInferenceTesterZeroEpsilon : public BatchNormInferenceTester<T>
-{
-public:
-    // These are for documentation purposes only below
-    using Input = test::NDArray<T, 2>;
-    using Gamma = test::NDArray<T, 1>;
-    using Beta = test::NDArray<T, 1>;
-    using Mean = test::NDArray<T, 1>;
-    using Variance = test::NDArray<T, 1>;
-    using NormedInput = test::NDArray<T, 2>;
-
-    BatchNormInferenceTesterZeroEpsilon(const std::shared_ptr<runtime::Backend>& backend,
-                                        element::Type etype)
-        : BatchNormInferenceTester<T>(backend, Shape{2, 3}, etype, 0.0)
-    {
-    }
-
-    bool test(const Input& input,
-              const Gamma& gamma,
-              const Beta& beta,
-              const Mean& mean,
-              const Variance& variance,
-              const NormedInput& normed_input)
-    {
-        return BatchNormInferenceTester<T>::call(input.get_vector(),
-                                                 gamma.get_vector(),
-                                                 beta.get_vector(),
-                                                 mean.get_vector(),
-                                                 variance.get_vector(),
-                                                 normed_input.get_vector());
-    }
-    bool test_gamma()
-    {
-        return test(Input{{1.0, 2.0, 3.0}, {-1.0, -2.0, -3.0}},
-                    Gamma{2.0, 3.0, 4.0},
-                    Beta{0.0, 0.0, 0.0},
-                    Mean{0.0, 0.0, 0.0},
-                    Variance{1.0, 1.0, 1.0},
-                    NormedInput{{2.0, 6.0, 12.0}, {-2.0, -6.0, -12.0}});
-    }
-    bool test_beta()
-    {
-        return test(Input{{1.0, 2.0, 3.0}, {-1.0, -2.0, -3.0}},
-                    Gamma{1.0, 1.0, 1.0},
-                    Beta{2.0, -2.0, 3.0},
-                    Mean{0.0, 0.0, 0.0},
-                    Variance{1.0, 1.0, 1.0},
-                    NormedInput{{3.0, 0.0, 6.0}, {1.0, -4.0, 0.0}});
-    }
-    bool test_mean()
-    {
-        return test(Input{{1.0, 2.0, 3.0}, {-1.0, -2.0, -3.0}},
-                    Gamma{1.0, 1.0, 1.0},
-                    Beta{0.0, 0.0, 0.0},
-                    Mean{-2.0, 2.0, -3.0},
-                    Variance{1.0, 1.0, 1.0},
-                    NormedInput{{3.0, 0.0, 6.0}, {1.0, -4.0, 0.0}});
-    }
-    bool test_variance()
-    {
-        return test(Input{{1.0, 2.0, 3.0}, {-1.0, -2.0, -3.0}},
-                    Gamma{1.0, 1.0, 1.0},
-                    Beta{0.0, 0.0, 0.0},
-                    Mean{0.0, 0.0, 0.0},
-                    Variance{0.25, .0625, 4.0},
-                    NormedInput{{2.0, 8.0, 1.5}, {-2.0, -8.0, -1.5}});
-    }
-};
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f64)
+static void BatchNormInferenceTest(const BatchNormTestParams<T>& p)
 {
-    using T = double;
-    auto& et = element::f64;
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    BatchNormInferenceTesterZeroEpsilon<T> bnt(backend, et);
-    EXPECT_TRUE(bnt.test_gamma()) << "Gamma test";
-    EXPECT_TRUE(bnt.test_beta()) << "Beta test";
-    EXPECT_TRUE(bnt.test_mean()) << "Mean test";
-    EXPECT_TRUE(bnt.test_variance()) << "Variance test";
+    const Shape ch_shape{p.in_shape.at(1)};
+    auto input = make_shared<op::Parameter>(element::from<T>(), p.in_shape);
+    auto gamma = make_shared<op::Parameter>(element::from<T>(), ch_shape);
+    auto beta = make_shared<op::Parameter>(element::from<T>(), ch_shape);
+    auto mean = make_shared<op::Parameter>(element::from<T>(), ch_shape);
+    auto variance = make_shared<op::Parameter>(element::from<T>(), ch_shape);
+    auto batch_norm = make_shared<op::v5::BatchNormInference>(
+        input,
+        gamma,
+        beta,
+        mean,
+        variance,
+        p.epsilon);
+    auto f = make_shared<Function>(
+        batch_norm, ParameterVector{input, gamma, beta, mean, variance});
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<T>(p.in_shape, p.in);
+    test_case.add_input<T>(ch_shape, p.in_g);
+    test_case.add_input<T>(ch_shape, p.in_b);
+    test_case.add_input<T>(ch_shape, p.in_m);
+    test_case.add_input<T>(ch_shape, p.in_v);
+    test_case.add_expected_output<T>(p.in_shape, p.out);
+    test_case.run_with_tolerance_as_fp(1e-4f);
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f32)
-{
-    using T = float;
-    auto& et = element::f32;
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    BatchNormInferenceTesterZeroEpsilon<T> bnt(backend, et);
-    EXPECT_TRUE(bnt.test_gamma()) << "Gamma test";
-    EXPECT_TRUE(bnt.test_beta()) << "Beta test";
-    EXPECT_TRUE(bnt.test_mean()) << "Mean test";
-    EXPECT_TRUE(bnt.test_variance()) << "Variance test";
-}
-
-template <typename T>
-class BatchNormInferenceTesterNonZeroEpsilon : public BatchNormInferenceTester<T>
+NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_2d_f32)
 {
-public:
-    // These are for documentation purposes only below
-    using Input = test::NDArray<T, 2>;
-    using Gamma = test::NDArray<T, 1>;
-    using Beta = test::NDArray<T, 1>;
-    using Mean = test::NDArray<T, 1>;
-    using Variance = test::NDArray<T, 1>;
-    using NormedInput = test::NDArray<T, 2>;
-
-    BatchNormInferenceTesterNonZeroEpsilon(const std::shared_ptr<runtime::Backend>& backend,
-                                           element::Type etype)
-        : BatchNormInferenceTester<T>(backend, Shape{2, 3}, etype, 0.25)
-    {
-    }
-
-    bool test(const Input& input,
-              const Gamma& gamma,
-              const Beta& beta,
-              const Mean& mean,
-              const Variance& variance,
-              const NormedInput& normed_input)
-    {
-        return BatchNormInferenceTester<T>::call(input.get_vector(),
-                                                 gamma.get_vector(),
-                                                 beta.get_vector(),
-                                                 mean.get_vector(),
-                                                 variance.get_vector(),
-                                                 normed_input.get_vector());
-    }
-    bool test_gamma()
-    {
-        return test(Input{{1.0, 2.0, 3.0}, {-1.0, -2.0, -3.0}},
-                    Gamma{2.0, 3.0, 4.0},
-                    Beta{0.0, 0.0, 0.0},
-                    Mean{0.0, 0.0, 0.0},
-                    Variance{0.75, 0.75, 0.75},
-                    NormedInput{{2.0, 6.0, 12.0}, {-2.0, -6.0, -12.0}});
-    }
-    bool test_beta()
-    {
-        return test(Input{{1.0, 2.0, 3.0}, {-1.0, -2.0, -3.0}},
-                    Gamma{1.0, 1.0, 1.0},
-                    Beta{2.0, -2.0, 3.0},
-                    Mean{0.0, 0.0, 0.0},
-                    Variance{0.75, 0.75, 0.75},
-                    NormedInput{{3.0, 0.0, 6.0}, {1.0, -4.0, 0.0}});
-    }
-    bool test_mean()
+    const std::vector<BatchNormTestParams<float>> batch_norm_tests{
+        BatchNormTestParams<float>{
+            {1.0, 2.0, 3.0, -1.0, -2.0, -3.0},
+            Shape{2, 3},
+            {2.0, 3.0, 4.0},
+            {0.0, 0.0, 0.0},
+            {0.0, 0.0, 0.0},
+            {0.75, 0.75, 0.75},
+            0.25,
+            {2.0, 6.0, 12.0, -2.0, -6.0, -12.0}},
+
+        BatchNormTestParams<float>{
+            {1.0, 2.0, 3.0, -1.0, -2.0, -3.0},
+            Shape{2, 3},
+            {1.0, 1.0, 1.0},
+            {2.0, -2.0, 3.0},
+            {0.0, 0.0, 0.0},
+            {0.75, 0.75, 0.75},
+            0.25,
+            {3.0, 0.0, 6.0, 1.0, -4.0, 0.0}},
+
+        BatchNormTestParams<float>{
+            {1.0, 2.0, 3.0, -1.0, -2.0, -3.0},
+            Shape{2, 3},
+            {1.0, 1.0, 1.0},
+            {0.0, 0.0, 0.0},
+            {-2.0, 2.0, -3.0},
+            {0.75, 0.75, 0.75},
+            0.25,
+            {3.0, 0.0, 6.0, 1.0, -4.0, 0.0}},
+
+        BatchNormTestParams<float>{
+            {3.0, 5.0, 1.0, -3.0, -5.0, -1.0},
+            Shape{2, 3},
+            {1.0, 1.0, 1.0},
+            {0.0, 0.0, 0.0},
+            {0.0, 0.0, 0.0},
+            {2.0, 6.0, 0.0},
+            0.25,
+            {2.0, 2.0, 2.0, -2.0, -2.0, -2.0}},
+    };
+
+    for (const auto& test_case : batch_norm_tests)
     {
-        return test(Input{{1.0, 2.0, 3.0}, {-1.0, -2.0, -3.0}},
-                    Gamma{1.0, 1.0, 1.0},
-                    Beta{0.0, 0.0, 0.0},
-                    Mean{-2.0, 2.0, -3.0},
-                    Variance{0.75, 0.75, 0.75},
-                    NormedInput{{3.0, 0.0, 6.0}, {1.0, -4.0, 0.0}});
+        BatchNormInferenceTest(test_case);
     }
-    bool test_variance()
-    {
-        return test(Input{{3.0, 5.0, 1.0}, {-3.0, -5.0, -1.0}},
-                    Gamma{1.0, 1.0, 1.0},
-                    Beta{0.0, 0.0, 0.0},
-                    Mean{0.0, 0.0, 0.0},
-                    Variance{2.0, 6.0, 0.0},
-                    NormedInput{{2.0, 2.0, 2.0}, {-2.0, -2.0, -2.0}});
-    }
-};
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f64)
-{
-    using T = double;
-    auto& et = element::f64;
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    BatchNormInferenceTesterNonZeroEpsilon<T> bnt(backend, et);
-    EXPECT_TRUE(bnt.test_gamma()) << "Gamma test";
-    EXPECT_TRUE(bnt.test_beta()) << "Beta test";
-    EXPECT_TRUE(bnt.test_mean()) << "Mean test";
-    EXPECT_TRUE(bnt.test_variance()) << "Variance test";
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f32)
-{
-    using T = float;
-    auto& et = element::f32;
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    BatchNormInferenceTesterNonZeroEpsilon<T> bnt(backend, et);
-    EXPECT_TRUE(bnt.test_gamma()) << "Gamma test";
-    EXPECT_TRUE(bnt.test_beta()) << "Beta test";
-    EXPECT_TRUE(bnt.test_mean()) << "Mean test";
-    EXPECT_TRUE(bnt.test_variance()) << "Variance test";
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication)
-{
-    auto input_shape = Shape{2, 2, 2, 1};
-    auto input = make_shared<op::Parameter>(element::f32, input_shape);
-
-    auto mvgb_shape = Shape{2};
-    auto mvgb = make_shared<op::Parameter>(element::f32, mvgb_shape);
-
-    double eps = 0.001;
-    auto shape_r = Shape{2, 2, 2, 1};
-    auto bn = make_shared<op::BatchNormInference>(input, mvgb, mvgb, mvgb, mvgb, eps);
-
-    auto f = make_shared<Function>(bn, ParameterVector{input, mvgb, mvgb, mvgb, mvgb});
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    // Create some tensors for input/output
-    auto _input = backend->create_tensor(element::f32, input_shape);
-    copy_data(_input,
-              vector<float>{0.54881352f,
-                            0.71518934f,
-                            0.60276335f,
-                            0.54488319f,
-                            0.42365479f,
-                            0.64589411f,
-                            0.4375872f,
-                            0.89177299f});
-
-    auto _mvgb = backend->create_tensor(element::f32, mvgb_shape);
-    copy_data(_mvgb, vector<float>{1.0f, 1.0f});
-    auto bn_output = backend->create_tensor(element::f32, shape_r);
-
-    vector<float> expected_result{0.54903894f,
-                                  0.71533161f,
-                                  0.60296183f,
-                                  0.54511058f,
-                                  0.42394274f,
-                                  0.64607101f,
-                                  0.43786817f,
-                                  0.89182704f};
-    auto handle = backend->compile(f);
-    handle->call_with_validate({bn_output}, {_input, _mvgb, _mvgb, _mvgb, _mvgb});
-
-    ASSERT_TRUE(
-        ngraph::test::all_close(expected_result, read_vector<float>(bn_output), 1e-3f, 1e-4f));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5)
-{
-    auto input_shape = Shape{2, 2, 2, 1};
-    auto input = make_shared<op::Parameter>(element::f32, input_shape);
-
-    auto mvgb_shape = Shape{2};
-    auto mvgb = make_shared<op::Parameter>(element::f32, mvgb_shape);
-
-    double eps = 0.001;
-    auto shape_r = Shape{2, 2, 2, 1};
-    auto bn = make_shared<op::v5::BatchNormInference>(input, mvgb, mvgb, mvgb, mvgb, eps);
-
-    auto f = make_shared<Function>(bn, ParameterVector{input, mvgb, mvgb, mvgb, mvgb});
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    // Create some tensors for input/output
-    auto _input = backend->create_tensor(element::f32, input_shape);
-    copy_data(_input,
-              vector<float>{0.54881352f,
-                            0.71518934f,
-                            0.60276335f,
-                            0.54488319f,
-                            0.42365479f,
-                            0.64589411f,
-                            0.4375872f,
-                            0.89177299f});
-
-    auto _mvgb = backend->create_tensor(element::f32, mvgb_shape);
-    copy_data(_mvgb, vector<float>{1.0f, 1.0f});
-    auto bn_output = backend->create_tensor(element::f32, shape_r);
-
-    vector<float> expected_result{0.54903894f,
-                                  0.71533161f,
-                                  0.60296183f,
-                                  0.54511058f,
-                                  0.42394274f,
-                                  0.64607101f,
-                                  0.43786817f,
-                                  0.89182704f};
-    auto handle = backend->compile(f);
-    handle->call_with_validate({bn_output}, {_input, _mvgb, _mvgb, _mvgb, _mvgb});
-
-    ASSERT_TRUE(
-        ngraph::test::all_close(expected_result, read_vector<float>(bn_output), 1e-3f, 1e-4f));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1)
-{
-    auto input_shape = Shape{2, 2, 2, 1};
-    auto input = make_shared<op::Parameter>(element::f32, input_shape);
-    auto gamma_shape = Shape{2};
-    auto gamma = make_shared<op::Parameter>(element::f32, gamma_shape);
-    auto beta_shape = Shape{2};
-    auto beta = make_shared<op::Parameter>(element::f32, beta_shape);
-    auto mean_shape = Shape{2};
-    auto mean = make_shared<op::Parameter>(element::f32, mean_shape);
-    auto var_shape = Shape{2};
-    auto var = make_shared<op::Parameter>(element::f32, var_shape);
-    double eps = 0.001;
-    auto shape_r = Shape{2, 2, 2, 1};
-    auto bn = make_shared<op::BatchNormInference>(input, gamma, beta, mean, var, eps);
-
-    auto f = make_shared<Function>(bn, ParameterVector{input, gamma, beta, mean, var});
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    // Create some tensors for input/output
-    auto _input = backend->create_tensor(element::f32, input_shape);
-    copy_data(_input,
-              vector<float>{0.54881352f,
-                            0.71518934f,
-                            0.60276335f,
-                            0.54488319f,
-                            0.42365479f,
-                            0.64589411f,
-                            0.4375872f,
-                            0.89177299f});
-
-    auto _gamma = backend->create_tensor(element::f32, gamma_shape);
-    copy_data(_gamma, vector<float>{1.0f, 1.0f});
-    auto _beta = backend->create_tensor(element::f32, beta_shape);
-    copy_data(_beta, vector<float>{0.0f, 0.0f});
-    auto _mean = backend->create_tensor(element::f32, mean_shape);
-    copy_data(_mean, vector<float>{0.583388f, 0.619252f});
-    auto _var = backend->create_tensor(element::f32, var_shape);
-    copy_data(_var, vector<float>{0.0119972f, 0.0282681f});
-    auto bn_output = backend->create_tensor(element::f32, shape_r);
-
-    vector<float> expected_result{
-        -0.30327f, 1.1561f, -0.0963782f, -0.434702f, -1.4011f, 0.548275f, -1.06187f, 1.59295f};
-    auto handle = backend->compile(f);
-    handle->call_with_validate({bn_output}, {_input, _gamma, _beta, _mean, _var});
-
-    ASSERT_TRUE(
-        ngraph::test::all_close(expected_result, read_vector<float>(bn_output), 1e-3f, 1e-4f));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1_v5)
+NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_4d_f32)
 {
-    auto input_shape = Shape{2, 2, 2, 1};
-    auto input = make_shared<op::Parameter>(element::f32, input_shape);
-    auto gamma_shape = Shape{2};
-    auto gamma = make_shared<op::Parameter>(element::f32, gamma_shape);
-    auto beta_shape = Shape{2};
-    auto beta = make_shared<op::Parameter>(element::f32, beta_shape);
-    auto mean_shape = Shape{2};
-    auto mean = make_shared<op::Parameter>(element::f32, mean_shape);
-    auto var_shape = Shape{2};
-    auto var = make_shared<op::Parameter>(element::f32, var_shape);
-    double eps = 0.001;
-    auto shape_r = Shape{2, 2, 2, 1};
-    auto bn = make_shared<op::v5::BatchNormInference>(input, gamma, beta, mean, var, eps);
-
-    auto f = make_shared<Function>(bn, ParameterVector{input, gamma, beta, mean, var});
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    // Create some tensors for input/output
-    auto _input = backend->create_tensor(element::f32, input_shape);
-    copy_data(_input,
-              vector<float>{0.54881352f,
-                            0.71518934f,
-                            0.60276335f,
-                            0.54488319f,
-                            0.42365479f,
-                            0.64589411f,
-                            0.4375872f,
-                            0.89177299f});
-
-    auto _gamma = backend->create_tensor(element::f32, gamma_shape);
-    copy_data(_gamma, vector<float>{1.0f, 1.0f});
-    auto _beta = backend->create_tensor(element::f32, beta_shape);
-    copy_data(_beta, vector<float>{0.0f, 0.0f});
-    auto _mean = backend->create_tensor(element::f32, mean_shape);
-    copy_data(_mean, vector<float>{0.583388f, 0.619252f});
-    auto _var = backend->create_tensor(element::f32, var_shape);
-    copy_data(_var, vector<float>{0.0119972f, 0.0282681f});
-    auto bn_output = backend->create_tensor(element::f32, shape_r);
-
-    vector<float> expected_result{
-        -0.30327f, 1.1561f, -0.0963782f, -0.434702f, -1.4011f, 0.548275f, -1.06187f, 1.59295f};
-    auto handle = backend->compile(f);
-    handle->call_with_validate({bn_output}, {_input, _gamma, _beta, _mean, _var});
-
-    ASSERT_TRUE(
-        ngraph::test::all_close(expected_result, read_vector<float>(bn_output), 1e-3f, 1e-4f));
+    float eps = 0.001;
+    Shape in_shape{2, 2, 2, 1};
+
+    std::vector<float> in{0.54881352f,
+                          0.71518934f,
+                          0.60276335f,
+                          0.54488319f,
+                          0.42365479f,
+                          0.64589411f,
+                          0.4375872f,
+                          0.89177299f};
+
+    std::vector<std::vector<float>> ch_in_1{{1.0, 1.0},
+                                            {1.0, 1.0},
+                                            {1.0, 1.0},
+                                            {1.0, 1.0}};
+    std::vector<float> out_1{0.54903894f,
+                             0.71533161f,
+                             0.60296183f,
+                             0.54511058f,
+                             0.42394274f,
+                             0.64607101f,
+                             0.43786817f,
+                             0.89182704f};
+
+    std::vector<std::vector<float>> ch_in_2{{1.0, 1.0},
+                                            {0.0f, 0.0f},
+                                            {0.583388f, 0.619252f},
+                                            {0.0119972f, 0.0282681f}};
+    std::vector<float> out_2{-0.30327f,
+                             1.1561f,
+                             -0.0963782f,
+                             -0.434702f,
+                             -1.4011f,
+                             0.548275f,
+                             -1.06187f,
+                             1.59295f};
+
+    const std::vector<BatchNormTestParams<float>> batch_norm_tests{
+        BatchNormTestParams<float>{in, in_shape, ch_in_1[0], ch_in_1[1], ch_in_1[2], ch_in_1[3], eps, out_1},
+        BatchNormTestParams<float>{in, in_shape, ch_in_2[0], ch_in_2[1], ch_in_2[2], ch_in_2[3], eps, out_2}
+    };
+
+    for (const auto& test_case : batch_norm_tests)
+    {
+        BatchNormInferenceTest(test_case);
+    }
 }
diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest
index ceeaa049648058..cac9e71d6038ae 100644
--- a/ngraph/test/runtime/ie/unit_test.manifest
+++ b/ngraph/test/runtime/ie/unit_test.manifest
@@ -950,15 +950,6 @@ gelu_f64
 gelu_backprop_factor_f32
 gelu_backprop_factor_f64
 
-# Incorrect precision f64!
-batch_norm_inference_0eps_f64
-batch_norm_inference_f64
-batch_norm_training_0eps_f64
-
-# Function inputs number differ from number of given inputs
-batch_norm_inference_parameters_duplication
-batch_norm_inference_parameters_duplication_v5
-
 backwards_abs
 backwards_acos
 backwards_add
diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp
index 8ab43d6e8efa62..1fc9a8d2f8f18e 100644
--- a/ngraph/test/runtime/interpreter/evaluates_map.cpp
+++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp
@@ -1379,9 +1379,9 @@ namespace
     {
         using T = typename element_type_traits<ET>::value_type;
         runtime::reference::batch_norm_inference<T>(op->get_eps_value(),
+                                                    inputs[2]->get_data_ptr<ET>(),
                                                     inputs[0]->get_data_ptr<ET>(),
                                                     inputs[1]->get_data_ptr<ET>(),
-                                                    inputs[2]->get_data_ptr<ET>(),
                                                     inputs[3]->get_data_ptr<ET>(),
                                                     inputs[4]->get_data_ptr<ET>(),
                                                     outputs[0]->get_data_ptr<ET>(),
@@ -1396,9 +1396,9 @@ namespace
     {
         using T = typename element_type_traits<ET>::value_type;
         runtime::reference::batch_norm_inference<T>(op->get_eps_value(),
+                                                    inputs[0]->get_data_ptr<ET>(),
                                                     inputs[1]->get_data_ptr<ET>(),
                                                     inputs[2]->get_data_ptr<ET>(),
-                                                    inputs[0]->get_data_ptr<ET>(),
                                                     inputs[3]->get_data_ptr<ET>(),
                                                     inputs[4]->get_data_ptr<ET>(),
                                                     outputs[0]->get_data_ptr<ET>(),
diff --git a/ngraph/test/type_prop/batch_norm.cpp b/ngraph/test/type_prop/batch_norm.cpp
index e9185988975381..13abbdf5a4f942 100644
--- a/ngraph/test/type_prop/batch_norm.cpp
+++ b/ngraph/test/type_prop/batch_norm.cpp
@@ -9,614 +9,537 @@
 using namespace std;
 using namespace ngraph;
 
-TEST(type_prop, batch_norm_inference_partial_all_rank_dynamic)
+struct BatchNormInferInputs
 {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    PartialShape gamma_shape{PartialShape::dynamic()};
-    PartialShape beta_shape{PartialShape::dynamic()};
-    PartialShape mean_shape{PartialShape::dynamic()};
-    PartialShape variance_shape{PartialShape::dynamic()};
+    element::Type in_et;
+    PartialShape in_shape;
+    std::string in_name;
+};
+
+struct BatchNormInferParams
+{
+    element::Type data_batch_et;
+    PartialShape data_batch_ps;
+    std::vector<BatchNormInferInputs> inputs;
+    double epsilon;
+};
+
+template <class T>
+std::shared_ptr<Node> makeBatchNormOp(const BatchNormInferParams& p)
+{
+    if (p.inputs.size() != 4)
+    {
+        throw runtime_error("BatchNormInference requires 4 additional inputs for batch "
+                            "normalization transformation");
+    }
+    auto data_batch = make_shared<op::Parameter>(p.data_batch_et, p.data_batch_ps);
+    auto gamma = make_shared<op::Parameter>(p.inputs[0].in_et, p.inputs[0].in_shape);
+    auto beta = make_shared<op::Parameter>(p.inputs[1].in_et, p.inputs[1].in_shape);
+    auto mean = make_shared<op::Parameter>(p.inputs[2].in_et, p.inputs[2].in_shape);
+    auto variance = make_shared<op::Parameter>(p.inputs[3].in_et, p.inputs[3].in_shape);
+    return make_shared<T>(data_batch, gamma, beta, mean, variance, p.epsilon);
+}
+
+template <class T>
+class BatchNormTest : public ::testing::Test
+{
+};
+
+TYPED_TEST_CASE_P(BatchNormTest);
+
+TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_2)
+{
+    PartialShape data_batch_shape{10, 100};
+    element::Type inputs_et = element::f32;
+
+    std::vector<BatchNormInferInputs> ch_inputs =
+    {
+        {inputs_et, PartialShape{100}, "gamma"},
+        {inputs_et, PartialShape{100}, "beta"},
+        {inputs_et, PartialShape{100}, "mean"},
+        {inputs_et, PartialShape{100}, "variance"}
+    };
+
     double epsilon = 0.001;
-    element::Type data_batch_et = element::f32;
-    element::Type gamma_et = element::f32;
-    element::Type beta_et = element::f32;
-    element::Type mean_et = element::f32;
-    element::Type variance_et = element::f32;
 
-    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
-    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
-    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
-    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
-    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
+    const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, epsilon};
+    auto bn = makeBatchNormOp<TypeParam>(params);
 
-    auto bn =
-        make_shared<op::BatchNormInference>(data_batch, gamma, beta, mean, variance, epsilon);
+    ASSERT_EQ(bn->get_output_size(), 1);
+    ASSERT_EQ(bn->get_output_element_type(0), inputs_et);
+    ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_static());
+    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(data_batch_shape));
+}
+
+TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_4)
+{
+    PartialShape data_batch_shape{1, 10, 224, 224};
+    element::Type inputs_et = element::f16;
+
+    std::vector<BatchNormInferInputs> ch_inputs =
+    {
+        {inputs_et, PartialShape{10}, "gamma"},
+        {inputs_et, PartialShape{10}, "beta"},
+        {inputs_et, PartialShape{10}, "mean"},
+        {inputs_et, PartialShape{10}, "variance"}
+    };
+
+    double epsilon = 0.001;
+
+    const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, epsilon};
+    auto bn = makeBatchNormOp<TypeParam>(params);
 
     ASSERT_EQ(bn->get_output_size(), 1);
-    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
+    ASSERT_EQ(bn->get_output_element_type(0), inputs_et);
+    ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_static());
+    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(data_batch_shape));
+}
+
+TYPED_TEST_P(BatchNormTest, batch_norm_inference_inputs_rank_dynamic)
+{
+    PartialShape data_batch_shape{PartialShape::dynamic()};
+    element::Type inputs_et = element::f32;
+
+    std::vector<BatchNormInferInputs> ch_inputs =
+    {
+        {inputs_et, PartialShape::dynamic(), "gamma"},
+        {inputs_et, PartialShape::dynamic(), "beta"},
+        {inputs_et, PartialShape::dynamic(), "mean"},
+        {inputs_et, PartialShape::dynamic(), "variance"}
+    };
+
+    double epsilon = 0.001;
+
+    const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, epsilon};
+    auto bn = makeBatchNormOp<TypeParam>(params);
+
+    ASSERT_EQ(bn->get_output_size(), 1);
+    ASSERT_EQ(bn->get_output_element_type(0), inputs_et);
     ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_dynamic());
 }
 
-TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_ok)
+TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_channel_inputs_rank_dynamic)
 {
     PartialShape data_batch_shape{
         64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape gamma_shape{PartialShape::dynamic()};
-    PartialShape beta_shape{PartialShape::dynamic()};
-    PartialShape mean_shape{PartialShape::dynamic()};
-    PartialShape variance_shape{PartialShape::dynamic()};
-    double epsilon = 0.001;
-    element::Type data_batch_et = element::f32;
-    element::Type gamma_et = element::f32;
-    element::Type beta_et = element::f32;
-    element::Type mean_et = element::f32;
-    element::Type variance_et = element::f32;
+    element::Type inputs_et = element::f32;
 
-    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
-    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
-    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
-    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
-    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
+    std::vector<BatchNormInferInputs> ch_inputs =
+    {
+        {inputs_et, PartialShape::dynamic(), "gamma"},
+        {inputs_et, PartialShape::dynamic(), "beta"},
+        {inputs_et, PartialShape::dynamic(), "mean"},
+        {inputs_et, PartialShape::dynamic(), "variance"}
+    };
+
+    double epsilon = 0.001;
 
-    auto bn =
-        make_shared<op::BatchNormInference>(data_batch, gamma, beta, mean, variance, epsilon);
+    const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, epsilon};
+    auto bn = makeBatchNormOp<TypeParam>(params);
 
     ASSERT_EQ(bn->get_output_size(), 1);
-    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
+    ASSERT_EQ(bn->get_output_element_type(0), inputs_et);
     ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(
         PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
 }
 
-TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_zero_channels)
+TYPED_TEST_P(
+    BatchNormTest,
+    batch_norm_inference_data_batch_rank_dynamic_some_channel_inputs_rank_static)
 {
-    PartialShape data_batch_shape{
-        Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape gamma_shape{PartialShape::dynamic()};
-    PartialShape beta_shape{PartialShape::dynamic()};
-    PartialShape mean_shape{PartialShape::dynamic()};
-    PartialShape variance_shape{PartialShape::dynamic()};
-    double epsilon = 0.001;
-    element::Type data_batch_et = element::f32;
-    element::Type gamma_et = element::f32;
-    element::Type beta_et = element::f32;
-    element::Type mean_et = element::f32;
-    element::Type variance_et = element::f32;
-
-    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
-    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
-    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
-    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
-    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
+    PartialShape data_batch_shape{PartialShape::dynamic()};
+    element::Type input_et = element::f32;
 
-    try
+    std::vector<BatchNormInferInputs> inputs =
     {
-        auto bn = make_shared<op::BatchNormInference>(
-            data_batch, gamma, beta, mean, variance, epsilon);
-        FAIL() << "Zero channel count not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Channel count must be at least 1"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
+        {input_et, PartialShape{Dimension::dynamic()}, "gamma"},
+        {input_et, PartialShape::dynamic(), "beta"},
+        {input_et, PartialShape{Dimension::dynamic()}, "mean"},
+        {input_et, PartialShape::dynamic(), "variance"}
+    };
+
+    double epsilon = 0.001;
+
+    const BatchNormInferParams params{input_et, data_batch_shape, inputs, epsilon};
+    auto bn = makeBatchNormOp<TypeParam>(params);
+
+    ASSERT_EQ(bn->get_output_size(), 1);
+    ASSERT_EQ(bn->get_output_element_type(0), input_et);
+    ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_dynamic());
 }
 
-TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static_dynamic_ok)
+TYPED_TEST_P(BatchNormTest,
+             batch_norm_inference_data_batch_rank_static_some_channel_inputs_rank_static)
 {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    PartialShape gamma_shape{Dimension::dynamic()};
-    PartialShape beta_shape{PartialShape::dynamic()};
-    PartialShape mean_shape{Dimension::dynamic()};
-    PartialShape variance_shape{PartialShape::dynamic()};
-    double epsilon = 0.001;
-    element::Type data_batch_et = element::f32;
-    element::Type gamma_et = element::f32;
-    element::Type beta_et = element::f32;
-    element::Type mean_et = element::f32;
-    element::Type variance_et = element::f32;
+    PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), 224};
+    element::Type input_et = element::f32;
+
+    std::vector<BatchNormInferInputs> inputs =
+    {
+        {input_et, PartialShape{3}, "gamma"},
+        {input_et, PartialShape::dynamic(), "beta"},
+        {input_et, PartialShape{3}, "mean"},
+        {input_et, PartialShape{Dimension::dynamic()}, "variance"}
+    };
 
-    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
-    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
-    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
-    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
-    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
+    double epsilon = 0.001;
 
-    auto bn =
-        make_shared<op::BatchNormInference>(data_batch, gamma, beta, mean, variance, epsilon);
+    const BatchNormInferParams params{input_et, data_batch_shape, inputs, epsilon};
+    auto bn = makeBatchNormOp<TypeParam>(params);
 
     ASSERT_EQ(bn->get_output_size(), 1);
-    ASSERT_EQ(bn->get_output_element_type(0), data_batch_et);
-    ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_dynamic());
+    ASSERT_EQ(bn->get_output_element_type(0), input_et);
+    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(
+        PartialShape{64, 3, Dimension::dynamic(), 224}));
 }
 
-TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static_dynamic_wrong_rank)
+TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_inputs_element_types)
 {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    PartialShape gamma_shape{Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape beta_shape{PartialShape::dynamic()};
-    PartialShape mean_shape{Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape variance_shape{PartialShape::dynamic()};
-    double epsilon = 0.001;
-    element::Type data_batch_et = element::f32;
-    element::Type gamma_et = element::f32;
-    element::Type beta_et = element::f32;
-    element::Type mean_et = element::f32;
-    element::Type variance_et = element::f32;
-
-    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
-    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
-    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
-    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
-    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
+    PartialShape data_batch_shape{10, 100};
 
-    try
-    {
-        auto bn = make_shared<op::BatchNormInference>(
-            data_batch, gamma, beta, mean, variance, epsilon);
-        FAIL() << "Wrong gamma/beta/mean/variance shape not detected";
-    }
-    catch (const NodeValidationFailure& error)
+    const std::vector<element::Type> inputs_et{
+        element::i32,
+        element::u32,
+        element::boolean
+    };
+
+    double eps = 0.001;
+
+    std::vector<BatchNormInferParams> bn_tests;
+    for (const auto& et : inputs_et)
     {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Shape for gamma/beta/mean/variance ({?,?}) does not have rank 1"));
+        std::vector<BatchNormInferInputs> ch_inputs =
+        {
+            {et, PartialShape{100}, "gamma"},
+            {et, PartialShape{100}, "beta"},
+            {et, PartialShape{100}, "mean"},
+            {et, PartialShape{100}, "variance"}
+        };
+
+        bn_tests.push_back(BatchNormInferParams{et, data_batch_shape, ch_inputs, eps});
     }
-    catch (...)
+
+    for (const auto& params : bn_tests)
     {
-        FAIL() << "Deduced type check failed for unexpected reason";
+        try
+        {
+            auto bn = makeBatchNormOp<TypeParam>(params);
+            FAIL() << "Invalid input element types not detected";
+        }
+        catch (const NodeValidationFailure& error)
+        {
+            EXPECT_HAS_SUBSTRING(error.what(), "Input element types must be floating-point");
+        }
+        catch (...)
+        {
+            FAIL() << "Input element types check failed for unexpected reason";
+        }
     }
 }
 
-TEST(type_prop,
-     batch_norm_inference_partial_input_rank_dynamic_some_rank_static_dynamic_inconsistent_rank)
+TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_inputs_element_types)
 {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    PartialShape gamma_shape{3, Dimension::dynamic()};
-    PartialShape beta_shape{PartialShape::dynamic()};
-    PartialShape mean_shape{Dimension::dynamic()};
-    PartialShape variance_shape{PartialShape::dynamic()};
-    double epsilon = 0.001;
-    element::Type data_batch_et = element::f32;
-    element::Type gamma_et = element::f32;
-    element::Type beta_et = element::f32;
-    element::Type mean_et = element::f32;
-    element::Type variance_et = element::f32;
-
-    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
-    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
-    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
-    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
-    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
+    // Data batch input element type and shape
    const element::Type data_batch_et = element::f32;
+    const PartialShape data_batch_ps{10, 200};
 
-    try
+    // Invalid combination of element types of gamma/beta/mean/variance inputs
+    vector<BatchNormInferInputs> bn_ch_inputs =
     {
-        auto bn = make_shared<op::BatchNormInference>(
-            data_batch, gamma, beta, mean, variance, epsilon);
-        FAIL() << "Inconsistent gamma/beta/mean/variance shape not detected";
-    }
-    catch (const NodeValidationFailure& error)
+        {element::f32, PartialShape{200}, "gamma"},
+        {element::f32, PartialShape{200}, "beta"},
+        {element::f32, PartialShape{200}, "mean"},
+        {element::f32, PartialShape{200}, "variance"}
+    };
+
+    const double epsilon = 0.001;
+
+    std::vector<BatchNormInferParams> bn_params;
+    bn_params.push_back(BatchNormInferParams{element::f16,
+                                             data_batch_ps,
+                                             bn_ch_inputs,
+                                             epsilon});
+
+    for (size_t i = 0; i < bn_ch_inputs.size(); i++)
     {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Shapes for gamma/beta/mean/variance do not match"));
+        std::vector<BatchNormInferInputs> inputs = bn_ch_inputs;
+        (inputs[i]).in_et = element::f16;
+        bn_params.push_back(BatchNormInferParams{data_batch_et,
+                                                 data_batch_ps,
+                                                 inputs,
+                                                 epsilon});
     }
-    catch (...)
+
+    // Run tests with incompatible input element types
+    for (const auto& bn_p : bn_params)
     {
-        FAIL() << "Deduced type check failed for unexpected reason";
+        try
+        {
+            auto bn = makeBatchNormOp<TypeParam>(bn_p);
+            FAIL() << "Incompatible input element types not detected";
+        }
+        catch (const NodeValidationFailure& error)
+        {
+            EXPECT_HAS_SUBSTRING(error.what(), "Input element types do not match");
+        }
+        catch (...)
+        {
+            FAIL() << "Input element types check failed for unexpected reason";
+        }
     }
 }
 
-TEST(type_prop,
-     batch_norm_inference_partial_input_rank_dynamic_some_static_inconsistent_channel_count)
+TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_data_batch_input_rank)
 {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    PartialShape gamma_shape{3};
-    PartialShape beta_shape{PartialShape::dynamic()};
-    PartialShape mean_shape{4};
-    PartialShape variance_shape{PartialShape::dynamic()};
-    double epsilon = 0.001;
-    element::Type data_batch_et = element::f32;
-    element::Type gamma_et = element::f32;
-    element::Type beta_et = element::f32;
-    element::Type mean_et = element::f32;
-    element::Type variance_et = element::f32;
-
-    auto data_batch = make_shared<op::Parameter>(data_batch_et, data_batch_shape);
-    auto gamma = make_shared<op::Parameter>(gamma_et, gamma_shape);
-    auto beta = make_shared<op::Parameter>(beta_et, beta_shape);
-    auto mean = make_shared<op::Parameter>(mean_et, mean_shape);
-    auto variance = make_shared<op::Parameter>(variance_et, variance_shape);
+    PartialShape data_batch_shape{Dimension::dynamic()};
+    element::Type inputs_et = element::f32;
+
+    std::vector<BatchNormInferInputs> ch_inputs =
+    {
+        {inputs_et, PartialShape::dynamic(), "gamma"},
+        {inputs_et, PartialShape::dynamic(), "beta"},
+        {inputs_et, PartialShape::dynamic(), "mean"},
+        {inputs_et, PartialShape::dynamic(), "variance"}
+    };
 
+    double epsilon = 0.001;
+
+    const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, epsilon};
     try
     {
-        auto bn = make_shared<op::BatchNormInference>(
-            data_batch, gamma, beta, mean, variance, epsilon);
-        FAIL() << "Inconsistent gamma/beta/mean/variance channel count not detected";
+        auto bn = makeBatchNormOp<TypeParam>(params);
+        FAIL() << "Data batch input with invalid rank 1 not detected";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Shapes for gamma/beta/mean/variance do not match"));
+                             "Input argument must have rank of at least 2 (input argument shape: {?})");
    }
    catch (...)
{ - FAIL() << "Deduced type check failed for unexpected reason"; + FAIL() << "Data batch input rank check failed for unexpected reason"; } } -TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_some_static_ok) +TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_input_ranks) { - PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), 224}; - PartialShape gamma_shape{3}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{3}; - PartialShape variance_shape{PartialShape::dynamic()}; - double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); - - auto bn = - make_shared(data_batch, gamma, beta, mean, variance, epsilon); + PartialShape data_batch_shape{PartialShape::dynamic()}; + element::Type input_et = element::f32; - ASSERT_EQ(bn->get_output_size(), 1); - ASSERT_EQ(bn->get_output_element_type(0), data_batch_et); - ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme( - PartialShape{64, 3, Dimension::dynamic(), 224})); -} + std::vector inputs = + { + {input_et, PartialShape{3, Dimension::dynamic()}, "gamma"}, + {input_et, PartialShape::dynamic(), "beta"}, + {input_et, PartialShape{Dimension::dynamic()}, "mean"}, + {input_et, PartialShape::dynamic(), "variance"} + }; -TEST(type_prop, - batch_norm_inference_partial_input_rank_static_dynamic_some_static_inconsistent_channel_count) -{ - PartialShape data_batch_shape{64, 4, Dimension::dynamic(), 224}; - PartialShape gamma_shape{3}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{3}; - PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); + const BatchNormInferParams params{input_et, data_batch_shape, inputs, epsilon}; try { - auto bn = make_shared( - data_batch, gamma, beta, mean, variance, epsilon); - FAIL() << "Inconsistent input/gamma/beta/mean/variance channel count not detected"; + auto bn = makeBatchNormOp(params); + FAIL() << "Incompatible gamma/beta/mean/variance input ranks not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), - std::string("Input channel dimension (4) does not match " - "shape for gamma/beta/mean/variance ({3})")); + "Shapes for gamma/beta/mean/variance do not match"); } catch (...) 
{ - FAIL() << "Deduced type check failed for unexpected reason"; + FAIL() << "gamma/beta/mean/variance input ranks check failed for unexpected reason"; } } -TEST(type_prop, batch_norm_inference_partial_all_rank_dynamic_v5) +TYPED_TEST_P(BatchNormTest, + batch_norm_inference_incompatible_channel_inputs_channel_count) { PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape gamma_shape{PartialShape::dynamic()}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{PartialShape::dynamic()}; - PartialShape variance_shape{PartialShape::dynamic()}; - double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); - - auto bn = - make_shared(data_batch, gamma, beta, mean, variance, epsilon); + element::Type input_et = element::f32; - ASSERT_EQ(bn->get_output_size(), 1); - ASSERT_EQ(bn->get_output_element_type(0), data_batch_et); - ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_dynamic()); -} - -TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_ok_v5) -{ - PartialShape data_batch_shape{ - 64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}; - PartialShape gamma_shape{PartialShape::dynamic()}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{PartialShape::dynamic()}; - PartialShape variance_shape{PartialShape::dynamic()}; - double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); - - auto bn = - make_shared(data_batch, gamma, beta, mean, variance, epsilon); - - ASSERT_EQ(bn->get_output_size(), 1); - ASSERT_EQ(bn->get_output_element_type(0), data_batch_et); - ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme( - PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()})); -} + std::vector inputs = + { + {input_et, PartialShape{3}, "gamma"}, + {input_et, PartialShape::dynamic(), "beta"}, + {input_et, PartialShape{4}, "mean"}, + {input_et, PartialShape::dynamic(), "variance"} + }; -TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_zero_channels_v5) -{ - PartialShape data_batch_shape{ - Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()}; - PartialShape gamma_shape{PartialShape::dynamic()}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{PartialShape::dynamic()}; - PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, 
data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); + const BatchNormInferParams params{input_et, data_batch_shape, inputs, epsilon}; try { - auto bn = make_shared( - data_batch, gamma, beta, mean, variance, epsilon); - FAIL() << "Zero channel count not detected"; + auto bn = makeBatchNormOp(params); + FAIL() << "Incompatible gamma/beta/mean/variance inputs channel count not detected"; } catch (const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), std::string("Channel count must be at least 1")); + EXPECT_HAS_SUBSTRING(error.what(), + "Shapes for gamma/beta/mean/variance do not match"); } catch (...) { - FAIL() << "Deduced type check failed for unexpected reason"; + FAIL() << "gamma/beta/mean/variance inputs channel count check failed for unexpected reason"; } } -TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static_dynamic_ok_v5) +TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) { PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape gamma_shape{Dimension::dynamic()}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{Dimension::dynamic()}; - PartialShape variance_shape{PartialShape::dynamic()}; - double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type input_et = element::f32; - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); - - auto bn = - make_shared(data_batch, gamma, beta, mean, variance, epsilon); - - ASSERT_EQ(bn->get_output_size(), 1); - ASSERT_EQ(bn->get_output_element_type(0), data_batch_et); - ASSERT_TRUE(bn->get_output_partial_shape(0).rank().is_dynamic()); -} + std::vector inputs = + { + {input_et, PartialShape{Dimension::dynamic(), Dimension::dynamic()}, "gamma"}, + {input_et, PartialShape::dynamic(), "beta"}, + {input_et, PartialShape{Dimension::dynamic(), Dimension::dynamic()}, "mean"}, + {input_et, PartialShape::dynamic(), "variance"} + }; -TEST(type_prop, - batch_norm_inference_partial_input_rank_dynamic_some_rank_static_dynamic_wrong_rank_v5) -{ - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape gamma_shape{Dimension::dynamic(), Dimension::dynamic()}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{Dimension::dynamic(), Dimension::dynamic()}; - PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); + const BatchNormInferParams params{input_et, data_batch_shape, inputs, epsilon}; try { - auto bn = make_shared( - 
data_batch, gamma, beta, mean, variance, epsilon); - FAIL() << "Wrong gamma/beta/mean/variance shape not detected"; + auto bn = makeBatchNormOp(params); + FAIL() << "Invalid rank of gamma/beta/mean/variance inputs not detected"; } catch (const NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING( error.what(), - std::string("Shape for gamma/beta/mean/variance ({?,?}) does not have rank 1")); + "Shape for gamma/beta/mean/variance ({?,?}) does not have rank 1"); } catch (...) { - FAIL() << "Deduced type check failed for unexpected reason"; + FAIL() << "gamma/beta/mean/variance inputs rank check failed for unexpected reason"; } } -TEST(type_prop, - batch_norm_inference_partial_input_rank_dynamic_some_rank_static_dynamic_inconsistent_rank_v5) +TYPED_TEST_P(BatchNormTest, + batch_norm_inference_incompatible_data_batch_and_channel_inputs_channel_count) { - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape gamma_shape{3, Dimension::dynamic()}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{Dimension::dynamic()}; - PartialShape variance_shape{PartialShape::dynamic()}; + PartialShape data_batch_shape{64, 4, Dimension::dynamic(), 224}; + element::Type input_et = element::f32; + + std::vector inputs = + { + {input_et, PartialShape{3}, "gamma"}, + {input_et, PartialShape::dynamic(), "beta"}, + {input_et, PartialShape{3}, "mean"}, + {input_et, PartialShape::dynamic(), "variance"} + }; + double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); + const BatchNormInferParams params{input_et, data_batch_shape, inputs, epsilon}; try { - auto bn = make_shared( - data_batch, gamma, beta, mean, variance, epsilon); - FAIL() << "Inconsistent gamma/beta/mean/variance shape not detected"; + auto bn = makeBatchNormOp(params); + FAIL() << "Incompatible data batch and gamma/beta/mean/variance channel count not detected"; } catch (const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Shapes for gamma/beta/mean/variance do not match")); + EXPECT_HAS_SUBSTRING(error.what(), "Input channel dimension (4) does not match " + "shape for gamma/beta/mean/variance ({3})"); } catch (...) 
{ - FAIL() << "Deduced type check failed for unexpected reason"; + FAIL() << "Data batch and gamma/beta/mean/variance channel count check failed for unexpected reason"; } } -TEST(type_prop, - batch_norm_inference_partial_input_rank_dynamic_some_static_inconsistent_channel_count_v5) +TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_zero) { - PartialShape data_batch_shape{PartialShape::dynamic()}; - PartialShape gamma_shape{3}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{4}; - PartialShape variance_shape{PartialShape::dynamic()}; - double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); + PartialShape data_batch_shape{ + Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()}; + element::Type inputs_et = element::f32; + std::vector ch_inputs = + { + {inputs_et, PartialShape::dynamic(), "gamma"}, + {inputs_et, PartialShape::dynamic(), "beta"}, + {inputs_et, PartialShape::dynamic(), "mean"}, + {inputs_et, PartialShape::dynamic(), "variance"} + }; + + double epsilon = 0.001; + + const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, epsilon}; try { - auto bn = make_shared( - data_batch, gamma, beta, mean, variance, epsilon); - FAIL() << "Inconsistent gamma/beta/mean/variance channel count not detected"; + auto bn = makeBatchNormOp(params); + FAIL() << "Data batch channel count zero not detected"; } catch (const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Shapes for gamma/beta/mean/variance do not match")); + EXPECT_HAS_SUBSTRING(error.what(), "Channel count must be at least 1"); } catch (...) 
{ - FAIL() << "Deduced type check failed for unexpected reason"; + FAIL() << "Data batch channel count check failed for unexpected reason"; } } -TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_some_static_ok_v5) +TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_epsilon) { - PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), 224}; - PartialShape gamma_shape{3}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{3}; - PartialShape variance_shape{PartialShape::dynamic()}; - double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + PartialShape data_batch_shape{10, 100}; + element::Type inputs_et = element::f32; - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); + std::vector ch_inputs = + { + {inputs_et, PartialShape{100}, "gamma"}, + {inputs_et, PartialShape{100}, "beta"}, + {inputs_et, PartialShape{100}, "mean"}, + {inputs_et, PartialShape{100}, "variance"} + }; - auto bn = - make_shared(data_batch, gamma, beta, mean, variance, epsilon); + double eps_zero = 0.0; + double eps_neg = -1.0; - ASSERT_EQ(bn->get_output_size(), 1); - ASSERT_EQ(bn->get_output_element_type(0), data_batch_et); - ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme( - PartialShape{64, 3, Dimension::dynamic(), 224})); -} + const std::vector bn_tests{ + BatchNormInferParams{inputs_et, data_batch_shape, ch_inputs, eps_zero}, + BatchNormInferParams{inputs_et, data_batch_shape, ch_inputs, eps_neg} + }; -TEST( - type_prop, - batch_norm_inference_partial_input_rank_static_dynamic_some_static_inconsistent_channel_count_v5) -{ - PartialShape data_batch_shape{64, 4, Dimension::dynamic(), 224}; - PartialShape gamma_shape{3}; - PartialShape beta_shape{PartialShape::dynamic()}; - PartialShape mean_shape{3}; - PartialShape variance_shape{PartialShape::dynamic()}; - double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; - - auto data_batch = make_shared(data_batch_et, data_batch_shape); - auto gamma = make_shared(gamma_et, gamma_shape); - auto beta = make_shared(beta_et, beta_shape); - auto mean = make_shared(mean_et, mean_shape); - auto variance = make_shared(variance_et, variance_shape); - - try - { - auto bn = make_shared( - data_batch, gamma, beta, mean, variance, epsilon); - FAIL() << "Inconsistent input/gamma/beta/mean/variance channel count not detected"; - } - catch (const NodeValidationFailure& error) + for(const auto& params : bn_tests) { - EXPECT_HAS_SUBSTRING(error.what(), - std::string("Input channel dimension (4) does not match " - "shape for gamma/beta/mean/variance ({3})")); - } - catch (...) - { - FAIL() << "Deduced type check failed for unexpected reason"; + try + { + auto bn = makeBatchNormOp(params); + FAIL() << "Invalid 'epsilon' attribute value not detected"; + } + catch (const NodeValidationFailure& error) + { + EXPECT_HAS_SUBSTRING(error.what(), "Attribute 'epsilon' must have non-zero positive floating-point value."); + } + catch (...) 
+ { + FAIL() << "Positive 'epsilon' attribute value check failed for unexpected reason"; + } } } + +REGISTER_TYPED_TEST_CASE_P( + BatchNormTest, + batch_norm_inference_basic_data_batch_rank_2, + batch_norm_inference_basic_data_batch_rank_4, + batch_norm_inference_inputs_rank_dynamic, + batch_norm_inference_data_batch_rank_static_channel_inputs_rank_dynamic, + batch_norm_inference_data_batch_rank_dynamic_some_channel_inputs_rank_static, + batch_norm_inference_data_batch_rank_static_some_channel_inputs_rank_static, + batch_norm_inference_invalid_inputs_element_types, + batch_norm_inference_incompatible_inputs_element_types, + batch_norm_inference_invalid_data_batch_input_rank, + batch_norm_inference_incompatible_channel_input_ranks, + batch_norm_inference_incompatible_channel_inputs_channel_count, + batch_norm_inference_invalid_channel_inputs_rank, + batch_norm_inference_incompatible_data_batch_and_channel_inputs_channel_count, + batch_norm_inference_invalid_input_channels_count_zero, + batch_norm_inference_invalid_epsilon); + +using Types = ::testing::Types; +INSTANTIATE_TYPED_TEST_CASE_P(type_prop, BatchNormTest, Types, ); diff --git a/ngraph/test/visitors/op/batch_norm.cpp b/ngraph/test/visitors/op/batch_norm.cpp new file mode 100644 index 00000000000000..9f5e6e0bda2192 --- /dev/null +++ b/ngraph/test/visitors/op/batch_norm.cpp @@ -0,0 +1,56 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" + +#include "ngraph/ngraph.hpp" +#include "ngraph/op/util/attr_types.hpp" +#include "ngraph/opsets/opset1.hpp" +#include "ngraph/opsets/opset3.hpp" +#include "ngraph/opsets/opset4.hpp" +#include "ngraph/opsets/opset5.hpp" + +#include "util/visitor.hpp" + +using namespace std; +using namespace ngraph; +using ngraph::test::NodeBuilder; +using ngraph::test::ValueMap; + +template +class BatchNormAttrTest : public ::testing::Test +{ +}; + +TYPED_TEST_CASE_P(BatchNormAttrTest); + +TYPED_TEST_P(BatchNormAttrTest, batch_norm_inference_op) +{ + PartialShape in_shape{1, 10}; + PartialShape ch_shape{in_shape[1]}; + element::Type et = element::f32; + double epsilon = 0.001; + + NodeBuilder::get_ops().register_factory(); + auto data_batch = make_shared(et, in_shape); + auto gamma = make_shared(et, ch_shape); + auto beta = make_shared(et, ch_shape); + auto mean = make_shared(et, ch_shape); + auto var = make_shared(et, ch_shape); + auto batch_norm = make_shared(data_batch, gamma, beta, mean, var, epsilon); + + const auto expected_attr_count = 1; + NodeBuilder builder(batch_norm); + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); + auto g_batch_norm = as_type_ptr(builder.create()); + EXPECT_EQ(g_batch_norm->get_eps_value(), batch_norm->get_eps_value()); +} + +REGISTER_TYPED_TEST_CASE_P( + BatchNormAttrTest, + batch_norm_inference_op); + +using Types = ::testing::Types; + +INSTANTIATE_TYPED_TEST_CASE_P(attributes, BatchNormAttrTest, Types); From f458bd4384ef251e8d440f4d172fd49f9f4a30bc Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 12 May 2021 13:56:12 +0300 Subject: [PATCH 06/27] [IE CLDNN] Disable CoreThreadingTestsWithIterations test skipping (#5495) --- .../plugin/gpu/shared_tests_instances/skip_tests_config.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp index a9e6a6aeac0750..8cad5eddcd3fda 100644 --- 
a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
@@ -11,8 +11,6 @@ std::vector<std::string> disabledTestPatterns() {
     return {
             //TODO: Issue: 34748
             R"(.*(ComparisonLayerTest).*)",
-            // TODO: Issue: 39014
-            R"(.*CoreThreadingTestsWithIterations.*smoke_LoadNetwork.*)",
             // TODO: Issue: 39612
             R"(.*Interpolate.*cubic.*tf_half_pixel_for_nn.*FP16.*)",
             // Expected behavior

From abd663463d0601fbf1281ebae063b5e69b09bc69 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Wed, 12 May 2021 14:17:43 +0300
Subject: [PATCH 07/27] Make SO object a mandatory argument for VariableState
 (#5572)

---
 inference-engine/include/cpp/ie_executable_network.hpp | 4 ++--
 inference-engine/include/cpp/ie_infer_request.hpp      | 4 ++--
 inference-engine/include/cpp/ie_memory_state.hpp       | 9 +++++----
 .../src/inference_engine/cpp/ie_variable_state.cpp     | 7 ++++---
 4 files changed, 13 insertions(+), 11 deletions(-)
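[Editor's note: an illustrative sketch of the invariant this patch enforces; it is not part of the diff. The wrapper must hold the plugin's shared-object handle next to the implementation pointer so the plugin library cannot be unloaded while the object it created is still referenced; making the handle a mandatory constructor argument keeps callers from dropping it. 'Impl' below is a hypothetical stand-in, while 'SharedObjectLoader' follows the headers touched here.]

#include <memory>

struct Impl {};                // hypothetical plugin-side implementation type
struct SharedObjectLoader {};  // stand-in for InferenceEngine::details::SharedObjectLoader

// Hedged sketch of the ownership pattern, not the real header:
class WrapperSketch {
    std::shared_ptr<Impl> _impl;                // object whose code lives inside the plugin .so
    std::shared_ptr<SharedObjectLoader> _so;    // keeps the .so mapped alongside _impl
public:
    WrapperSketch(const std::shared_ptr<Impl>& impl,
                  const std::shared_ptr<SharedObjectLoader>& so)
        : _impl(impl), _so(so) {}               // 'so' is required; no default argument anymore
};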
diff --git a/inference-engine/include/cpp/ie_executable_network.hpp b/inference-engine/include/cpp/ie_executable_network.hpp
index 6f6866997547af..1001e65903df0d 100644
--- a/inference-engine/include/cpp/ie_executable_network.hpp
+++ b/inference-engine/include/cpp/ie_executable_network.hpp
@@ -35,8 +35,8 @@ class INFERENCE_ENGINE_API_CLASS(ExecutableNetwork) {
     std::shared_ptr<IExecutableNetworkInternal> _impl;
     std::shared_ptr<details::SharedObjectLoader> _so;

-    explicit ExecutableNetwork(const std::shared_ptr<IExecutableNetworkInternal>& impl,
-                               const std::shared_ptr<details::SharedObjectLoader>& so);
+    ExecutableNetwork(const std::shared_ptr<IExecutableNetworkInternal>& impl,
+                      const std::shared_ptr<details::SharedObjectLoader>& so);

     friend class InferencePlugin;

diff --git a/inference-engine/include/cpp/ie_infer_request.hpp b/inference-engine/include/cpp/ie_infer_request.hpp
index 4ca0e59335d896..8ae8f1be3b948a 100644
--- a/inference-engine/include/cpp/ie_infer_request.hpp
+++ b/inference-engine/include/cpp/ie_infer_request.hpp
@@ -36,8 +36,8 @@ class INFERENCE_ENGINE_API_CLASS(InferRequest) {
     std::shared_ptr<IInferRequestInternal> _impl;
     std::shared_ptr<details::SharedObjectLoader> _so;

-    explicit InferRequest(const std::shared_ptr<IInferRequestInternal>& impl,
-                          const std::shared_ptr<details::SharedObjectLoader>& so);
+    InferRequest(const std::shared_ptr<IInferRequestInternal>& impl,
+                 const std::shared_ptr<details::SharedObjectLoader>& so);

     friend class ExecutableNetwork;

diff --git a/inference-engine/include/cpp/ie_memory_state.hpp b/inference-engine/include/cpp/ie_memory_state.hpp
index 5baecc2de5c4b0..6907862efc1048 100644
--- a/inference-engine/include/cpp/ie_memory_state.hpp
+++ b/inference-engine/include/cpp/ie_memory_state.hpp
@@ -11,9 +11,10 @@
 #pragma once

 #include <memory>
+#include <string>

+#include "ie_api.h"
 #include "ie_blob.h"
-#include "details/ie_so_loader.h"

 namespace InferenceEngine {

@@ -28,15 +29,15 @@ class IVariableStateInternal;
  */
 class INFERENCE_ENGINE_API_CLASS(VariableState) {
     std::shared_ptr<IVariableStateInternal> _impl = nullptr;
-    details::SharedObjectLoader::Ptr _so = nullptr;
+    std::shared_ptr<details::SharedObjectLoader> _so = nullptr;

     /**
      * @brief Constructs VariableState from the initialized std::shared_ptr
      * @param impl Initialized shared pointer
      * @param so Optional: Plugin to use. This is required to ensure that VariableState can work properly even if plugin object is destroyed.
      */
-    explicit VariableState(const std::shared_ptr<IVariableStateInternal>& impl,
-                           const details::SharedObjectLoader::Ptr& so = {});
+    VariableState(const std::shared_ptr<IVariableStateInternal>& impl,
+                  const std::shared_ptr<details::SharedObjectLoader>& so);

     friend class InferRequest;
     friend class ExecutableNetwork;

diff --git a/inference-engine/src/inference_engine/cpp/ie_variable_state.cpp b/inference-engine/src/inference_engine/cpp/ie_variable_state.cpp
index b50a03beb9600a..5d4a09dcb5d03b 100644
--- a/inference-engine/src/inference_engine/cpp/ie_variable_state.cpp
+++ b/inference-engine/src/inference_engine/cpp/ie_variable_state.cpp
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "details/ie_so_loader.h"
 #include "cpp/ie_memory_state.hpp"
 #include "cpp_interfaces/interface/ie_ivariable_state_internal.hpp"
 #include "exception2status.hpp"
@@ -18,8 +19,8 @@

 namespace InferenceEngine {

-VariableState::VariableState(const std::shared_ptr<IVariableStateInternal>& impl,
-                             const details::SharedObjectLoader::Ptr& so) : _impl(impl), _so(so) {
+VariableState::VariableState(const std::shared_ptr<IVariableStateInternal>& impl,
+                             const std::shared_ptr<details::SharedObjectLoader>& so) : _impl(impl), _so(so) {
     if (impl == nullptr) {
         IE_THROW(NotAllocated) << "VariableState wrapper was not initialized.";
     }
@@ -49,4 +50,4 @@ void VariableState::SetState(Blob::Ptr state) {
     VARIABLE_CALL_STATEMENT(_impl->SetState(state));
 }

-}  // namespace InferenceEngine
\ No newline at end of file
+}  // namespace InferenceEngine

From 2ba5c344bec165a03216312b9f5bcca9bcd36053 Mon Sep 17 00:00:00 2001
From: Jozef Daniecki
Date: Wed, 12 May 2021 13:35:12 +0200
Subject: [PATCH 08/27] Add unit tests for Convert operation (#5558)

* Add Serialization SLT for Convert op.

* Add comment with explanation to convert ref impl.

* Refactored backend tests for Convert operation.

* Give better names to backend tests.

* Add more backend unit tests.

* Fixed tests related to u1/u4/i4 types.
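[Editor's note: a minimal sketch of the validation behavior the new tests pin down, based on the checks added in ngraph/core/src/op/convert.cpp below. The names (op::Parameter, op::Convert, element::u4, NodeValidationFailure) follow the patch; the snippet itself is illustrative and not part of the diff.]

#include <memory>
#include "ngraph/ngraph.hpp"

using namespace ngraph;

int main() {
    // u1/u4/i4 are rejected during type propagation, both as input and as destination.
    auto param = std::make_shared<op::Parameter>(element::u4, Shape{2, 2});
    try {
        // validation runs in the constructor, so this throws immediately
        auto convert = std::make_shared<op::Convert>(param, element::f32);
    } catch (const NodeValidationFailure& e) {
        // expected: "Input element type 'u4' is not supported."
    }
    return 0;
}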
--- .../serialization/single_layer/convert.cpp | 37 ++ .../ngraph/runtime/reference/convert.hpp | 1 + ngraph/core/src/op/convert.cpp | 17 + ngraph/test/backend/convert.in.cpp | 463 ++++++++++++++---- ngraph/test/runtime/ie/unit_test.manifest | 22 +- 5 files changed, 444 insertions(+), 96 deletions(-) create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/convert.cpp diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert.cpp new file mode 100644 index 00000000000000..c56d444dcdc94b --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/convert.hpp" + +#include + +using namespace LayerTestsDefinitions; + +namespace { +const std::vector> inShape = {{1, 2, 3, 4}}; + +const std::vector precisions = { + InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8, + InferenceEngine::Precision::I8, InferenceEngine::Precision::U16, + InferenceEngine::Precision::I16, InferenceEngine::Precision::U32, + InferenceEngine::Precision::I32, InferenceEngine::Precision::U64, + InferenceEngine::Precision::I64, InferenceEngine::Precision::BF16, + InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP64}; + +TEST_P(ConvertLayerTest, Serialize) { + Serialize(); +} + +INSTANTIATE_TEST_CASE_P( + smoke_Serialization_ConvertLayerTest, ConvertLayerTest, + ::testing::Combine(::testing::Values(inShape), + ::testing::ValuesIn(precisions), + ::testing::ValuesIn(precisions), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ConvertLayerTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp index 0374cb18da0ee7..8591f88c79457c 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/convert.hpp @@ -29,6 +29,7 @@ namespace ngraph template <> void convert(const float16* arg, float* out, size_t count); + // overload to handle ngraph::boolean (it is stored as char) template typename std::enable_if::value>::type convert(const TI* arg, TO* out, size_t count) diff --git a/ngraph/core/src/op/convert.cpp b/ngraph/core/src/op/convert.cpp index 47ce6907322ab9..8999113ba08133 100644 --- a/ngraph/core/src/op/convert.cpp +++ b/ngraph/core/src/op/convert.cpp @@ -26,6 +26,23 @@ op::Convert::Convert(const Output& arg, const element::Type& destination_t void op::Convert::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_Convert_validate_and_infer_types); + const element::Type data_et = get_input_element_type(0); + const element::Type destination_et = m_destination_type; + + NODE_VALIDATION_CHECK(this, + data_et != element::u1 && data_et != element::u4 && + data_et != element::i4, + "Input element type '", + data_et, + "' is not supported."); + + NODE_VALIDATION_CHECK(this, + destination_et != element::u1 && destination_et != element::u4 && + destination_et != element::i4, + "Destination element type '", + destination_et, + "' is not supported."); + set_output_type(0, 
m_destination_type, get_input_partial_shape(0)); } diff --git a/ngraph/test/backend/convert.in.cpp b/ngraph/test/backend/convert.in.cpp index ab9f33c6743ff6..46159d0cbc2e40 100644 --- a/ngraph/test/backend/convert.in.cpp +++ b/ngraph/test/backend/convert.in.cpp @@ -21,138 +21,421 @@ using namespace ngraph; static string s_manifest = "${MANIFEST}"; using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); +namespace +{ + template + void ConvertTest(const std::vector& input, + const Shape& input_shape, + const ngraph::element::Type& input_type, + const std::vector& expected_output, + const ngraph::element::Type& expected_output_type) + { + const auto in = make_shared(input_type, input_shape); + const auto convert = make_shared(in, expected_output_type); + const auto f = make_shared(NodeVector{convert}, ParameterVector{in}); + + auto test_case = test::TestCase(f); + test_case.add_input(input); + test_case.add_expected_output(expected_output); + + test_case.run(); + } +} // namespace + +// destination: boolean +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_boolean) +{ + const uint8_t lowest = std::numeric_limits::lowest(); + const uint8_t max = std::numeric_limits::max(); + + const std::vector input{0, 12, 23, 0, lowest, max}; + const Shape input_shape{2, 3}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 1, 1, 0, 0, 1}; + const element::Type expected_output_type = ngraph::element::boolean; -NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32) + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_boolean) { - Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto f = make_shared(make_shared(A, element::f32), ParameterVector{A}); + const int32_t lowest = std::numeric_limits::lowest(); + const int32_t max = std::numeric_limits::max(); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + const std::vector input{0, -12, 23, 0, lowest, max}; + const Shape input_shape{2, 3}; + const element::Type input_type = ngraph::element::i32; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); - copy_data(a, vector{281, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + const std::vector expected_output{0, 1, 1, 0, 1, 1}; + const element::Type expected_output_type = ngraph::element::boolean; - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{281, 2, 3, 4}), read_vector(result))); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); } -NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32) +NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_boolean) { - Shape shape{2, 2}; - auto A = make_shared(element::u16, shape); - auto f = make_shared(make_shared(A, element::f32), ParameterVector{A}); + const float lowest = std::numeric_limits::lowest(); + const float max = std::numeric_limits::max(); + const float min = std::numeric_limits::min(); + const float pos_inf = std::numeric_limits::infinity(); + const float neg_inf = -std::numeric_limits::infinity(); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + const std::vector input{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf}; + const Shape input_shape{3, 3}; + const element::Type input_type = ngraph::element::f32; - // Create some tensors for input/output - auto a = backend->create_tensor(element::u16, shape); - 
copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + const std::vector expected_output{0, 1, 1, 0, 1, 1, 1, 1, 1}; + const element::Type expected_output_type = ngraph::element::boolean; - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); } -NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool) +// destination: bf16 +NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_bf16) { - Shape shape{2, 3}; - auto A = make_shared(element::i32, shape); - auto f = - make_shared(make_shared(A, element::boolean), ParameterVector{A}); + const std::vector input{ + 0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}; + const Shape input_shape{1, 1, 3, 5}; + const element::Type input_type = ngraph::element::f32; - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + const std::vector expected_output(std::begin(input), std::end(input)); + const element::Type expected_output_type = ngraph::element::bf16; - int32_t lowest = std::numeric_limits::lowest(); - int32_t max = std::numeric_limits::max(); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: f16 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_f16) +{ + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); - copy_data(a, vector{0, 12, 23, 0, lowest, max}); - auto result = backend->create_tensor(element::boolean, shape); + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const element::Type expected_output_type = ngraph::element::f16; - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0, 1, 1, 0, 1, 1}), read_vector(result)); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); } -NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool) +// destination: f32 +NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_f32_is_not_supported_yet) { - Shape shape{3, 3}; - auto A = make_shared(element::f32, shape); - auto f = - make_shared(make_shared(A, element::boolean), ParameterVector{A}); + const std::vector input{0x00, 0x00}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::i4; - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + const std::vector expected_output{0.0f, 0.0f, 0.0f, 0.0f}; + const element::Type expected_output_type = ngraph::element::f32; - float lowest = std::numeric_limits::lowest(); - float max = std::numeric_limits::max(); - float min = std::numeric_limits::min(); - float pos_inf = std::numeric_limits::infinity(); - float neg_inf = -std::numeric_limits::infinity(); + ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type), + ngraph::NodeValidationFailure); +} - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf}); - auto result = backend->create_tensor(element::boolean, shape); +NGRAPH_TEST(${BACKEND_NAME}, convert_i8_to_f32) +{ + const std::vector input{-127, -0, 0, 127}; + const Shape 
input_shape{2, 2}; + const element::Type input_type = ngraph::element::i8; + + const std::vector expected_output{-127.0f, -0.0f, 0.0f, 127.0f}; + const element::Type expected_output_type = ngraph::element::f32; - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{0, 1, 1, 0, 1, 1, 1, 1, 1}), read_vector(result)); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); } -NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bf16) +NGRAPH_TEST(${BACKEND_NAME}, convert_i16_to_f32) { - const vector a_data = { - 0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f}; + const std::vector input{-32000, -0, 0, 32000}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::i16; + + const std::vector expected_output{-32000.0f, -0.0f, 0.0f, 32000.0f}; + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_f32) +{ + const std::vector input{-64000, -0, 0, 64000}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::i32; + + const std::vector expected_output{-64000.0f, -0.0f, 0.0f, 64000.0f}; + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_i64_to_f32) +{ + const std::vector input{-64000, -0, 0, 64000}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::i64; + + const std::vector expected_output{-64000.0f, -0.0f, 0.0f, 64000.0f}; + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_f32_is_not_supported_yet) +{ + const std::vector input{0x00}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::u1; + + const std::vector expected_output{0.0f, 0.0f, 0.0f, 0.0f}; + const element::Type expected_output_type = ngraph::element::f32; + + ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type), + ngraph::NodeValidationFailure); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_f32_is_not_supported_yet) +{ + const std::vector input{0x00, 0x00}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::u4; + + const std::vector expected_output{0.0f, 0.0f, 0.0f, 0.0f}; + const element::Type expected_output_type = ngraph::element::f32; + + ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type), + ngraph::NodeValidationFailure); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_f32) +{ + const std::vector input{255, 128, 32, 0}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{255.0f, 128.0f, 32.0f, 0.0f}; + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} - const auto A = make_shared(element::f32, Shape{1, 1, 3, 5}); - const auto convert = make_shared(A, element::bf16); - const auto f = make_shared(NodeVector{convert}, ParameterVector{A}); +NGRAPH_TEST(${BACKEND_NAME}, convert_u16_to_f32) +{ + const std::vector input{64000, 32000, 128, 0}; + const Shape 
input_shape{2, 2}; + const element::Type input_type = ngraph::element::u16; + + const std::vector expected_output{64000.0f, 32000.0f, 128.0f, 0.0f}; + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_u32_to_f32) +{ + const std::vector input{4000000, 2000000, 128, 0}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::u32; + + const std::vector expected_output{4000000.0f, 2000000.0f, 128.0f, 0.0f}; + const element::Type expected_output_type = ngraph::element::f32; - auto test_case = test::TestCase(f); - test_case.add_input(a_data); - test_case.add_expected_output( - std::vector(std::begin(a_data), std::end(a_data))); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_u64_to_f32) +{ + const std::vector input{4000000, 2000000, 128, 0}; + const Shape input_shape{2, 2}; + const element::Type input_type = ngraph::element::u64; + + const std::vector expected_output{4000000.0f, 2000000.0f, 128.0f, 0.0f}; + const element::Type expected_output_type = ngraph::element::f32; - test_case.run(); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); } -NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_float32) +NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_to_f32) { - const vector a_data = { + const std::vector input{ 0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5}; + const Shape input_shape{1, 1, 3, 5}; + const element::Type input_type = ngraph::element::bf16; + + const std::vector expected_output(std::begin(input), std::end(input)); + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} - const auto A = make_shared(element::bf16, Shape{1, 1, 3, 5}); - const auto convert = make_shared(A, element::f32); - const auto f = make_shared(NodeVector{convert}, ParameterVector{A}); +NGRAPH_TEST(${BACKEND_NAME}, convert_f16_to_f32) +{ + const std::vector input{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; + const Shape input_shape{3, 3}; + const element::Type input_type = ngraph::element::f16; + + const std::vector expected_output{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_f32) +{ + const std::vector input{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; + const Shape input_shape{3, 3}; + const element::Type input_type = ngraph::element::f32; + + const std::vector expected_output{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; + const element::Type expected_output_type = ngraph::element::f32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: f64 +// not supported by IE, hence no tests + +// destination: i4 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i4_is_not_supported_yet) +{ + const std::vector input{0, 0, 0, 0}; + const Shape input_shape{4}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0x00, 0x00}; + const element::Type expected_output_type = ngraph::element::i4; + + ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type), + 
ngraph::NodeValidationFailure); +} + +// destination: i8 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i8) +{ + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 128}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const element::Type expected_output_type = ngraph::element::i8; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: i16 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i16) +{ + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const element::Type expected_output_type = ngraph::element::i16; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: i32 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i32) +{ + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const element::Type expected_output_type = ngraph::element::i32; - auto test_case = test::TestCase(f); - test_case.add_input(a_data); - test_case.add_expected_output(std::vector(std::begin(a_data), std::end(a_data))); + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: i64 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i64) +{ + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; + const element::Type expected_output_type = ngraph::element::i64; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: u1 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u1_is_not_supported_yet) +{ + const std::vector input{0, 0, 0, 0}; + const Shape input_shape{4}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0x00}; + const element::Type expected_output_type = ngraph::element::u1; + + ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type), + ngraph::NodeValidationFailure); +} + +// destination: u4 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u4_is_not_supported_yet) +{ + const std::vector input{0, 0, 0, 0}; + const Shape input_shape{4}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0x00, 0x00}; + const element::Type expected_output_type = ngraph::element::u4; - test_case.run(); + ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type), + ngraph::NodeValidationFailure); } -NGRAPH_TEST(${BACKEND_NAME}, convert_fp16_float32) +// destination: u8 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u8) { - std::vector f32vec = {-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5}; - std::vector f16vec(std::begin(f32vec), std::end(f32vec)); - std::vector result(f32vec.size()); - runtime::reference::convert(f16vec.data(), result.data(), f32vec.size()); - EXPECT_EQ(result, f32vec); + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const Shape input_shape{11}; 
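+    // Editor's note (added comment, not part of the original patch): u8 -> u8 is
+    // expected to be a pure pass-through, so expected_output repeats the input.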
+ const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const element::Type expected_output_type = ngraph::element::u8; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: u16 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u16) +{ + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const element::Type expected_output_type = ngraph::element::u16; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); } -NGRAPH_TEST(${BACKEND_NAME}, convert_uint8_fp16) +// destination: u32 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u32) { - std::vector u8vec = {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142}; - std::vector f16vec(std::begin(u8vec), std::end(u8vec)); - std::vector result(u8vec.size()); - runtime::reference::convert(u8vec.data(), result.data(), u8vec.size()); - EXPECT_EQ(result, f16vec); + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const element::Type expected_output_type = ngraph::element::u32; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); +} + +// destination: u64 +NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u64) +{ + const std::vector input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const Shape input_shape{11}; + const element::Type input_type = ngraph::element::u8; + + const std::vector expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127}; + const element::Type expected_output_type = ngraph::element::u64; + + ConvertTest(input, input_shape, input_type, expected_output, expected_output_type); } diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index cac9e71d6038ae..9e8bb87ca58ac2 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -53,7 +53,6 @@ onnx_model_addmul_abc IE_CPU.interpolate_down_scales_const_linear # data [] doesn't exist -convert_float32_bool broadcast_trivial aliased_output bool_init_raw @@ -824,9 +823,6 @@ cum_sum_2dim cum_sum_3d cum_sum_2dim_allmodes -# Cannot create MKLDNNMemoryDesc from TensorDesc. Unsupported precision! 
-convert_uint16_float32

 # Unsupported primitive of type: Ceiling
 ceiling
@@ -886,7 +882,6 @@ strided_slice_stride_optional
 divide_int32
 divide_cpp_rounding_int32
 divide_python_rounding_int32
-convert_int32_bool
 lesseq_int32

 # Constant and Low Precision
@@ -1056,6 +1051,22 @@ roll_3d_input
 roll_3d_input_negative_shift
 roll_negative_axes

+# convert operation
+IE_CPU.convert_f16_to_f32
+IE_CPU.convert_u8_to_f16
+IE_CPU.convert_u8_to_i16
+IE_CPU.convert_u8_to_i64
+IE_CPU.convert_u8_to_u16
+IE_CPU.convert_u8_to_u32
+IE_CPU.convert_u8_to_u64
+IE_CPU.convert_u8_to_boolean
+IE_CPU.convert_i32_to_boolean
+IE_CPU.convert_f32_to_boolean
+IE_CPU.convert_u32_to_f32 # NOT_IMPLEMENTED
+IE_CPU.convert_i4_to_f32 # NOT_IMPLEMENTED
+IE_CPU.convert_u1_to_f32 # NOT_IMPLEMENTED
+IE_CPU.convert_u4_to_f32 # NOT_IMPLEMENTED
+
 #-------------------------------------------------------------------------------
 #
 # Inference Engine CPU plugin excludes
@@ -1418,7 +1429,6 @@ IE_GPU.divide_overload
 IE_GPU.divide_by_zero_float32
 IE_GPU.cosh
 IE_GPU.cos
-IE_GPU.convert_int32_float32
 IE_GPU.concat_negative_axis
 IE_GPU.concat_matrix_colwise
 IE_GPU.concat_matrix_rowwise

From 3c9354816300521c0d6ea8af27773a4c3a666e7a Mon Sep 17 00:00:00 2001
From: Nico Galoppo
Date: Wed, 12 May 2021 04:35:50 -0700
Subject: [PATCH 09/27] Fix python_tools benchmark installation location
 (#5539)

* Fix python_tools benchmark installation location

Before this fix, when running "make install", the benchmark python files
would be installed under /openvino/tools, instead of /openvino/tools/benchmark.
This commit fixes this.

* Alternative implementation

---
 tools/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index fa573d5b519f87..51aeebf395bce8 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -45,7 +45,7 @@ if(ENABLE_PYTHON)
             DESTINATION python/${PYTHON_VERSION}/openvino/tools
             COMPONENT python_tools_${PYTHON_VERSION})

-    install(DIRECTORY benchmark/
+    install(DIRECTORY benchmark
             DESTINATION python/${PYTHON_VERSION}/openvino/tools
             USE_SOURCE_PERMISSIONS
             COMPONENT python_tools_${PYTHON_VERSION})
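[Editor's note: the one-character fix above hinges on CMake's trailing-slash rule for install(DIRECTORY); a hedged illustration, not part of the patch:]

# With a trailing slash, CMake installs the directory's *contents*, so
# benchmark/*.py would land directly in openvino/tools:
#   install(DIRECTORY benchmark/ DESTINATION python/${PYTHON_VERSION}/openvino/tools)
# Without the slash, the directory itself is installed, giving the intended
# python/${PYTHON_VERSION}/openvino/tools/benchmark layout:
install(DIRECTORY benchmark
        DESTINATION python/${PYTHON_VERSION}/openvino/tools)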
From b5c43d8a7875614d40cd6b2e085f3708fd48ca71 Mon Sep 17 00:00:00 2001
From: Anuj Mittal
Date: Wed, 12 May 2021 19:37:23 +0800
Subject: [PATCH 10/27] plugin_api: fix build with gcc 11 (#5546)

Some C++ Standard Library headers have been changed in gcc 11 to no longer
include other headers that they do not need to depend on. Include
<exception> explicitly to avoid:

| inference-engine/src/plugin_api/ie_system_conf.h:21:31: error: 'exception_ptr' in namespace 'std' does not name a type; did you mean 'exception'?
| 21 | INFERENCE_ENGINE_API_CPP(std::exception_ptr&) CurrentException();
|    |                              ^~~~~~~~~~~~~

Signed-off-by: Anuj Mittal

---
 inference-engine/src/plugin_api/ie_system_conf.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/inference-engine/src/plugin_api/ie_system_conf.h b/inference-engine/src/plugin_api/ie_system_conf.h
index 93d633c35f961c..28c1e155e87fa8 100644
--- a/inference-engine/src/plugin_api/ie_system_conf.h
+++ b/inference-engine/src/plugin_api/ie_system_conf.h
@@ -11,6 +11,7 @@
 #include "ie_api.h"

 #include <vector>
+#include <exception>

 namespace InferenceEngine {

From bb943e880ab83f36d90f2c7666d2feaf74174635 Mon Sep 17 00:00:00 2001
From: Irina Efode
Date: Wed, 12 May 2021 14:59:29 +0300
Subject: [PATCH 11/27] [IE TESTS] Add util which allows updating
 skip_test_config for conformance (#5493)

---
 .../utils/update_skip_test_config.py | 86 +++++++++++++++++++
 1 file changed, 86 insertions(+)
 create mode 100644 inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/update_skip_test_config.py

diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/update_skip_test_config.py b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/update_skip_test_config.py
new file mode 100644
index 00000000000000..81f1c750b9685e
--- /dev/null
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/update_skip_test_config.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import glob
+import argparse
+
+
+def parse_arguments():
+    parser = argparse.ArgumentParser()
+
+    skip_config_help = "Paths to folder with skip_config_files"
+    input_folders_help = "Paths to folders with logs"
+
+    parser.add_argument("-s", "--skip_config_folders", help=skip_config_help, nargs='*', required=True)
+    parser.add_argument("-i", "--input_logs", help=input_folders_help, nargs='*', required=True)
+
+    return parser.parse_args()
+
+
+def is_conformance(content: str):
+    if 'conformance' in content:
+        return True
+    return False
+
+
+def is_hung_test(content: str):
+    if content == '' or \
+            "SKIPPED" in content or \
+            "FAILED" in content or \
+            "Unexpected application crash!"
in content or \ + "PASSED" in content: + return False + return True + + +def get_device_name(content: str): + target_device_str = 'TargetDevice=' + pos_start = content.find(target_device_str) + pos_end = content.find('\n') + return f'{content[pos_start + len(target_device_str):pos_end]}'.lower() + + +def get_regex(content: str): + ir_name_str = 'IR_name=' + pos_start = content.find(ir_name_str) + pos_end = content.find('.xml_') + return f'.*{content[pos_start + len(ir_name_str):pos_end]}.*\n' + + +def get_conformance_hung_test(test_log_dirs: list): + regexp = dict() + for test_log_dir in test_log_dirs: + if not os.path.isdir(test_log_dir): + continue + for log_file in glob.glob(os.path.join(test_log_dir, '*/*')): + with open(log_file) as log: + content = log.read() + if is_hung_test(content) or not is_conformance(content): + continue + device = get_device_name(content) + if 'arm' in content: + device = 'arm' + if not device in regexp.keys(): + regexp.update({device: []}) + regexp[device].append(get_regex(content)) + for device, re_list in regexp.items(): + re_list.sort() + return regexp + + +def save_to_file(skip_folder_paths: list, regexps: dict): + for skip_folder_path in skip_folder_paths: + if not os.path.isdir(skip_folder_path): + continue + skip_files_paths = glob.glob(os.path.join(skip_folder_path, 'skip_config_*.lst')) + for skip_files_path in skip_files_paths: + for device, re_list in regexps.items(): + if device in skip_files_path: + with open(skip_files_path, 'w') as file: + file.writelines(re_list) + + +if __name__ == "__main__": + args = parse_arguments() + save_to_file(args.skip_config_folders, get_conformance_hung_test(args.input_logs)) From 1843d5cc165801d7c9c3cd44bfdf5985a9554f58 Mon Sep 17 00:00:00 2001 From: Alexander Zhogov Date: Wed, 12 May 2021 14:59:57 +0300 Subject: [PATCH 12/27] Azure CI: Remove empty MklDnnFunctionalTests (#5590) --- .ci/azure/linux.yml | 9 +-------- .ci/azure/mac.yml | 8 -------- .ci/azure/windows.yml | 11 ----------- 3 files changed, 1 insertion(+), 27 deletions(-) diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 01c16af1e87376..3de3240bb0f99c 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -125,6 +125,7 @@ jobs: displayName: 'nGraph UT' continueOnError: false + # python3 $(WORK_DIR)/gtest-parallel/gtest_parallel.py $(BIN_DIR)/InferenceEngineUnitTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=InferenceEngineUnitTests.json --gtest_filter=*smoke* -- --gtest_print_time=1 - script: $(BIN_DIR)/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:TEST-InferenceEngineUnitTests.xml displayName: 'IE UT old' continueOnError: false @@ -161,14 +162,6 @@ jobs: displayName: 'CPU FuncTests' continueOnError: false - - script: | - export DATA_PATH=$(MODELS_PATH) - export MODELS_PATH=$(MODELS_PATH) - python3 $(WORK_DIR)/gtest-parallel/gtest-parallel $(BIN_DIR)/MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke* -- --gtest_print_time=1 - workingDirectory: $(WORK_DIR) - displayName: 'MklDnnFunctionalTests' - continueOnError: false - - script: | export DATA_PATH=$(MODELS_PATH) export MODELS_PATH=$(MODELS_PATH) diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml index c3c2a631606234..3ba29d392c0b2d 100644 --- a/.ci/azure/mac.yml +++ b/.ci/azure/mac.yml @@ -137,14 +137,6 @@ jobs: displayName: 'CPU FuncTests' continueOnError: false - - script: | - export DATA_PATH=$(MODELS_PATH) - export MODELS_PATH=$(MODELS_PATH) - python3 
$(WORK_DIR)/gtest-parallel/gtest-parallel $(BIN_DIR)/MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke*:-smoke_MobileNet/ModelTransformationsTest.LPT/mobilenet_v2_tf_depthwise_batch1_inPluginDisabled_inTestDisabled_asymmetric* -- --gtest_print_time=1 - workingDirectory: $(WORK_DIR) - displayName: 'MklDnnFunctionalTests' - continueOnError: false - - script: | export DATA_PATH=$(MODELS_PATH) export MODELS_PATH=$(MODELS_PATH) diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index a8efee2deaedba..0ae20539c9b9e4 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -167,17 +167,6 @@ jobs: displayName: 'CPU FuncTests - IB' continueOnError: false - # Add for gtest-parallel, it hangs now (CVS-33386) - #python $(WORK_DIR)\gtest-parallel\gtest-parallel $(BIN_DIR)\MklDnnFunctionalTests --workers=$(WORKERS_NUMBER) --dump_json_test_results=MklDnnFunctionalTests.json --gtest_filter=*smoke* -- --gtest_print_time=1 - - script: | - set PATH=$(TEST_ENV_PATH) - set DATA_PATH=$(MODELS_PATH) - set MODELS_PATH=$(MODELS_PATH) - rem "$(IB_TESTCONSOLE)" $(BIN_DIR)\MklDnnFunctionalTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-MklDnnFunctionalTests-IB.xml - $(BIN_DIR)\MklDnnFunctionalTests.exe --gtest_filter=*smoke* --gtest_output=xml:TEST-MklDnnFunctionalTests.xml - displayName: 'MklDnnFunctionalTests' - continueOnError: false - - script: | set PATH=$(TEST_ENV_PATH) set DATA_PATH=$(MODELS_PATH) From 7fa93b226e2b02e098b15402367d99796b9b9708 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Wed, 12 May 2021 15:01:02 +0300 Subject: [PATCH 13/27] [IE TESTS] Skip all tests in conformance suite for myriad plugin (#5560) --- .../test_runner/skip_configs/skip_config_myriad.lst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_myriad.lst b/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_myriad.lst index 54f642b9875866..4ce2c5b20b09ea 100644 --- a/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_myriad.lst +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/skip_configs/skip_config_myriad.lst @@ -143,4 +143,7 @@ .*Add_1087636.* .*Add_2868.* .*Add_2979.* -.*Add_53543.* \ No newline at end of file +.*Add_53543.* + +# Temporary decision to skip all tests (To collect correct test number) +.* \ No newline at end of file From fe5ca28b6eb9c015776b66718e99d8945fab40c7 Mon Sep 17 00:00:00 2001 From: iliya mironov Date: Wed, 12 May 2021 15:33:03 +0300 Subject: [PATCH 14/27] Smal fix with tf env_setup (#5476) * Smal fix with tf env_setup * Fix tf loader * Fix version checker --- model-optimizer/mo/front/tf/loader.py | 2 +- model-optimizer/mo/utils/versions_checker.py | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/model-optimizer/mo/front/tf/loader.py b/model-optimizer/mo/front/tf/loader.py index 0e0b102b8ee3fb..b51e14f26d56ed 100644 --- a/model-optimizer/mo/front/tf/loader.py +++ b/model-optimizer/mo/front/tf/loader.py @@ -215,7 +215,7 @@ def load_tf_graph_def(graph_file_name: str = "", is_binary: bool = True, checkpo if model_dir: # saved model directory try: - env_setup = get_environment_setup() + env_setup = get_environment_setup("tf") # enable eager execution temporarily while TensorFlow 2 model is being loaded tf_v1.enable_eager_execution() # code to extract GraphDef for TF 2.0 
SavedModel format diff --git a/model-optimizer/mo/utils/versions_checker.py b/model-optimizer/mo/utils/versions_checker.py index 17ee21f14d1fe7..b9495f35041b64 100644 --- a/model-optimizer/mo/utils/versions_checker.py +++ b/model-optimizer/mo/utils/versions_checker.py @@ -196,9 +196,10 @@ def version_check(name, installed_v, required_v, sign, not_satisfied_v): not_satisfied_v.append((name, 'installed: {}'.format(installed_v), 'required: {} {}'.format(sign, required_v))) -def get_environment_setup(): +def get_environment_setup(framework): """ Get environment setup such as Python version, TensorFlow version + :param framework: framework name :return: a dictionary of environment variables """ env_setup = dict() @@ -207,9 +208,10 @@ def get_environment_setup(): sys.version_info.micro) env_setup['python_version'] = python_version try: - exec("import tensorflow") - env_setup['tensorflow'] = sys.modules["tensorflow"].__version__ - exec("del tensorflow") + if framework == 'tf': + exec("import tensorflow") + env_setup['tensorflow'] = sys.modules["tensorflow"].__version__ + exec("del tensorflow") except (AttributeError, ImportError): pass env_setup['sys_platform'] = sys.platform @@ -228,7 +230,7 @@ def check_requirements(framework=None): :param framework: framework name :return: exit code (0 - execution successful, 1 - error) """ - env_setup = get_environment_setup() + env_setup = get_environment_setup(framework) if framework is None: framework_suffix = "" elif framework == "tf": From ed4d3fc4ed8923d06d6d7659360bf3f5696e5f12 Mon Sep 17 00:00:00 2001 From: Paul Youngsoo Ahn Date: Wed, 12 May 2021 22:00:12 +0900 Subject: [PATCH 15/27] [IE CLDNN] Disable extended eltwise fusing on gen12 (#5584) --- .../prepare_primitive_fusing.cpp | 5 +++-- .../tests/test_cases/fusings_gpu_test.cpp | 22 ++++++++++++++----- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp b/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp index 21011545282ad6..ffd8fbd245c93c 100644 --- a/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp +++ b/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp @@ -355,6 +355,7 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) { bool recalc_processing_order = false; std::map> fusing_history; + const uint8_t supports_immad = p.get_engine().get_device_info().supports_immad; auto itr = p.get_processing_order().begin(); while (itr != p.get_processing_order().end()) { auto node_itr = itr++; @@ -520,7 +521,7 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) { // find original dependency of current_node using fusing_history // and check the number of users of it. // If the node has multiple users it's not fusible. 
- if (input_data.has_fused_primitives()) { + if (!supports_immad && input_data.has_fused_primitives()) { size_t num_original_dependencies = 0; auto iter = fusing_history.find(current_node_id); if (iter != fusing_history.end()) { @@ -872,7 +873,7 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) { bool merge_allowed = true; // If fused node is not convolution and fused node has multiple users, // follow the legacy checking rule - if (fused_node->is_type() && fused_node->get_users().size() > 1) { + if (!supports_immad && fused_node->is_type() && fused_node->get_users().size() > 1) { // Allowed new pattern: Elt1, Act, Elt2, Elt3, Elt4 are fused to Conv1 // * Conv1 -> Eltw1(Add) -> Act(Clamp) -> Eltw2(Mul) -> Eltw3(Mul) -> Eltw4(Add) -> Conv2 // * \–----------------------------------->/ \---------> Eltw5(Div) diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp index a14d6d7975acdf..0b65758bb4df67 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp @@ -833,9 +833,12 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_prelu_eltwise, bc_test_params{CASE_CONV_FP16_4, 2, 4}, }), ); - class conv_fp32_multi_eltwise_2 : public ConvFusingTest {}; TEST_P(conv_fp32_multi_eltwise_2, basic) { + if (engine.get_info().supports_immad) { + return; + } + auto p = GetParam(); create_topologies(input_layout("input", get_input_layout(p)), data("eltwise_data", get_mem(get_output_layout(p))), @@ -866,10 +869,12 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_multi_eltwise_2, class conv_fp32_multi_eltwise_2_clamp : public ConvFusingTest {}; - TEST_P(conv_fp32_multi_eltwise_2_clamp, basic) { - auto p = GetParam(); + if (engine.get_info().supports_immad) { + return; + } + auto p = GetParam(); create_topologies(input_layout("input", get_input_layout(p)), data("eltwise1_data", get_mem(get_output_layout(p))), data("bias", get_mem(get_bias_layout(p))), @@ -900,10 +905,12 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_multi_eltwise_2_clamp, class conv_fp32_multi_eltwise_4_clamp : public ConvFusingTest {}; - TEST_P(conv_fp32_multi_eltwise_4_clamp, basic) { - auto p = GetParam(); + if (engine.get_info().supports_immad) { + return; + } + auto p = GetParam(); create_topologies(input_layout("input", get_input_layout(p)), data("eltwise1_data", get_mem(get_output_layout(p))), data("eltwise2_data", get_mem(get_output_layout(p))), @@ -939,6 +946,10 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_multi_eltwise_4_clamp, class conv_fp32_multi_eltwise_3_fusing : public ConvFusingTest {}; TEST_P(conv_fp32_multi_eltwise_3_fusing, basic) { + if (engine.get_info().supports_immad) { + return; + } + auto p = GetParam(); create_topologies(input_layout("input", get_input_layout(p)), data("eltwise_data1", get_mem(get_output_layout(p))), @@ -988,6 +999,7 @@ TEST_P(conv_fp32_multi_eltwise_quantization, basic) { eltwise("eltwise2", "eltwise1", "quantize", eltwise_mode::prod), reorder("reorder_bfyx", "eltwise2", p.default_format, data_types::f32) ); + tolerance = 1.f; execute(p); } From b4565b7b4f3863792d1c7023e9683bec5c191072 Mon Sep 17 00:00:00 2001 From: Gleb Kazantaev Date: Wed, 12 May 2021 18:42:56 +0300 Subject: [PATCH 16/27] Add Model Optimizer --transform option (#5504) * Execute MO stages inside subprocess to have single IE check * Add --transform key * Updated ofline transformations to execute 
user specified passes; updated logic to raise when --transform is used
* Parametrize LowLatency transformation with num_iterations key
* Fixed MO and IE versions comparison
* Use subprocess for offline transformations execution to catch errors
* Remove ie_is_available from IR; fixed typo
* Fix for old IE versions
* Update parse_transform key unit tests
* Show available transformations
* Fixed typo
* Fix review comments
* Fix python2 compatibility
* Fixed review comments
* Fixed __main__ import
---
 .../offline_transformations_api.pyx           |  5 +-
 .../offline_transformations_api_impl.cpp      |  3 +-
 .../offline_transformations_api_impl.hpp      |  2 +-
 .../offline_transformations_api_impl_defs.pxd |  3 +-
 model-optimizer/automation/package_BOM.txt    |  6 ++
 model-optimizer/mo.py                         | 13 +--
 model-optimizer/mo/__main__.py                | 15 +--
 .../mo/back/offline_transformations.py        | 54 ++++++----
 model-optimizer/mo/main.py                    | 44 +++++++--
 model-optimizer/mo/main_caffe.py              | 10 ++
 model-optimizer/mo/main_kaldi.py              | 10 ++
 model-optimizer/mo/main_mxnet.py              | 10 ++
 model-optimizer/mo/main_onnx.py               | 10 ++
 model-optimizer/mo/main_tf.py                 | 10 ++
 model-optimizer/mo/subprocess_main.py         | 42 ++++++++
 model-optimizer/mo/utils/check_ie_bindings.py | 24 +++-
 model-optimizer/mo/utils/cli_parser.py        | 98 ++++++++++++++++++-
 model-optimizer/mo/utils/version.py           |  4 +-
 model-optimizer/mo_caffe.py                   | 13 +--
 model-optimizer/mo_kaldi.py                   | 13 +--
 model-optimizer/mo_mxnet.py                   | 13 +--
 model-optimizer/mo_onnx.py                    | 13 +--
 model-optimizer/mo_tf.py                      | 13 +--
 .../unit_tests/mo/utils/cli_parser_test.py    | 70 ++++++++++++-
 .../unit_tests/mo/utils/version_test.py       | 11 ++-
 25 files changed, 382 insertions(+), 127 deletions(-)
 create mode 100644 model-optimizer/mo/main_caffe.py
 create mode 100644 model-optimizer/mo/main_kaldi.py
 create mode 100644 model-optimizer/mo/main_mxnet.py
 create mode 100644 model-optimizer/mo/main_onnx.py
 create mode 100644 model-optimizer/mo/main_tf.py
 create mode 100644 model-optimizer/mo/subprocess_main.py

diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api.pyx b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api.pyx
index dd7300d33dd6da..bd101280fcbb16 100644
--- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api.pyx
+++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api.pyx
@@ -6,6 +6,7 @@ from ..inference_engine.ie_api cimport IENetwork
 
 from libcpp cimport bool
 from libcpp.string cimport string
+from libc.stdint cimport int64_t
 
 
 def ApplyMOCTransformations(IENetwork network, bool cf):
@@ -16,8 +17,8 @@ def ApplyPOTTransformations(IENetwork network, string device):
     C.ApplyPOTTransformations(network.impl, device)
 
 
-def ApplyLowLatencyTransformation(IENetwork network):
-    C.ApplyLowLatencyTransformation(network.impl)
+def ApplyLowLatencyTransformation(IENetwork network, int64_t num_iterations=1):
+    C.ApplyLowLatencyTransformation(network.impl, num_iterations)
 
 
 def ApplyPruningTransformation(IENetwork network):
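For reference, a minimal sketch of driving the extended binding from Python. This is not part of the patch: it assumes a build that already contains this change, "model.xml"/"model.bin" are placeholder paths, and the same read_network/serialize pattern is the one used by mo/back/offline_transformations.py later in this patch. Note that at this point the C++ side still carries a TODO to actually forward num_iterations to the LowLatency pass (see impl.cpp below).

# Usage sketch only, under the assumptions stated above.
from openvino.inference_engine import read_network  # pylint: disable=import-error
from openvino.offline_transformations import ApplyLowLatencyTransformation  # pylint: disable=import-error

net = read_network("model.xml", "model.bin")
ApplyLowLatencyTransformation(net, num_iterations=2)  # num_iterations defaults to 1 when omitted
net.serialize("model_ll.xml", "model_ll.bin")

diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp
index d5c628b5b187a6..b9ff879da8c843 100644
--- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp
+++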
b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.cpp @@ -26,8 +26,9 @@ void InferenceEnginePython::ApplyPOTTransformations(InferenceEnginePython::IENet manager.run_passes(network.actual->getFunction()); } -void InferenceEnginePython::ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network) { +void InferenceEnginePython::ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network, int64_t num_iterations) { ngraph::pass::Manager manager; + // TODO: pass num_iterations to LowLatency manager.register_pass(); manager.register_pass(); diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.hpp index 81aca0622a5c8a..504388e4afc1ad 100644 --- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.hpp +++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl.hpp @@ -15,7 +15,7 @@ void ApplyMOCTransformations(InferenceEnginePython::IENetwork network, bool cf); void ApplyPOTTransformations(InferenceEnginePython::IENetwork network, std::string device); -void ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network); +void ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network, int64_t num_iterations); void ApplyPruningTransformation(InferenceEnginePython::IENetwork network); diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl_defs.pxd b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl_defs.pxd index d9d50139dafe3b..726880e9353f37 100644 --- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl_defs.pxd +++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/offline_transformations_api_impl_defs.pxd @@ -3,6 +3,7 @@ from libcpp cimport bool from libcpp.string cimport string +from libc.stdint cimport int64_t from ..inference_engine.ie_api_impl_defs cimport IENetwork @@ -11,7 +12,7 @@ cdef extern from "offline_transformations_api_impl.hpp" namespace "InferenceEngi cdef void ApplyPOTTransformations(IENetwork network, string device) - cdef void ApplyLowLatencyTransformation(IENetwork network) + cdef void ApplyLowLatencyTransformation(IENetwork network, int64_t num_iterations) cdef void ApplyPruningTransformation(IENetwork network) diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index d236df8f3d2189..b6f7236c35f734 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -939,6 +939,11 @@ mo/graph/graph.py mo/graph/perm_inputs.py mo/graph/port.py mo/main.py +mo/main_caffe.py +mo/main_kaldi.py +mo/main_mxnet.py +mo/main_onnx.py +mo/main_tf.py mo/middle/__init__.py mo/middle/passes/__init__.py mo/middle/passes/conv.py @@ -1004,6 +1009,7 @@ mo/ops/unsqueeze.py mo/pipeline/__init__.py mo/pipeline/common.py mo/pipeline/unified.py +mo/subprocess_main.py mo/utils/__init__.py mo/utils/broadcasting.py mo/utils/check_ie_bindings.py diff --git a/model-optimizer/mo.py b/model-optimizer/mo.py index bc8fcf9daa9e36..d8a02b7f9649e6 100755 --- a/model-optimizer/mo.py +++ b/model-optimizer/mo.py @@ -3,16 +3,7 @@ # Copyright (C) 2018-2021 Intel 
Corporation # SPDX-License-Identifier: Apache-2.0 -import sys - -from mo.utils.versions_checker import check_python_version # pylint: disable=no-name-in-module if __name__ == "__main__": - ret_code = check_python_version() - if ret_code: - sys.exit(ret_code) - - from mo.main import main - from mo.utils.cli_parser import get_all_cli_parser # pylint: disable=no-name-in-module - - sys.exit(main(get_all_cli_parser(), None)) + from mo.subprocess_main import subprocess_main # pylint: disable=no-name-in-module + subprocess_main(framework=None) diff --git a/model-optimizer/mo/__main__.py b/model-optimizer/mo/__main__.py index 1e84a6a65a94d5..9d0e0f5013e310 100644 --- a/model-optimizer/mo/__main__.py +++ b/model-optimizer/mo/__main__.py @@ -1,16 +1,5 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import sys - -from mo.utils.versions_checker import check_python_version # pylint: disable=no-name-in-module - -ret_code = check_python_version() -if ret_code: - sys.exit(ret_code) - -from mo.main import main -from mo.utils.cli_parser import get_all_cli_parser # pylint: disable=no-name-in-module - -sys.exit(main(get_all_cli_parser(), None)) - +from mo.subprocess_main import subprocess_main +subprocess_main(framework=None) diff --git a/model-optimizer/mo/back/offline_transformations.py b/model-optimizer/mo/back/offline_transformations.py index 22b317ab7650d6..e9069547484e94 100644 --- a/model-optimizer/mo/back/offline_transformations.py +++ b/model-optimizer/mo/back/offline_transformations.py @@ -3,28 +3,48 @@ import argparse -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--input_model") - parser.add_argument("--framework") - args = parser.parse_args() - path_to_model = args.input_model +from mo.utils.error import Error +from mo.utils.cli_parser import parse_transform - # This variable is only needed by GenerateMappingFile transformation - # to produce correct mapping - extract_names = True if args.framework in ['tf', 'mxnet', 'kaldi'] else False +def get_available_transformations(): try: - from openvino.inference_engine import IECore, read_network # pylint: disable=import-error - from openvino.offline_transformations import ApplyMOCTransformations, GenerateMappingFile, CheckAPI # pylint: disable=import-error + from openvino.offline_transformations import ApplyLowLatencyTransformation # pylint: disable=import-error + return { + 'LowLatency': ApplyLowLatencyTransformation, + } except Exception as e: - print("[ WARNING ] {}".format(e)) - exit(1) + return {} + + +def apply_offline_transformations(input_model: str, framework: str, transforms: list): + # This variable is only needed by GenerateMappingFile transformation + # to produce correct mapping + extract_names = framework in ['tf', 'mxnet', 'kaldi'] - CheckAPI() + from openvino.inference_engine import read_network # pylint: disable=import-error + from openvino.offline_transformations import ApplyMOCTransformations, GenerateMappingFile # pylint: disable=import-error - net = read_network(path_to_model + "_tmp.xml", path_to_model + "_tmp.bin") - net.serialize(path_to_model + ".xml", path_to_model + ".bin") - path_to_mapping = path_to_model + ".mapping" + net = read_network(input_model + "_tmp.xml", input_model + "_tmp.bin") + + available_transformations = get_available_transformations() + + for name, args in transforms: + if name not in available_transformations.keys(): + raise Error("Transformation {} is not available.".format(name)) + + available_transformations[name](net, 
**args) + + net.serialize(input_model + ".xml", input_model + ".bin") + path_to_mapping = input_model + ".mapping" GenerateMappingFile(net, path_to_mapping.encode('utf-8'), extract_names) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_model") + parser.add_argument("--framework") + parser.add_argument("--transform") + args = parser.parse_args() + + apply_offline_transformations(args.input_model, args.framework, parse_transform(args.transform)) \ No newline at end of file diff --git a/model-optimizer/mo/main.py b/model-optimizer/mo/main.py index 7783bea36c8982..2a289f2d3b86c4 100644 --- a/model-optimizer/mo/main.py +++ b/model-optimizer/mo/main.py @@ -6,8 +6,8 @@ import logging as log import os import platform -import subprocess import sys +import subprocess import traceback from collections import OrderedDict from copy import deepcopy @@ -24,7 +24,8 @@ from mo.utils import import_extensions from mo.utils.cli_parser import get_placeholder_shapes, get_tuple_values, get_model_name, \ get_common_cli_options, get_caffe_cli_options, get_tf_cli_options, get_mxnet_cli_options, get_kaldi_cli_options, \ - get_onnx_cli_options, get_mean_scale_dictionary, parse_tuple_pairs, get_freeze_placeholder_values, get_meta_info + get_onnx_cli_options, get_mean_scale_dictionary, parse_tuple_pairs, get_freeze_placeholder_values, get_meta_info, \ + parse_transform, check_available_transforms from mo.utils.error import Error, FrameworkError from mo.utils.find_ie_version import find_ie_version from mo.utils.get_ov_update_message import get_ov_update_message @@ -33,7 +34,7 @@ from mo.utils.model_analysis import AnalysisResults from mo.utils.utils import refer_to_faq_msg from mo.utils.version import get_version, get_simplified_mo_version, get_simplified_ie_version -from mo.utils.versions_checker import check_requirements +from mo.utils.versions_checker import check_requirements # pylint: disable=no-name-in-module def replace_ext(name: str, old: str, new: str): @@ -141,14 +142,22 @@ def prepare_ir(argv: argparse.Namespace): # This try-except is additional reinsurance that the IE # dependency search does not break the MO pipeline try: - if not find_ie_version(silent=argv.silent) and not argv.silent: + argv.ie_is_available = find_ie_version(silent=argv.silent) + + if not argv.ie_is_available and not argv.silent: print("[ WARNING ] Could not find the Inference Engine Python API. 
At this moment, the Inference Engine dependency is not required, but will be required in future releases.")
                print("[ WARNING ] Consider building the Inference Engine Python API from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\"".format(
                    "bat" if sys.platform == "windows" else "sh"))
                # If the IE was not found, it will not print the MO version, so we have to print it manually
                print("{}: \t{}".format("Model Optimizer version", get_version()))
     except Exception as e:
-        pass
+        argv.ie_is_available = False
+
+    # This is just to check that transform key is valid and transformations are available
+    check_available_transforms(parse_transform(argv.transform), argv.ie_is_available)
+
+    if argv.legacy_ir_generation and len(argv.transform) != 0:
+        raise Error("--legacy_ir_generation and --transform keys can not be used at the same time.")
 
     ret_code = check_requirements(framework=argv.framework)
     if ret_code:
@@ -250,6 +259,10 @@ def emit_ir(graph: Graph, argv: argparse.Namespace):
     mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
     input_names = deepcopy(graph.graph['input_names']) if 'input_names' in graph.graph else []
 
+    # Remove temporary ie_is_available key from argv not to have it in IR
+    ie_is_available = argv.ie_is_available
+    del argv.ie_is_available
+
     prepare_emit_ir(graph=graph,
                     data_type=graph.graph['cmd_params'].data_type,
                     output_dir=argv.output_dir,
@@ -270,16 +283,16 @@ def emit_ir(graph: Graph, argv: argparse.Namespace):
     # This try-except is additional reinsurance that the IE
     # dependency search does not break the MO pipeline
     try:
-        if not argv.legacy_ir_generation and find_ie_version(silent=True):
+        if not argv.legacy_ir_generation and ie_is_available:
             path_to_offline_transformations = os.path.join(os.path.realpath(os.path.dirname(__file__)),
                                                            'back', 'offline_transformations.py')
             status = subprocess.run([sys.executable, path_to_offline_transformations,
                                      "--input_model", orig_model_name,
-                                     "--framework", argv.framework], env=os.environ, timeout=10)
+                                     "--framework", argv.framework,
+                                     "--transform", argv.transform], env=os.environ)
             return_code = status.returncode
-            if return_code != 0 and not argv.silent:
-                log.error("offline_transformations return code {}".format(return_code), extra={'is_warning': True})
     except Exception as e:
+        return_code = "failed"
         log.error(e, extra={'is_warning': True})
 
     message = str(dict({
@@ -296,6 +309,14 @@
     # produced by prepare_ir.
This IR needs to be renamed from XXX_tmp.xml to XXX.xml suffixes = [".xml", ".bin", ".mapping"] if return_code != 0: + if len(argv.transform) != 0: + # Remove temporary IR before throwing exception + for suf in suffixes: + path_to_file = orig_model_name + "_tmp" + suf + if os.path.exists(path_to_file): + os.remove(path_to_file) + raise Error("Failed to apply transformations: {}".format(argv.transform)) + log.error("Using fallback to produce IR.", extra={'is_warning': True}) for suf in suffixes: # remove existing files @@ -400,3 +421,8 @@ def main(cli_parser: argparse.ArgumentParser, framework: str): telemetry.end_session() telemetry.force_shutdown(1.0) return 1 + + +if __name__ == "__main__": + from mo.utils.cli_parser import get_all_cli_parser + sys.exit(main(get_all_cli_parser(), None)) diff --git a/model-optimizer/mo/main_caffe.py b/model-optimizer/mo/main_caffe.py new file mode 100644 index 00000000000000..bcba5c8d611735 --- /dev/null +++ b/model-optimizer/mo/main_caffe.py @@ -0,0 +1,10 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import sys + +from mo.utils.cli_parser import get_caffe_cli_parser + +if __name__ == "__main__": + from mo.main import main + sys.exit(main(get_caffe_cli_parser(), 'caffe')) diff --git a/model-optimizer/mo/main_kaldi.py b/model-optimizer/mo/main_kaldi.py new file mode 100644 index 00000000000000..15233333203adb --- /dev/null +++ b/model-optimizer/mo/main_kaldi.py @@ -0,0 +1,10 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import sys + +from mo.utils.cli_parser import get_kaldi_cli_parser + +if __name__ == "__main__": + from mo.main import main + sys.exit(main(get_kaldi_cli_parser(), 'kaldi')) diff --git a/model-optimizer/mo/main_mxnet.py b/model-optimizer/mo/main_mxnet.py new file mode 100644 index 00000000000000..91cb19531592e5 --- /dev/null +++ b/model-optimizer/mo/main_mxnet.py @@ -0,0 +1,10 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import sys + +from mo.utils.cli_parser import get_mxnet_cli_parser + +if __name__ == "__main__": + from mo.main import main + sys.exit(main(get_mxnet_cli_parser(), 'mxnet')) diff --git a/model-optimizer/mo/main_onnx.py b/model-optimizer/mo/main_onnx.py new file mode 100644 index 00000000000000..3bf882d65e9ed0 --- /dev/null +++ b/model-optimizer/mo/main_onnx.py @@ -0,0 +1,10 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import sys + +from mo.utils.cli_parser import get_onnx_cli_parser + +if __name__ == "__main__": + from mo.main import main + sys.exit(main(get_onnx_cli_parser(), 'onnx')) diff --git a/model-optimizer/mo/main_tf.py b/model-optimizer/mo/main_tf.py new file mode 100644 index 00000000000000..3c55e4ac0e2d05 --- /dev/null +++ b/model-optimizer/mo/main_tf.py @@ -0,0 +1,10 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import sys + +from mo.utils.cli_parser import get_tf_cli_parser + +if __name__ == "__main__": + from mo.main import main + sys.exit(main(get_tf_cli_parser(), 'tf')) diff --git a/model-optimizer/mo/subprocess_main.py b/model-optimizer/mo/subprocess_main.py new file mode 100644 index 00000000000000..64c05a96441fa4 --- /dev/null +++ b/model-optimizer/mo/subprocess_main.py @@ -0,0 +1,42 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +import subprocess + +from mo.utils.versions_checker import check_python_version # pylint: disable=no-name-in-module + + +def 
subprocess_main(framework=None): + """ + Please keep this file compatible with python2 in order to check user python version. + + This function checks that Inference Engine Python API available and working as expected + and then in sub-process it executes main_.py files. Due to some OSs specifics we can't + just add paths to Python modules and libraries into current env. So to make Inference Engine + Python API to be available inside MO we need to use subprocess with new env. + """ + ret_code = check_python_version() + if ret_code: + sys.exit(ret_code) + + from mo.utils.find_ie_version import find_ie_version + find_ie_version(silent=True) + + mo_root_path = os.path.join(os.path.dirname(__file__), os.pardir) + + python_path_key = 'PYTHONPATH' + if python_path_key not in os.environ: + os.environ[python_path_key] = mo_root_path + else: + os.environ[python_path_key] = os.pathsep.join([os.environ[python_path_key], mo_root_path]) + + path_to_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), + 'main_{}.py'.format(framework) if framework else 'main.py') + # python2 compatible code. Do not remove. + args = [sys.executable, path_to_main] + for arg in sys.argv[1:]: + args.append(arg) + status = subprocess.run(args, env=os.environ) + sys.exit(status.returncode) \ No newline at end of file diff --git a/model-optimizer/mo/utils/check_ie_bindings.py b/model-optimizer/mo/utils/check_ie_bindings.py index 6fee0828303c80..12c9e039e37f39 100644 --- a/model-optimizer/mo/utils/check_ie_bindings.py +++ b/model-optimizer/mo/utils/check_ie_bindings.py @@ -32,11 +32,20 @@ def send_telemetry(mo_version: str, message: str, event_type: str): def import_core_modules(silent: bool, path_to_module: str): + """ + This function checks that InferenceEngine Python API is available + and necessary python modules exists. So the next list of imports + must contain all IE/NG Python API imports that are used inside MO. + + :param silent: enables or disables logs printing to stdout + :param path_to_module: path where python API modules were found + :return: True if all imports were successful and False otherwise + """ try: - from openvino.inference_engine import IECore, get_version # pylint: disable=import-error - from openvino.offline_transformations import ApplyMOCTransformations, CheckAPI # pylint: disable=import-error + from openvino.inference_engine import get_version, read_network # pylint: disable=import-error + from openvino.offline_transformations import ApplyMOCTransformations, ApplyLowLatencyTransformation, GenerateMappingFile # pylint: disable=import-error - import openvino # pylint: disable=import-error + import openvino # pylint: disable=import-error if silent: return True @@ -46,15 +55,10 @@ def import_core_modules(silent: bool, path_to_module: str): print("\t- {}: \t{}".format("Inference Engine found in", os.path.dirname(openvino.__file__))) print("{}: \t{}".format("Inference Engine version", ie_version)) - print("{}: \t {}".format("Model Optimizer version", mo_version)) + print("{}: \t{}".format("Model Optimizer version", mo_version)) versions_mismatch = False - # MO and IE version have a small difference in the beginning of version because - # IE version also includes API version. For example: - # Inference Engine version: 2.1.custom_HEAD_4c8eae0ee2d403f8f5ae15b2c9ad19cfa5a9e1f9 - # Model Optimizer version: custom_HEAD_4c8eae0ee2d403f8f5ae15b2c9ad19cfa5a9e1f9 - # So to match this versions we skip IE API version. 
-        if not re.match(r"^([0-9]+).([0-9]+).{}$".format(mo_version), ie_version):
+        if mo_version != ie_version:
             versions_mismatch = True
             extracted_mo_release_version = v.extract_release_version(mo_version)
             mo_is_custom = extracted_mo_release_version == (None, None)
diff --git a/model-optimizer/mo/utils/cli_parser.py b/model-optimizer/mo/utils/cli_parser.py
index b44e47fabf875c..86f35ca4dfeda7 100644
--- a/model-optimizer/mo/utils/cli_parser.py
+++ b/model-optimizer/mo/utils/cli_parser.py
@@ -6,7 +6,6 @@
 import logging as log
 import os
 import re
-import sys
 from collections import OrderedDict
 from itertools import zip_longest
 
@@ -254,6 +253,14 @@ def get_common_cli_parser(parser: argparse.ArgumentParser = None):
                               'and biases are quantized to FP16.',
                               choices=["FP16", "FP32", "half", "float"],
                               default='float')
+    common_group.add_argument('--transform',
+                              help='Apply additional transformations. ' +
+                                   'Usage: "--transform transformation_name1[args],transformation_name2..." ' +
+                                   'where [args] is key=value pairs separated by semicolon. ' +
+                                   'Examples: "--transform LowLatency" or ' +
+                                   '          "--transform LowLatency[num_iterations=2]" ' +
+                                   'Available transformations: "LowLatency"',
+                              default="")
     common_group.add_argument('--disable_fusing',
                               help='Turn off fusing of linear operations to Convolution',
                               action=DeprecatedStoreTrue)
@@ -1127,6 +1134,95 @@ def get_absolute_path(path_to_file: str) -> str:
     return file_path
 
 
+def isfloat(value):
+    try:
+        float(value)
+        return True
+    except ValueError:
+        return False
+
+
+def convert_string_to_real_type(value: str):
+    values = value.split(',')
+    for i in range(len(values)):
+        value = values[i]
+        if value.isdigit():
+            values[i] = int(value)
+        elif isfloat(value):
+            values[i] = float(value)
+
+    return values[0] if len(values) == 1 else values
+
+
+def parse_transform(transform: str) -> list:
+    transforms = []
+
+    if len(transform) == 0:
+        return transforms
+
+    all_transforms = re.findall(r"([a-zA-Z0-9]+)(\[([^\]]+)\])*(,|$)", transform)
+
+    # Check that all characters were matched otherwise transform key value is invalid
+    key_len = len(transform)
+    for transform in all_transforms:
+        # In regexp we have 4 groups where
+        #    1st group - transformation_name,
+        #    2nd group - [args],
+        #    3rd group - args, <-- nested group
+        #    4th group - EOL
+        # And to check that regexp matched all string we decrease total length by the length of matched groups (1,2,4)
+        # In case if no arguments were given to transformation then 2nd and 3rd groups will be empty.
+        if len(transform) != 4:
+            raise Error("Unexpected transform key structure: {}".format(transform))
+        key_len -= len(transform[0]) + len(transform[1]) + len(transform[3])
+
+    if key_len != 0:
+        raise Error("Unexpected transform key structure: {}".format(transform))
+
+    for transform in all_transforms:
+        name = transform[0]
+        args = transform[2]
+
+        args_dict = {}
+
+        if len(args) != 0:
+            for arg in args.split(';'):
+                m = re.match(r"^([_a-zA-Z]+)=(.+)$", arg)
+                if not m:
+                    raise Error("Unrecognized attributes for transform key: {}".format(transform))
+
+                args_dict[m.group(1)] = convert_string_to_real_type(m.group(2))
+
+        transforms.append((name, args_dict))
+
+    return transforms
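For illustration, the values parse_transform is expected to produce for the --transform examples given in the help text above; these expectations mirror the unit tests added to cli_parser_test.py at the end of this patch and are not themselves part of the change.

# Illustrative expectations only; they mirror the new unit tests below.
from mo.utils.cli_parser import parse_transform

assert parse_transform("") == []
assert parse_transform("LowLatency") == [("LowLatency", {})]
assert parse_transform("LowLatency[num_iterations=2]") == \
    [("LowLatency", {"num_iterations": 2})]

+
+
+def check_available_transforms(transforms: list, ie_is_available: bool):
+    """
+    This function checks that the transformations specified by the user are available.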
+ :param transforms: list of user specified transformations + :param ie_is_available: True if IE Python API is available and False if it is not + :return: raises an Error if IE or transformation is not available + """ + if not ie_is_available and len(transforms) != 0: + raise Error('Can not apply {} transformations due to missing Inference Engine Python API'.format( + ','.join([name for name, _ in transforms]))) + + from mo.back.offline_transformations import get_available_transformations + available_transforms = get_available_transformations() + + missing_transformations = [] + for name, _ in transforms: + if name not in available_transforms.keys(): + missing_transformations.append(name) + + if len(missing_transformations) != 0: + raise Error('Following transformations ({}) are not available. ' + 'List with available transformations ({})'.format(','.join(missing_transformations), + ','.join(available_transforms.keys()))) + return True + + def check_positive(value): try: int_value = int(value) diff --git a/model-optimizer/mo/utils/version.py b/model-optimizer/mo/utils/version.py index 6dfbdec286f38c..c449a2d9368b7a 100644 --- a/model-optimizer/mo/utils/version.py +++ b/model-optimizer/mo/utils/version.py @@ -65,7 +65,9 @@ def get_simplified_ie_version(env=dict(), version=None): version = subprocess.check_output([sys.executable, os.path.join(os.path.dirname(__file__), "ie_version.py")], timeout=2, env=env).strip().decode() except: return "ie not found" + + # To support legacy IE versions m = re.match(r"^([0-9]+).([0-9]+).(.*)", version) if m and len(m.groups()) == 3: return simplify_version(m.group(3)) - return "custom" + return simplify_version(version) diff --git a/model-optimizer/mo_caffe.py b/model-optimizer/mo_caffe.py index 83cfd28b96afdf..1b4c7c2add27b5 100755 --- a/model-optimizer/mo_caffe.py +++ b/model-optimizer/mo_caffe.py @@ -3,16 +3,7 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import sys - -from mo.utils.versions_checker import check_python_version if __name__ == "__main__": - ret_code = check_python_version() - if ret_code: - sys.exit(ret_code) - - from mo.main import main - from mo.utils.cli_parser import get_caffe_cli_parser - - sys.exit(main(get_caffe_cli_parser(), 'caffe')) + from mo.subprocess_main import subprocess_main + subprocess_main(framework='caffe') diff --git a/model-optimizer/mo_kaldi.py b/model-optimizer/mo_kaldi.py index f6e4ca223f4696..ca69fa506da88f 100755 --- a/model-optimizer/mo_kaldi.py +++ b/model-optimizer/mo_kaldi.py @@ -3,16 +3,7 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import sys - -from mo.utils.versions_checker import check_python_version if __name__ == "__main__": - ret_code = check_python_version() - if ret_code: - sys.exit(ret_code) - - from mo.main import main - from mo.utils.cli_parser import get_kaldi_cli_parser - - sys.exit(main(get_kaldi_cli_parser(), 'kaldi')) + from mo.subprocess_main import subprocess_main + subprocess_main(framework='kaldi') diff --git a/model-optimizer/mo_mxnet.py b/model-optimizer/mo_mxnet.py index 757debbcc06217..a54d127b00b38c 100755 --- a/model-optimizer/mo_mxnet.py +++ b/model-optimizer/mo_mxnet.py @@ -3,16 +3,7 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import sys - -from mo.utils.versions_checker import check_python_version if __name__ == "__main__": - ret_code = check_python_version() - if ret_code: - sys.exit(ret_code) - - from mo.main import main - from mo.utils.cli_parser import 
get_mxnet_cli_parser - - sys.exit(main(get_mxnet_cli_parser(), 'mxnet')) + from mo.subprocess_main import subprocess_main + subprocess_main(framework='mxnet') diff --git a/model-optimizer/mo_onnx.py b/model-optimizer/mo_onnx.py index 92701d5f2066e1..52c6a253d6739b 100755 --- a/model-optimizer/mo_onnx.py +++ b/model-optimizer/mo_onnx.py @@ -3,16 +3,7 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import sys - -from mo.utils.versions_checker import check_python_version if __name__ == "__main__": - ret_code = check_python_version() - if ret_code: - sys.exit(ret_code) - - from mo.main import main - from mo.utils.cli_parser import get_onnx_cli_parser - - sys.exit(main(get_onnx_cli_parser(), 'onnx')) + from mo.subprocess_main import subprocess_main + subprocess_main(framework='onnx') diff --git a/model-optimizer/mo_tf.py b/model-optimizer/mo_tf.py index e352958f771474..a6747a12b1eb94 100755 --- a/model-optimizer/mo_tf.py +++ b/model-optimizer/mo_tf.py @@ -3,16 +3,7 @@ # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import sys - -from mo.utils.versions_checker import check_python_version if __name__ == "__main__": - ret_code = check_python_version() - if ret_code: - sys.exit(ret_code) - - from mo.main import main - from mo.utils.cli_parser import get_tf_cli_parser - - sys.exit(main(get_tf_cli_parser(), 'tf')) + from mo.subprocess_main import subprocess_main + subprocess_main(framework='tf') diff --git a/model-optimizer/unit_tests/mo/utils/cli_parser_test.py b/model-optimizer/unit_tests/mo/utils/cli_parser_test.py index a4e80a274d9b7c..68a9994d2068c9 100644 --- a/model-optimizer/unit_tests/mo/utils/cli_parser_test.py +++ b/model-optimizer/unit_tests/mo/utils/cli_parser_test.py @@ -14,7 +14,7 @@ from mo.utils.cli_parser import get_placeholder_shapes, get_tuple_values, get_mean_scale_dictionary, get_model_name, \ parse_tuple_pairs, check_positive, writable_dir, readable_dirs, \ - readable_file, get_freeze_placeholder_values + readable_file, get_freeze_placeholder_values, parse_transform, check_available_transforms from mo.utils.error import Error @@ -898,3 +898,71 @@ def test_readable_file(self): def test_non_readable_file(self): with self.assertRaises(Error) as cm: readable_file(__class__.NOT_EXISTING_FILE) + + +class TransformChecker(unittest.TestCase): + def test_empty(self): + self.assertEqual(parse_transform(""), []) + + def test_single_pass(self): + self.assertEqual(parse_transform("LowLatency"), [("LowLatency", {})]) + + def test_single_pass_with_args(self): + self.assertEqual(parse_transform("LowLatency[num_iterations=2]"), + [("LowLatency", {"num_iterations": 2})]) + + def test_single_pass_with_multiple_args(self): + self.assertEqual(parse_transform("LowLatency[num_iterations=2;dummy_attr=3.14]"), + [("LowLatency", {"num_iterations": 2, "dummy_attr": 3.14})]) + + def test_multiple_passes_with_args(self): + self.assertEqual(parse_transform("LowLatency[num_iterations=2],DummyPass[type=ReLU]"), + [("LowLatency", {"num_iterations": 2}), + ("DummyPass", {"type": "ReLU"})]) + + def test_multiple_passes_with_args2(self): + self.assertEqual(parse_transform("LowLatency[num_iterations=2,3,4.15],DummyPass1,DummyPass2[types=ReLU,PReLU;values=1,2,3]"), + [("LowLatency", {"num_iterations": [2,3,4.15]}), + ("DummyPass1", {}), + ("DummyPass2", {"types": ["ReLU", "PReLU"], "values": [1,2,3]})]) + + def test_multiple_passes_no_args(self): + self.assertEqual(parse_transform("DummyPass,LowLatency2"), + [("DummyPass", {}), ("LowLatency2", 
{})]) + + def test_single_pass_neg(self): + self.assertRaises(Error, parse_transform, "LowLatency!") + + def test_multiple_passes_neg(self): + self.assertRaises(Error, parse_transform, "LowLatency;DummyPass") + + def test_single_pass_with_args_neg1(self): + self.assertRaises(Error, parse_transform, "LowLatency[=2]") + + def test_single_pass_with_args_neg2(self): + self.assertRaises(Error, parse_transform, "LowLatency[key=]") + + def test_single_pass_with_args_neg3(self): + self.assertRaises(Error, parse_transform, "LowLatency[]") + + def test_single_pass_with_args_neg4(self): + self.assertRaises(Error, parse_transform, "LowLatency[key=value;]") + + def test_single_pass_with_args_neg5(self): + self.assertRaises(Error, parse_transform, "LowLatency[value]") + + def test_single_pass_with_args_neg6(self): + self.assertRaises(Error, parse_transform, "LowLatency[key=value") + + @patch("mo.back.offline_transformations.get_available_transformations") + def test_check_low_latency_is_available(self, available_transformations): + available_transformations.return_value = {"LowLatency": None} + try: + check_available_transforms([("LowLatency" ,"")], True) + except Error as e: + self.assertTrue(False, "Exception \"{}\" is unexpected".format(e)) + + @patch("mo.back.offline_transformations.get_available_transformations") + def test_check_dummy_pass_is_available(self, available_transformations): + available_transformations.return_value = {"LowLatency": None} + self.assertRaises(Error, check_available_transforms, [("DummyPass", "")], True) diff --git a/model-optimizer/unit_tests/mo/utils/version_test.py b/model-optimizer/unit_tests/mo/utils/version_test.py index cf7cdeb069a443..706f6ca2b67128 100644 --- a/model-optimizer/unit_tests/mo/utils/version_test.py +++ b/model-optimizer/unit_tests/mo/utils/version_test.py @@ -52,11 +52,14 @@ def test_simplify_mo_version_custom(self, mock_open, mock_isfile): mock_open.return_value.__enter__ = mock_open self.assertEqual(get_simplified_mo_version(), "custom") - def test_simplify_ie_version_release(self): + def test_simplify_ie_version_release_legacy(self): self.assertEqual(get_simplified_ie_version(version="2.1.custom_releases/2021/3_4c8eae"), "2021.3") - def test_simplify_ie_version_release_neg(self): - self.assertEqual(get_simplified_ie_version(version="custom_releases/2021/3_4c8eae"), "custom") + def test_simplify_ie_version_release(self): + self.assertEqual(get_simplified_ie_version(version="custom_releases/2021/3_4c8eae"), "2021.3") + + def test_simplify_ie_version_custom_legacy(self): + self.assertEqual(get_simplified_ie_version(version="2.1.custom_my/branch/3_4c8eae"), "custom") def test_simplify_ie_version_custom(self): - self.assertEqual(get_simplified_ie_version(version="2.1.custom_my/branch/3_4c8eae"), "custom") \ No newline at end of file + self.assertEqual(get_simplified_ie_version(version="custom_my/branch/3_4c8eae"), "custom") \ No newline at end of file From f2f44ce16081579146a174f8641df2fe348a3ddd Mon Sep 17 00:00:00 2001 From: Anton Chetverikov Date: Wed, 12 May 2021 18:50:38 +0300 Subject: [PATCH 17/27] Add uint64 to data types maps (#5603) --- model-optimizer/mo/middle/passes/convert_data_type.py | 1 + model-optimizer/mo/utils/ir_engine/ir_engine.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/model-optimizer/mo/middle/passes/convert_data_type.py b/model-optimizer/mo/middle/passes/convert_data_type.py index 06fd2bb846b7bd..85ccbe85813e0f 100644 --- a/model-optimizer/mo/middle/passes/convert_data_type.py +++ 
b/model-optimizer/mo/middle/passes/convert_data_type.py @@ -42,6 +42,7 @@ class packed_I4(np.generic): 'int32': (np.int32, 'I32', 'i32'), 'int64': (np.int64, 'I64', 'i64'), 'bool': (np.bool, 'BOOL', 'boolean'), + 'uint64': (np.uint64, 'U64', 'u64'), # custom types 'U1': (packed_U1, 'U1', 'u1'), diff --git a/model-optimizer/mo/utils/ir_engine/ir_engine.py b/model-optimizer/mo/utils/ir_engine/ir_engine.py index 8d080d8729aff2..b124e6d30046a6 100644 --- a/model-optimizer/mo/utils/ir_engine/ir_engine.py +++ b/model-optimizer/mo/utils/ir_engine/ir_engine.py @@ -301,6 +301,7 @@ def __prepare_bin_attrs(xml_layer, in_port, tag, offset, size, precision): 'I4': (1, np.uint8), 'BOOL': (1, np.bool), 'BIN': (1, np.uint8), + 'U64': (8, np.uint64) } type_size, dtype = precision_map[precision] layer_attrs[tag] = (int(offset), int(size) // type_size, in_port, dtype) @@ -316,7 +317,7 @@ def __normalize_attrs(attrs: dict): """ normalized_attrs = {} for attr, value in attrs.items(): - value = value.replace('\"', '') + value = value.replace('\"', '').replace(' ', '') value = value.split(',') n_value = [] for val in value: From 4d7eeede3578ca048b3574e40f798db24278921b Mon Sep 17 00:00:00 2001 From: Mikhail Nosov Date: Wed, 12 May 2021 21:43:35 +0300 Subject: [PATCH 18/27] Add LoadNetwork(modelPath) to plugin interface (#5606) --- inference-engine/src/gna_plugin/gna_plugin.hpp | 2 ++ inference-engine/src/inference_engine/ie_core.cpp | 5 +++-- .../src/inference_engine/ie_plugin_cpp.hpp | 4 ++++ .../cpp_interfaces/impl/ie_plugin_internal.hpp | 6 ++++++ .../interface/ie_iplugin_internal.hpp | 10 ++++++++++ inference-engine/src/plugin_api/ie_icore.hpp | 15 +++++++++++++++ .../mocks/cpp_interfaces/interface/mock_icore.hpp | 2 ++ .../interface/mock_iinference_plugin.hpp | 2 ++ .../ie_infer_async_request_base_test.cpp | 8 ++++---- .../ie_memory_state_internal_test.cpp | 4 ++-- .../ie_executable_network_test.cpp | 5 +++-- 11 files changed, 53 insertions(+), 10 deletions(-) diff --git a/inference-engine/src/gna_plugin/gna_plugin.hpp b/inference-engine/src/gna_plugin/gna_plugin.hpp index 3e54c224746336..33df51710ba69e 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.hpp +++ b/inference-engine/src/gna_plugin/gna_plugin.hpp @@ -110,6 +110,8 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin { InferenceEngine::IExecutableNetworkInternal::Ptr LoadNetwork(const InferenceEngine::CNNNetwork &network, const std::map &config_map, InferenceEngine::RemoteContext::Ptr context) override { THROW_GNA_EXCEPTION << "Not implemented"; } + InferenceEngine::ExecutableNetwork LoadNetwork(const std::string &modelPath, + const std::map &config_map) override { THROW_GNA_EXCEPTION << "Not implemented"; } bool Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob &result); void SetCore(InferenceEngine::ICore*) noexcept override {} InferenceEngine::ICore* GetCore() const noexcept override {return nullptr;} diff --git a/inference-engine/src/inference_engine/ie_core.cpp b/inference-engine/src/inference_engine/ie_core.cpp index 1bc038ff07136a..14cd4d9a40ed2c 100644 --- a/inference-engine/src/inference_engine/ie_core.cpp +++ b/inference-engine/src/inference_engine/ie_core.cpp @@ -493,9 +493,8 @@ class Core::Impl : public ICore { return res; } - // TODO: In future this method can be added to ICore interface ExecutableNetwork LoadNetwork(const std::string& modelPath, const std::string& deviceName, - const std::map& config) { + const std::map& config) override { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::IE_LT, 
"Core::LoadNetwork::Path"); auto parsed = parseDeviceNameIntoConfig(deviceName, config); auto plugin = GetCPPPluginByName(parsed._deviceName); @@ -511,6 +510,8 @@ class Core::Impl : public ICore { auto cnnNetwork = ReadNetwork(modelPath, std::string()); res = LoadNetworkImpl(cnnNetwork, plugin, parsed._config, nullptr, hash, modelPath); } + } else if (cacheManager) { + res = plugin.LoadNetwork(modelPath, parsed._config); } else { auto cnnNetwork = ReadNetwork(modelPath, std::string()); res = LoadNetworkImpl(cnnNetwork, plugin, parsed._config, nullptr, {}, modelPath); diff --git a/inference-engine/src/inference_engine/ie_plugin_cpp.hpp b/inference-engine/src/inference_engine/ie_plugin_cpp.hpp index d87b16765d68ed..d40bdc478aae1b 100644 --- a/inference-engine/src/inference_engine/ie_plugin_cpp.hpp +++ b/inference-engine/src/inference_engine/ie_plugin_cpp.hpp @@ -88,6 +88,10 @@ class InferencePlugin { PLUGIN_CALL_STATEMENT(return ExecutableNetwork(actual->LoadNetwork(network, config, context), actual)); } + ExecutableNetwork LoadNetwork(const std::string& modelPath, const std::map& config) { + PLUGIN_CALL_STATEMENT(return actual->LoadNetwork(modelPath, config)); + } + QueryNetworkResult QueryNetwork(const CNNNetwork& network, const std::map& config) const { QueryNetworkResult res; diff --git a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp index 2621c73a340c52..dcaf4a1e529f65 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp @@ -72,6 +72,12 @@ class InferencePluginInternal : public IInferencePlugin { return impl; } + ExecutableNetwork LoadNetwork(const std::string& modelPath, + const std::map& config) override { + auto cnnNet = GetCore()->ReadNetwork(modelPath, std::string()); + return GetCore()->LoadNetwork(cnnNet, GetName(), config); + } + IExecutableNetworkInternal::Ptr ImportNetwork(const std::string& modelFileName, const std::map& config) override { (void)modelFileName; diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp index 894605d3d79f4b..18f4658d6a4cc4 100644 --- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp +++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp @@ -166,6 +166,16 @@ class IInferencePlugin : public std::enable_shared_from_this { virtual std::shared_ptr LoadNetwork(const CNNNetwork& network, const std::map& config, RemoteContext::Ptr context) = 0; + + /** + * @brief Creates an executable network from model file path + * @param modelPath A path to model + * @param config A string-string map of config parameters relevant only for this load operation + * @return Created Executable Network object + */ + virtual ExecutableNetwork LoadNetwork(const std::string& modelPath, + const std::map& config) = 0; + /** * @brief Registers extension within plugin * @param extension - pointer to already loaded extension diff --git a/inference-engine/src/plugin_api/ie_icore.hpp b/inference-engine/src/plugin_api/ie_icore.hpp index d8acf83764042e..6c35277726f386 100644 --- a/inference-engine/src/plugin_api/ie_icore.hpp +++ b/inference-engine/src/plugin_api/ie_icore.hpp @@ -66,6 +66,21 @@ class ICore { virtual ExecutableNetwork LoadNetwork(const CNNNetwork& network, const 
std::string& deviceName, const std::map& config = {}) = 0; + /** + * @brief Creates an executable network from a model file. + * + * Users can create as many networks as they need and use + * them simultaneously (up to the limitation of the hardware resources) + * + * @param modelPath Path to model + * @param deviceName Name of device to load network to + * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * operation + * @return An executable network reference + */ + virtual ExecutableNetwork LoadNetwork(const std::string& modelPath, const std::string& deviceName, + const std::map& config) = 0; + /** * @brief Creates an executable network from a previously exported network * @param networkModel network model stream diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp index c1adf355f16582..7e264217bf999f 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp @@ -18,6 +18,8 @@ class MockICore : public InferenceEngine::ICore { const InferenceEngine::CNNNetwork&, const std::string&, const std::map&)); MOCK_METHOD3(LoadNetwork, InferenceEngine::ExecutableNetwork( const InferenceEngine::CNNNetwork&, const InferenceEngine::RemoteContext::Ptr &, const std::map&)); + MOCK_METHOD3(LoadNetwork, InferenceEngine::ExecutableNetwork( + const std::string &, const std::string &, const std::map&)); MOCK_METHOD3(ImportNetwork, InferenceEngine::ExecutableNetwork( std::istream&, const std::string&, const std::map&)); diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp index a36e1edcf93924..7f450f660f37a6 100644 --- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp +++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp @@ -15,6 +15,8 @@ class MockIInferencePlugin : public InferenceEngine::IInferencePlugin { MOCK_METHOD1(AddExtension, void(InferenceEngine::IExtensionPtr)); MOCK_METHOD2(LoadNetwork, std::shared_ptr( const InferenceEngine::CNNNetwork&, const std::map&)); + MOCK_METHOD2(LoadNetwork, InferenceEngine::ExecutableNetwork( + const std::string&, const std::map&)); MOCK_METHOD2(ImportNetwork, std::shared_ptr( const std::string&, const std::map&)); MOCK_METHOD1(SetConfig, void(const std::map &)); diff --git a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_infer_async_request_base_test.cpp b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_infer_async_request_base_test.cpp index a848d66fda6e1a..73ecba07d82441 100644 --- a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_infer_async_request_base_test.cpp +++ b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_infer_async_request_base_test.cpp @@ -199,9 +199,9 @@ class InferRequestTests : public ::testing::Test { mockIExeNet = std::make_shared(); ON_CALL(*mockIExeNet, CreateInferRequest()).WillByDefault(Return(mock_request)); std::unique_ptr mockIPluginPtr{new MockIInferencePlugin}; - ON_CALL(*mockIPluginPtr, 
LoadNetwork(_, _)).WillByDefault(Return(mockIExeNet)); + ON_CALL(*mockIPluginPtr, LoadNetwork(MatcherCast(_), _)).WillByDefault(Return(mockIExeNet)); plugin = InferenceEngine::InferencePlugin{InferenceEngine::details::SOPointer{mockIPluginPtr.release()}}; - exeNetwork = plugin.LoadNetwork({}, {}); + exeNetwork = plugin.LoadNetwork(CNNNetwork{}, {}); request = exeNetwork.CreateInferRequest(); _incorrectName = "incorrect_name"; _inputName = MockNotEmptyICNNNetwork::INPUT_BLOB_NAME; @@ -223,9 +223,9 @@ class InferRequestTests : public ::testing::Test { auto mockIExeNet = std::make_shared(); ON_CALL(*mockIExeNet, CreateInferRequest()).WillByDefault(Return(mockInferRequestInternal)); std::unique_ptr mockIPluginPtr{new MockIInferencePlugin}; - ON_CALL(*mockIPluginPtr, LoadNetwork(_, _)).WillByDefault(Return(mockIExeNet)); + ON_CALL(*mockIPluginPtr, LoadNetwork(MatcherCast(_), _)).WillByDefault(Return(mockIExeNet)); auto plugin = InferenceEngine::InferencePlugin{InferenceEngine::details::SOPointer{mockIPluginPtr.release()}}; - auto exeNetwork = plugin.LoadNetwork({}, {}); + auto exeNetwork = plugin.LoadNetwork(CNNNetwork{}, {}); return exeNetwork.CreateInferRequest(); } diff --git a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp index c37408606d2802..0557ed203b1337 100644 --- a/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp +++ b/inference-engine/tests/unit/inference_engine/cpp_interfaces/ie_memory_state_internal_test.cpp @@ -36,9 +36,9 @@ class VariableStateTests : public ::testing::Test { mockVariableStateInternal = make_shared(); ON_CALL(*mockExeNetworkInternal, CreateInferRequest()).WillByDefault(Return(mockInferRequestInternal)); std::unique_ptr mockIPluginPtr{new MockIInferencePlugin}; - ON_CALL(*mockIPluginPtr, LoadNetwork(_, _)).WillByDefault(Return(mockExeNetworkInternal)); + ON_CALL(*mockIPluginPtr, LoadNetwork(MatcherCast(_), _)).WillByDefault(Return(mockExeNetworkInternal)); plugin = InferenceEngine::InferencePlugin{InferenceEngine::details::SOPointer{mockIPluginPtr.release()}}; - net = plugin.LoadNetwork({}, {}); + net = plugin.LoadNetwork(CNNNetwork{}, {}); req = net.CreateInferRequest(); } }; diff --git a/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp b/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp index e205be6cd92f4d..1df88845775f3f 100644 --- a/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp +++ b/inference-engine/tests/unit/inference_engine/ie_executable_network_test.cpp @@ -20,6 +20,7 @@ #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp" using testing::_; +using testing::MatcherCast; using testing::Throw; using testing::Ref; using testing::Return; @@ -52,9 +53,9 @@ class ExecutableNetworkTests : public ::testing::Test { virtual void SetUp() { mockIExeNet = std::make_shared(); std::unique_ptr mockIPluginPtr{new MockIInferencePlugin}; - ON_CALL(*mockIPluginPtr, LoadNetwork(_, _)).WillByDefault(Return(mockIExeNet)); + ON_CALL(*mockIPluginPtr, LoadNetwork(MatcherCast(_), _)).WillByDefault(Return(mockIExeNet)); plugin = InferenceEngine::InferencePlugin{InferenceEngine::details::SOPointer{mockIPluginPtr.release()}}; - exeNetwork = plugin.LoadNetwork({}, {}); + exeNetwork = plugin.LoadNetwork(CNNNetwork{}, {}); } }; From 0383b44776cba46383dd804286977854fd3bfd46 Mon Sep 17 
00:00:00 2001
From: Pavel Esir
Date: Wed, 12 May 2021 22:49:40 +0300
Subject: [PATCH 19/27] added RNN-t conversion doc (#5139)

* added RNN-t conversion doc
* applied review comments
* a couple of corrections
* added pip3 everywhere
* fixed a typo
* applied review comments
* title name fix
* applied Tatiana's comments round 2
* fixed a typo for 'inference'
* fixed typo in MLCommons name
* moved to PyTorch* specific, applied comments
* pytorch_specific typo
* froze MLCommons revision to r1.0; fixed typo in MLCommons relative path
---
 .../pytorch_specific/Convert_RNNT.md | 107 ++++++++++++++++++
 docs/doxygen/ie_docs.xml             |   1 +
 2 files changed, 108 insertions(+)
 create mode 100644 docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md

diff --git a/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md
new file mode 100644
index 00000000000000..a58e886d4f4230
--- /dev/null
+++ b/docs/MO_DG/prepare_model/convert_model/pytorch_specific/Convert_RNNT.md
@@ -0,0 +1,107 @@
+# Convert PyTorch\* RNN-T Model to the Intermediate Representation (IR) {#openvino_docs_MO_DG_prepare_model_convert_model_pytorch_specific_Convert_RNNT}
+
+These instructions cover conversion of the RNN-T model from the [MLCommons](https://github.com/mlcommons) repository. Follow
+the steps below to export the PyTorch* model into ONNX* before converting it to IR:
+
+**Step 1**. Clone the RNN-T PyTorch implementation from the MLCommons repository (revision r1.0). Make a shallow clone to pull
+only the RNN-T model without the full repository. If you already have a full clone, skip this step and go to **Step 2**:
+```bash
+git clone -b r1.0 -n https://github.com/mlcommons/inference rnnt_for_openvino --depth 1
+cd rnnt_for_openvino
+git checkout HEAD speech_recognition/rnnt
+```
+
+**Step 2**. If you already have a full clone of the MLCommons inference repository, create a folder for
+the pretrained PyTorch model, where conversion into IR will take place. You will also need to specify the path to
+your full clone at **Step 5**. Skip this step if you have a shallow clone.
+
+```bash
+mkdir rnnt_for_openvino
+cd rnnt_for_openvino
+```
+
+**Step 3**. Download the pretrained weights for the PyTorch implementation from https://zenodo.org/record/3662521#.YG21DugzZaQ.
+On UNIX*-like systems you can use `wget`:
+```bash
+wget https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt
+```
+The link was taken from `setup.sh` in the `speech_recognition/rnnt` subfolder. You will get exactly the same weights as
+if you were following the steps from https://github.com/mlcommons/inference/tree/master/speech_recognition/rnnt.
+
+**Step 4**. Install the required Python* packages:
+```bash
+pip3 install torch toml
+```
+
+**Step 5**. Export the RNN-T model into ONNX* with the script below. Copy the code into a file named
+`export_rnnt_to_onnx.py` and run it in the current directory `rnnt_for_openvino`:
+
+> **NOTE**: If you already have a full clone of the MLCommons inference repository, you need to
+> specify the `mlcommons_inference_path` variable.
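+
+The script loads and migrates the downloaded checkpoint (renaming its `joint_net` keys and removing the audio
+preprocessor buffers), then exports the encoder, prediction, and joint parts of the model as three separate ONNX* files.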
+
+```python
+import toml
+import torch
+import sys
+
+
+def load_and_migrate_checkpoint(ckpt_path):
+    checkpoint = torch.load(ckpt_path, map_location="cpu")
+    migrated_state_dict = {}
+    # rename 'joint_net' keys to match the module layout of model_separable_rnnt
+    for key, value in checkpoint['state_dict'].items():
+        key = key.replace("joint_net", "joint.net")
+        migrated_state_dict[key] = value
+    # the audio preprocessor buffers are not part of the exported networks
+    del migrated_state_dict["audio_preprocessor.featurizer.fb"]
+    del migrated_state_dict["audio_preprocessor.featurizer.window"]
+    return migrated_state_dict
+
+
+mlcommons_inference_path = './'  # specify the relative path to the MLCommons inference repository
+checkpoint_path = 'DistributedDataParallel_1576581068.9962234-epoch-100.pt'
+config_toml = 'speech_recognition/rnnt/pytorch/configs/rnnt.toml'
+config = toml.load(config_toml)
+rnnt_vocab = config['labels']['labels']
+sys.path.insert(0, mlcommons_inference_path + 'speech_recognition/rnnt/pytorch')
+
+from model_separable_rnnt import RNNT
+
+model = RNNT(config['rnnt'], len(rnnt_vocab) + 1, feature_config=config['input_eval'])
+model.load_state_dict(load_and_migrate_checkpoint(checkpoint_path))
+
+# export the encoder with dynamic sequence length and batch dimensions
+seq_length, batch_size, feature_length = 157, 1, 240
+inp = torch.randn([seq_length, batch_size, feature_length])
+feature_length = torch.LongTensor([seq_length])
+x_padded, x_lens = model.encoder(inp, feature_length)
+torch.onnx.export(model.encoder, (inp, feature_length), "rnnt_encoder.onnx", opset_version=12,
+                  input_names=['input.1', '1'], dynamic_axes={'input.1': {0: 'seq_len', 1: 'batch'}})
+
+# export the prediction network with a dynamic batch dimension
+symbol = torch.LongTensor([[20]])
+hidden = torch.randn([2, batch_size, 320]), torch.randn([2, batch_size, 320])
+g, hidden = model.prediction.forward(symbol, hidden)
+torch.onnx.export(model.prediction, (symbol, hidden), "rnnt_prediction.onnx", opset_version=12,
+                  input_names=['input.1', '1', '2'],
+                  dynamic_axes={'input.1': {0: 'batch'}, '1': {1: 'batch'}, '2': {1: 'batch'}})
+
+# export the joint network with a dynamic batch dimension
+f = torch.randn([batch_size, 1, 1024])
+model.joint.forward(f, g)
+torch.onnx.export(model.joint, (f, g), "rnnt_joint.onnx", opset_version=12,
+                  input_names=['0', '1'], dynamic_axes={'0': {0: 'batch'}, '1': {0: 'batch'}})
+```
+
+```bash
+python3 export_rnnt_to_onnx.py
+```
+
+After completing this step, the files `rnnt_encoder.onnx`, `rnnt_prediction.onnx`, and `rnnt_joint.onnx` will be saved in
+the current directory.
+
+**Step 6**. Run the conversion commands:
+
+```bash
+python3 {path_to_openvino}/mo.py --input_model rnnt_encoder.onnx --input "input.1[157 1 240],1->157"
+python3 {path_to_openvino}/mo.py --input_model rnnt_prediction.onnx --input "input.1[1 1],1[2 1 320],2[2 1 320]"
+python3 {path_to_openvino}/mo.py --input_model rnnt_joint.onnx --input "0[1 1 1024],1[1 1 320]"
+```
+Please note that the hardcoded sequence length of 157 was taken from the MLCommons repository, but conversion to IR preserves
+network [reshapeability](../../../../IE_DG/ShapeInference.md); this means you can manually change the input shapes to any value either during conversion or
+at inference time.

diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml
index 688e80d0215d78..76fe06457d6a3d 100644
--- a/docs/doxygen/ie_docs.xml
+++ b/docs/doxygen/ie_docs.xml
@@ -56,6 +56,7 @@ limitations under the License. 
+ 
+ 

From f928f7fc56afce6ec7b6889aaf13ab8d3b21e8d0 Mon Sep 17 00:00:00 2001
From: Katarzyna Mitrus
Date: Thu, 13 May 2021 06:44:57 +0200
Subject: [PATCH 20/27] DeformablePSROIPooling reference implementation (#5116)

* Reference implementation init
* Backend tests
* Single layer tests
* Update offset range in layer tests
* Align int types with ng op
* Update spatial bins type
* Type update
* Fix sub bin calculation in mkldnn plugin
* Update summarize py file
* Align result type
* Refactoring
* Apply review comments
* Add serialize layer tests
* Adjust int comparison
* Adjust code style
* Use clamp reference
* Unify style
* Additional check for negative output dim
* Set tensor output shape in evaluate
* Add visit attributes test
* Small refactor
* Code style (namespace comments)
* Fix CommonTestsUtils::fill_data_roi usage. This function was generalized in PR #5432 and its signature has changed.
* Update license header with mention about original authors.
* Replace MIT SPDX full license name with short identifier.
* Fix sub bin calculation in mkldnn plugin

Co-authored-by: jdanieck
---
 .../nodes/mkldnn_psroi_pooling_node.cpp       |   4 +-
 .../single_layer/deformable_psroi_pooling.cpp |  35 +
 .../deformable_psroi_pooling.cpp              |  52 ++
 .../deformable_psroi_pooling.hpp              |  15 +
 .../single_layer/deformable_psroi_pooling.hpp |  48 ++
 .../single_layer/deformable_psroi_pooling.cpp | 131 ++++
 .../layer_tests_summary/utils/constants.py    |   1 +
 .../reference/deformable_psroi_pooling.hpp    | 231 ++++++
 .../core/src/op/deformable_psroi_pooling.cpp  |   3 +
 ngraph/test/CMakeLists.txt                    |   2 +
 .../backend/deformable_psroi_pooling.in.cpp   | 689 ++++++++++++++++++
 .../runtime/interpreter/evaluates_map.cpp     |  51 ++
 .../runtime/interpreter/opset_int_tbl.hpp     |   1 +
 .../type_prop/deformable_psroi_pooling.cpp    |  27 +
 .../visitors/op/deformable_psroi_pooling.cpp  |  46 ++
 15 files changed, 1334 insertions(+), 2 deletions(-)
 create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/deformable_psroi_pooling.cpp
 create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/include/single_layer_tests/deformable_psroi_pooling.hpp
 create mode 100644 inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp
 create mode 100644 inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp
 create mode 100644 ngraph/core/reference/include/ngraph/runtime/reference/deformable_psroi_pooling.hpp
 create mode 100644 ngraph/test/backend/deformable_psroi_pooling.in.cpp
 create mode 100644 ngraph/test/visitors/op/deformable_psroi_pooling.cpp

diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_psroi_pooling_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_psroi_pooling_node.cpp
index 521dd248bdd8d2..393ef27921a7e6 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_psroi_pooling_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_psroi_pooling_node.cpp
@@ -429,8 +429,8 @@ void MKLDNNPSROIPoolingNode::executeBilinearDeformable(const inputType *srcData,
     float binSizeH = roiHeight / static_cast(pooledHeight);
     float binSizeW = roiWidth / static_cast(pooledWidth);
-    float subBinSizeH = binSizeH / static_cast(spatialBinsX);
-    float subBinSizeW = binSizeW / static_cast(spatialBinsY);
+    float subBinSizeH = binSizeH / static_cast(spatialBinsY);
+    float 
subBinSizeW = binSizeW / static_cast(spatialBinsX); int partH = h * partSize / pooledHeight; int partW = w * partSize / pooledWidth; diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/deformable_psroi_pooling.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/deformable_psroi_pooling.cpp new file mode 100644 index 00000000000000..ffdedd39905a7b --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/deformable_psroi_pooling.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "shared_test_classes/single_layer/deformable_psroi_pooling.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + TEST_P(DeformablePSROIPoolingLayerTest, Serialize) { + Serialize(); + } + + const auto deformablePSROIParams = ::testing::Combine( + ::testing::ValuesIn(std::vector>{{3, 8, 16, 16}, {1, 8, 67, 32}}), // data input shape + ::testing::Values(std::vector{10, 5}), // rois input shape + // Empty offsets shape means test without optional third input + ::testing::ValuesIn(std::vector>{{}, {10, 2, 2, 2}}), // offsets input shape + ::testing::Values(2), // output_dim + ::testing::Values(2), // group_size + ::testing::ValuesIn(std::vector{1.0, 0.5, 0.0625}), // spatial scale + ::testing::ValuesIn(std::vector>{{1, 1}, {2, 2}, {3, 3}, {2, 3}}), // spatial_bins_x_y + ::testing::ValuesIn(std::vector{0.0, 0.01, 0.5}), // trans_std + ::testing::Values(2)); // part_size + + const auto deformablePSROICases_test_params = ::testing::Combine( + deformablePSROIParams, + ::testing::Values(InferenceEngine::Precision::FP32), // Net precision + ::testing::Values(CommonTestUtils::DEVICE_CPU)); // Device name + + INSTANTIATE_TEST_CASE_P(smoke_TestsDeformablePSROIPooling, DeformablePSROIPoolingLayerTest, deformablePSROICases_test_params, + DeformablePSROIPoolingLayerTest::getTestCaseName); +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp new file mode 100644 index 00000000000000..4a0a16c3822004 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/deformable_psroi_pooling.cpp @@ -0,0 +1,52 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/deformable_psroi_pooling.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + const auto deformablePSROIParams = ::testing::Combine( + ::testing::ValuesIn(std::vector>{{3, 8, 16, 16}, {1, 8, 67, 32}}), // data input shape + ::testing::Values(std::vector{10, 5}), // rois input shape + // Empty offsets shape means test without optional third input + ::testing::ValuesIn(std::vector>{{}, {10, 2, 2, 2}}), // offsets input shape + ::testing::Values(2), // output_dim + ::testing::Values(2), // group_size + ::testing::ValuesIn(std::vector{1.0, 0.5, 0.0625}), // spatial scale + ::testing::ValuesIn(std::vector>{{1, 1}, {2, 2}, {3, 3}, {2, 3}}), // spatial_bins_x_y + ::testing::ValuesIn(std::vector{0.0, 0.01, 0.5}), // trans_std + ::testing::Values(2)); + + const auto deformablePSROICases_test_params = ::testing::Combine( + deformablePSROIParams, + ::testing::Values(InferenceEngine::Precision::FP32), // Net 
precision + ::testing::Values(CommonTestUtils::DEVICE_CPU)); // Device name + + INSTANTIATE_TEST_CASE_P(smoke_TestsDeformablePSROIPooling, DeformablePSROIPoolingLayerTest, deformablePSROICases_test_params, + DeformablePSROIPoolingLayerTest::getTestCaseName); + + + const auto deformablePSROIParams_advanced = ::testing::Combine( + ::testing::ValuesIn(std::vector>{{2, 441, 63, 38}}), // data input shape + ::testing::Values(std::vector{30, 5}), // rois input shape + ::testing::Values(std::vector{30, 2, 3, 3}), // offsets input shape + ::testing::Values(49), // output_dim + ::testing::Values(3), // group_size + ::testing::ValuesIn(std::vector{0.0625}), // spatial scale + ::testing::ValuesIn(std::vector>{{4, 4}}), // spatial_bins_x_y + ::testing::ValuesIn(std::vector{0.1}), // trans_std + ::testing::Values(3)); // part_size + + const auto deformablePSROICases_test_params_advanced = ::testing::Combine( + deformablePSROIParams_advanced, + ::testing::Values(InferenceEngine::Precision::FP32), // Net precision + ::testing::Values(CommonTestUtils::DEVICE_CPU)); // Device name + + INSTANTIATE_TEST_CASE_P(smoke_TestsDeformablePSROIPooling_advanced, DeformablePSROIPoolingLayerTest, deformablePSROICases_test_params_advanced, + DeformablePSROIPoolingLayerTest::getTestCaseName); +} // namespace \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/deformable_psroi_pooling.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/deformable_psroi_pooling.hpp new file mode 100644 index 00000000000000..34773332376f16 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/deformable_psroi_pooling.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_layer/deformable_psroi_pooling.hpp" + +namespace LayerTestsDefinitions { + +TEST_P(DeformablePSROIPoolingLayerTest, CompareWithRefs) { + Run(); +} + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp new file mode 100644 index 00000000000000..f870e11aba8ee6 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" + +#include "shared_test_classes/base/layer_test_utils.hpp" + +namespace LayerTestsDefinitions { + +using deformablePSROISpecificParams = std::tuple< + std::vector, // data input shape + std::vector, // rois input shape + std::vector, // trans input shape + int64_t, // output_dim + int64_t, // group_size + float, // spatial_scale + std::vector, // spatial_bins_x_y + float, // trans_std + int64_t>; // part_size + +using deformablePSROILayerTestParams = std::tuple< + deformablePSROISpecificParams, + InferenceEngine::Precision, // Net precision + LayerTestsUtils::TargetDevice>; // Device name + +class DeformablePSROIPoolingLayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { + public: + 
static std::string getTestCaseName(testing::TestParamInfo obj); + void GenerateInputs() override; + + protected: + void SetUp() override; + + private: + float spatialScale_; + }; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp new file mode 100644 index 00000000000000..4f8fa43b06ae7d --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp @@ -0,0 +1,131 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/deformable_psroi_pooling.hpp" + + +namespace LayerTestsDefinitions { + + std::string DeformablePSROIPoolingLayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::vector dataShape; + std::vector roisShape; + std::vector offsetsShape; + int64_t outputDim; + int64_t groupSize; + float spatialScale; + std::vector spatialBinsXY; + float trans_std; + int64_t part_size; + InferenceEngine::Precision netPrecision; + std::string targetDevice; + deformablePSROISpecificParams opParams; + + std::tie(opParams, netPrecision, targetDevice) = obj.param; + std::tie(dataShape, roisShape, offsetsShape, outputDim, groupSize, spatialScale, spatialBinsXY, + trans_std, part_size) = opParams; + + std::ostringstream result; + + result << "data_shape=" << CommonTestUtils::vec2str(dataShape) << "_"; + result << "rois_shape=" << CommonTestUtils::vec2str(roisShape) << "_"; + result << "offsets_shape=" << CommonTestUtils::vec2str(offsetsShape) << "_"; + result << "out_dim=" << outputDim << "_"; + result << "group_size=" << groupSize << "_"; + result << "scale=" << spatialScale << "_"; + result << "bins_x=" << spatialBinsXY[0] << "_"; + result << "bins_y=" << spatialBinsXY[1] << "_"; + result << "trans_std=" << trans_std << "_"; + result << "part_size=" << part_size << "_"; + result << "prec=" << netPrecision.name() << "_"; + result << "dev=" << targetDevice; + return result.str(); + } + + void DeformablePSROIPoolingLayerTest::GenerateInputs() { + auto data_input_shape = cnnNetwork.getInputShapes().begin()->second; + const auto batch_distrib = data_input_shape[0] - 1; + const auto height = data_input_shape[2] / spatialScale_; + const auto width = data_input_shape[3] / spatialScale_; + + size_t it = 0; + for (const auto &input : cnnNetwork.getInputsInfo()) { + const auto &info = input.second; + InferenceEngine::Blob::Ptr blob; + + if (it == 0) { + blob = GenerateInput(*info); + } else if (it == 1) { + blob = make_blob_with_precision(info->getTensorDesc()); + blob->allocate(); + CommonTestUtils::fill_data_roi(blob, batch_distrib, + height, width, 1.0f, true); + } else { + blob = make_blob_with_precision(info->getTensorDesc()); + blob->allocate(); + std::vector offset_data = CommonTestUtils::generate_float_numbers(blob->size(), -0.9, 0.9); + CommonTestUtils::fill_data_float_array(blob, &offset_data[0], blob->size()); + } + inputs.push_back(blob); + it++; + } + } + + void DeformablePSROIPoolingLayerTest::SetUp() { + std::vector dataShape; + std::vector roisShape; + std::vector offsetsShape; + int64_t outputDim; + int64_t groupSize; + std::string mode = "bilinear_deformable"; + std::vector spatialBinsXY; + float trans_std; + int64_t part_size; + InferenceEngine::Precision netPrecision; + deformablePSROISpecificParams opParams; + + std::tie(opParams, netPrecision, 
targetDevice) = this->GetParam(); + std::tie(dataShape, roisShape, offsetsShape, outputDim, groupSize, spatialScale_, spatialBinsXY, + trans_std, part_size) = opParams; + + + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + ngraph::ParameterVector params; + ngraph::OutputVector inputs; + std::shared_ptr defomablePSROIPooling; + + if (offsetsShape.empty()) { // Test without optional third input (offsets) + params = ngraph::builder::makeParams(ngPrc, {dataShape, roisShape}); + inputs = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + defomablePSROIPooling = std::make_shared(inputs[0], + inputs[1], + outputDim, + spatialScale_, + groupSize, + mode, + spatialBinsXY[0], + spatialBinsXY[1], + trans_std, + part_size); + } else { + params = ngraph::builder::makeParams(ngPrc, {dataShape, roisShape, offsetsShape}); + inputs = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + defomablePSROIPooling = std::make_shared(inputs[0], + inputs[1], + inputs[2], + outputDim, + spatialScale_, + groupSize, + mode, + spatialBinsXY[0], + spatialBinsXY[1], + trans_std, + part_size); + } + + ngraph::ResultVector results{std::make_shared(defomablePSROIPooling)}; + function = std::make_shared(results, params, "deformable_psroi_pooling"); + } +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py index 123ba098c7e2b1..f2d02038c9a016 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py @@ -20,6 +20,7 @@ 'Convolution-1', 'Constant-1', 'DeformableConvolution-1', + 'DeformablePSROIPooling-1', 'DetectionOutput-1', 'Divide-1', 'ExperimentalDetectronDetectionOutput-6', diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/deformable_psroi_pooling.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/deformable_psroi_pooling.hpp new file mode 100644 index 00000000000000..8974687724c099 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/deformable_psroi_pooling.hpp @@ -0,0 +1,231 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +// DeformablePSROIPooling implementation was inspired by +// https://github.com/msracver/Deformable-ConvNets +// Copyright (c) 2017 Microsoft +// SPDX-License-Identifier: MIT + +#pragma once + +#include +#include +#include +#include + +#include "clamp.hpp" +#include "ngraph/shape.hpp" + +namespace ngraph +{ + namespace runtime + { + namespace reference + { + template + void deformable_psroi_pooling(const T* data_input, + const Shape& data_input_shape, + const T* rois_input, + const Shape& rois_input_shape, + const T* offsets_input, + const Shape& offsets_input_shape, + T* output, + const Shape& output_shape, + const std::string& mode_str, + const float spatial_scale, + const int64_t spatial_bins_x, + const int64_t spatial_bins_y, + const float trans_std, + const int64_t part_size) + { + const size_t channels_in = data_input_shape[1]; + const size_t height_in = data_input_shape[2]; + const size_t width_in = data_input_shape[3]; + + const size_t rois_count = output_shape[0]; + const size_t channels_out = output_shape[1]; + const size_t height_out = output_shape[2]; + 
const size_t width_out = output_shape[3]; + + std::fill(output, output + shape_size(output_shape), T{0}); + + // Single ROI is described by (batch_id, x1, y1, x2, y2) + const size_t roi_attrs_count = 5; + + for (size_t roi_idx = 0; roi_idx < rois_count; ++roi_idx) + { + // Pointer to the beginning of the ROI coords tuple + const T* roi = rois_input + roi_idx * roi_attrs_count; + + // Index of the corresponding input batch + int64_t roi_batch_id = roi[0]; + if (roi_batch_id < 0) + continue; + + // Left top ROI corner + const float roi_x1 = + static_cast(std::round(roi[1])) * spatial_scale - 0.5f; + const float roi_y1 = + static_cast(std::round(roi[2])) * spatial_scale - 0.5f; + // Right down ROI corner + const float roi_x2 = + static_cast(std::round(roi[3]) + 1.0f) * spatial_scale - 0.5f; + const float roi_y2 = + static_cast(std::round(roi[4]) + 1.0f) * spatial_scale - 0.5f; + + const float roi_width = std::max(roi_x2 - roi_x1, 0.1f); + const float roi_height = std::max(roi_y2 - roi_y1, 0.1f); + + const float bin_width = roi_width / static_cast(width_out); + const float bin_height = roi_height / static_cast(height_out); + + size_t c_idx_in = 0; + for (size_t c_idx_out = 0; c_idx_out < channels_out; ++c_idx_out) + { + for (size_t h_idx_out = 0; h_idx_out < height_out; ++h_idx_out) + { + // Next bin is taken from the next input channel + for (size_t w_idx_out = 0; w_idx_out < width_out; + ++w_idx_out, ++c_idx_in) + { + const size_t out_value_idx = + ((roi_idx * channels_out + c_idx_out) * height_out + + h_idx_out) * + width_out + + w_idx_out; + + // Left top corner of bin + float bin_x1_idx = roi_x1 + w_idx_out * bin_width; + float bin_y1_idx = roi_y1 + h_idx_out * bin_height; + + // Take offsets from optional input + if (offsets_input != nullptr && offsets_input_shape.size() == 4) + { + const auto num_coords = 2; // (x, y) + const size_t coords_sub_channels = + offsets_input_shape[1] / num_coords; + const size_t class_sub_channels = + channels_out / coords_sub_channels; + const size_t roi_channel_idx = c_idx_out / class_sub_channels; + + const size_t off_bin_w_idx = w_idx_out * part_size / width_out; + const size_t off_bin_h_idx = h_idx_out * part_size / height_out; + + const size_t offsets_channel_idx = + (roi_idx * coords_sub_channels + roi_channel_idx) * + num_coords; + + const size_t x_offset_idx = + (offsets_channel_idx * part_size + off_bin_h_idx) * + part_size + + off_bin_w_idx; + + const size_t y_offset_idx = + ((offsets_channel_idx + 1) * part_size + off_bin_h_idx) * + part_size + + off_bin_w_idx; + + T x_offset_value = offsets_input[x_offset_idx]; + T y_offset_value = offsets_input[y_offset_idx]; + + x_offset_value *= trans_std; + y_offset_value *= trans_std; + + // Move bin position by normalized offset values + bin_x1_idx += (x_offset_value * roi_width); + bin_y1_idx += (y_offset_value * roi_height); + } + + // Each bin is divided into sub-bins + // Values of sub-bins are calculated by bilinear interpolation + // Value of single bin is average of its sub-bins + const float sub_bin_width = + bin_width / static_cast(spatial_bins_x); + const float sub_bin_height = + bin_height / static_cast(spatial_bins_y); + + T sub_bins_val_sum = 0; + size_t legit_sub_bin_count = 0; + for (int sub_bin_h_idx = 0; sub_bin_h_idx < spatial_bins_y; + ++sub_bin_h_idx) + { + float sub_bin_y1_idx = + bin_y1_idx + sub_bin_h_idx * sub_bin_height; + if (sub_bin_y1_idx < -0.5 || sub_bin_y1_idx > height_in - 0.5) + continue; + + for (int sub_bin_w_idx = 0; sub_bin_w_idx < spatial_bins_x; + ++sub_bin_w_idx) + { 
+ float sub_bin_x1_idx = + bin_x1_idx + sub_bin_w_idx * sub_bin_width; + if (sub_bin_x1_idx < -0.5 || + sub_bin_x1_idx > width_in - 0.5) + continue; + + clamp(&sub_bin_x1_idx, + &sub_bin_x1_idx, + 0.f, + width_in - 1.f, + 1); + clamp(&sub_bin_y1_idx, + &sub_bin_y1_idx, + 0.f, + height_in - 1.f, + 1); + + // Calculate value for sub-bin by bilinear interpolation + const int64_t left_x = + static_cast(std::floor(sub_bin_x1_idx)); + const int64_t right_x = + static_cast(std::ceil(sub_bin_x1_idx)); + const int64_t top_y = + static_cast(std::floor(sub_bin_y1_idx)); + const int64_t bottom_y = + static_cast(std::ceil(sub_bin_y1_idx)); + + const T* data_channel_ptr = + data_input + (roi_batch_id * channels_in + c_idx_in) * + height_in * width_in; + + const T top_left_sample = + data_channel_ptr[top_y * width_in + left_x]; + const T top_right_sample = + data_channel_ptr[top_y * width_in + right_x]; + const T bottom_left_sample = + data_channel_ptr[bottom_y * width_in + left_x]; + const T bottom_right_sample = + data_channel_ptr[bottom_y * width_in + right_x]; + + const float delta_left_x = + std::fabs(sub_bin_x1_idx - left_x); + const float delta_top_y = std::fabs(sub_bin_y1_idx - top_y); + + const T top_interp = + top_left_sample + + (top_right_sample - top_left_sample) * delta_left_x; + const T bottom_interp = + bottom_left_sample + + (bottom_right_sample - bottom_left_sample) * + delta_left_x; + + const T sub_bin_value = + top_interp + (bottom_interp - top_interp) * delta_top_y; + + legit_sub_bin_count++; + sub_bins_val_sum += sub_bin_value; + } + } + // Calculate average of sub_bin values for single ROI bin + if (legit_sub_bin_count != 0) + { + output[out_value_idx] = sub_bins_val_sum / legit_sub_bin_count; + } + } + } + } + } + } + } // namespace reference + } // namespace runtime +} // namespace ngraph diff --git a/ngraph/core/src/op/deformable_psroi_pooling.cpp b/ngraph/core/src/op/deformable_psroi_pooling.cpp index 01fd57610d96b0..651d38694eedee 100644 --- a/ngraph/core/src/op/deformable_psroi_pooling.cpp +++ b/ngraph/core/src/op/deformable_psroi_pooling.cpp @@ -103,6 +103,9 @@ void op::v1::DeformablePSROIPooling::validate_and_infer_types() NODE_VALIDATION_CHECK( this, m_group_size > 0, "Value of `group_size` attribute has to be greater than 0 "); + NODE_VALIDATION_CHECK( + this, m_output_dim > 0, "Value of `output_dim` attribute has to be greater than 0 "); + int64_t output_rank = 4; std::vector output_dim_vec(output_rank, Dimension::dynamic()); if (box_coords_pshape.rank().is_static()) diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index ea68af8da2230f..835c020e1c4fd2 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -216,6 +216,7 @@ set(SRC visitors/op/constant.cpp visitors/op/convert.cpp visitors/op/cum_sum.cpp + visitors/op/deformable_psroi_pooling.cpp visitors/op/detection_output.cpp visitors/op/elu.cpp visitors/op/extractimagepatches.cpp @@ -336,6 +337,7 @@ set(MULTI_TEST_SRC backend/ctc_greedy_decoder.in.cpp backend/ctc_greedy_decoder_seq_len.in.cpp backend/cum_sum.in.cpp + backend/deformable_psroi_pooling.in.cpp backend/detection_output.in.cpp backend/dft.in.cpp backend/divide.in.cpp diff --git a/ngraph/test/backend/deformable_psroi_pooling.in.cpp b/ngraph/test/backend/deformable_psroi_pooling.in.cpp new file mode 100644 index 00000000000000..0ad191ff5b91ef --- /dev/null +++ b/ngraph/test/backend/deformable_psroi_pooling.in.cpp @@ -0,0 +1,689 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + 
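+// The backend tests below exercise DeformablePSROIPooling with and without the optional
+// offsets input, with zero and non-zero offsets, and with ROIs exceeding the input size.
+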
+#include "gtest/gtest.h" + +#include "ngraph/op/deformable_psroi_pooling.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" +#include "util/test_control.hpp" + +using namespace ngraph; + +static std::string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_offset_00) +{ + const float spatial_scale = 0.0625; + const int64_t group_size = 2; + const size_t channels_in = 16; + size_t output_dim = channels_in / (group_size * group_size); // 4 + + size_t rois_count = 2; + + auto data_shape = Shape{1, channels_in, 2, 2}; + auto rois_shape = Shape{rois_count, 5}; + auto offsets_shape = Shape{rois_count, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size); + + Shape output_shape{rois_count, output_dim, group_size, group_size}; + // ASSERT_EQ(def_psroi_pool->get_output_shape(0), (output_shape)); + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + // std::fill(data_values.begin(), data_values.end(), 0.1); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 2, + 4, + 6, + + 0, + 0, + 3, + 10, + 4, + }; + std::vector offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.0); + + std::vector expected_output_values{// First ROI + 0, 4, + 8, 12, + + 16, 20, + 24, 28, + + 32, 36, + 40, 44, + + 48, 52, + 56, 60, + + // Second ROI + 0, 4, + 8, 12, + + 16, 20, + 24, 28, + + 32, 36, + 40, 44, + + 48, 52, + 56, 60}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_offset_0p2) +{ + const float spatial_scale = 0.0625; + const int64_t group_size = 2; + const size_t channels_in = 16; + size_t output_dim = channels_in / (group_size * group_size); // 4 + + size_t rois_count = 2; + + auto data_shape = Shape{1, channels_in, 2, 2}; + auto rois_shape = Shape{rois_count, 5}; + auto offsets_shape = Shape{rois_count, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size); + + Shape output_shape{rois_count, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 2, + 4, + 6, + + 0, + 0, + 3, + 10, + 4, + }; + std::vector offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.2); + + std::vector expected_output_values{// First ROI + 0, 4, + 8, 12, + + 16, 20, + 24, 28, + + 32, 36, + 40, 44, + + 48, 52, + 56, 60, + + // Second ROI + 0, 4, + 8, 12, + + 16, 20, + 
24, 28, + + 32, 36, + 40, 44, + + 48, 52, + 56, 60}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_offset_0p5) +{ + const float spatial_scale = 0.0625; + const int64_t group_size = 2; + const size_t channels_in = 16; + size_t output_dim = channels_in / (group_size * group_size); // 4 + + size_t rois_count = 2; + + auto data_shape = Shape{1, channels_in, 2, 2}; + auto rois_shape = Shape{rois_count, 5}; + auto offsets_shape = Shape{rois_count, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size); + + Shape output_shape{rois_count, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 2, + 4, + 6, + + 0, + 5, + 3, + 10, + 4, + }; + std::vector offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.5); + + std::vector expected_output_values{ + // First ROI + 0, 4, + 8, 12, + + 16, 20, + 24, 28, + + 32, 36, + 40, 44, + + 48, 52, + 56, 60, + + // Second ROI + 0, 4.1875, + 8, 12.1875, + + 16, 20.1875, + 24, 28.1875, + + 32, 36.1875, + 40, 44.1875, + + 48, 52.1875, + 56, 60.1875}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_roi_oversize) +{ + const float spatial_scale = 0.0625; + const int64_t group_size = 2; + const size_t channels_in = 16; + size_t output_dim = channels_in / (group_size * group_size); // 4 + + size_t rois_count = 2; + + auto data_shape = Shape{1, channels_in, 2, 2}; + auto rois_shape = Shape{rois_count, 5}; + auto offsets_shape = Shape{rois_count, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size); + + Shape output_shape{rois_count, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 10, + 10, + 20, + 20, + + 0, + 100, + 100, + 200, + 200, + }; + std::vector offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.0); + + std::vector expected_output_values{ + 0.375, 4.71875, 9.0625, 13.40625, + 16.375, 20.71875, 25.0625, 29.40625, + 32.375, 36.71875, 41.0625, 45.40625, + 48.375, 52.71875, 57.0625, 61.40625, + 0, 0, 0, 0, 
+ 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_no_offset_input) +{ + const float spatial_scale = 1; + const int64_t group_size = 2; + const size_t spatial_bins_x = 1; + const size_t spatial_bins_y = 1; + const float trans_std = 1.0; + const int64_t part_size = group_size; + + const size_t batch_in = 1; + const size_t channels_in = 8; + const size_t width_in = 3; + const size_t height_in = 3; + + size_t output_dim = channels_in / (group_size * group_size); // 2 + + const auto rois_dim = 1; + + auto data_shape = Shape{batch_in, channels_in, height_in, width_in}; + auto rois_shape = Shape{rois_dim, 5}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, output_dim, spatial_scale, group_size, "bilinear_deformable", spatial_bins_x, spatial_bins_y, trans_std, part_size); + + Shape output_shape{rois_dim, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 1, + 2, + 2, + }; + + std::vector expected_output_values{2.0, 12.0, 23.0, 33.0, 38.0, 48.0, 59.0, 69.0}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_offset_zero) +{ + const float spatial_scale = 1; + const int64_t group_size = 2; + const size_t spatial_bins_x = 1; + const size_t spatial_bins_y = 1; + const float trans_std = 1.0; + const int64_t part_size = group_size; + + const size_t batch_in = 1; + const size_t channels_in = 8; + const size_t width_in = 3; + const size_t height_in = 3; + + size_t output_dim = channels_in / (group_size * group_size); // 2 + + const auto rois_dim = 1; + + auto data_shape = Shape{batch_in, channels_in, height_in, width_in}; + auto rois_shape = Shape{rois_dim, 5}; + auto offsets_shape = Shape{rois_dim, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size, "bilinear_deformable", spatial_bins_x, spatial_bins_y, trans_std, part_size); + + Shape output_shape{rois_dim, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 1, + 2, + 2, + }; + + + std::vector offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.0); + + std::vector expected_output_values{2.0, 12.0, 23.0, 33.0, 38.0, 48.0, 59.0, 69.0}; + + auto f = + std::make_shared(def_psroi_pool, + 
ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_offset_01) +{ + const float spatial_scale = 1; + const int64_t group_size = 2; + const size_t spatial_bins_x = 1; + const size_t spatial_bins_y = 1; + const float trans_std = 1.0; + const int64_t part_size = group_size; + + const size_t batch_in = 1; + const size_t channels_in = 8; + const size_t width_in = 3; + const size_t height_in = 3; + + size_t output_dim = channels_in / (group_size * group_size); // 2 + + const auto rois_dim = 1; + + auto data_shape = Shape{batch_in, channels_in, height_in, width_in}; + auto rois_shape = Shape{rois_dim, 5}; + auto offsets_shape = Shape{rois_dim, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size, + "bilinear_deformable", spatial_bins_x, spatial_bins_y, trans_std, part_size); + + Shape output_shape{rois_dim, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 1, + 2, + 2, + }; + + + std::vector offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.1); + + std::vector expected_output_values{2.8, 12.8, 23.8, 33.8, 38.8, 48.8, 59.8, 69.8}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_offset_05) +{ + const float spatial_scale = 1; + const int64_t group_size = 2; + const size_t spatial_bins_x = 1; + const size_t spatial_bins_y = 1; + const float trans_std = 1.0; + const int64_t part_size = group_size; + + const size_t batch_in = 1; + const size_t channels_in = 8; + const size_t width_in = 3; + const size_t height_in = 3; + + size_t output_dim = channels_in / (group_size * group_size); // 2 + + const auto rois_dim = 1; + + auto data_shape = Shape{batch_in, channels_in, height_in, width_in}; + auto rois_shape = Shape{rois_dim, 5}; + auto offsets_shape = Shape{rois_dim, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size, "bilinear_deformable", spatial_bins_x, spatial_bins_y, trans_std, part_size); + + Shape output_shape{rois_dim, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::iota(data_values.begin(), data_values.end(), 0); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 1, + 2, + 2, + }; + + + std::vector 
offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.5); + + std::vector expected_output_values{6., 15.5, 25.5, 35., 42., 51.5, 61.5, 71.}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_single_value) +{ + const float spatial_scale = 0.0625; + const int64_t group_size = 2; + const size_t channels_in = 16; + size_t output_dim = channels_in / (group_size * group_size); // 4 + + size_t rois_count = 1; + + auto data_shape = Shape{1, channels_in, 2, 2}; + auto rois_shape = Shape{rois_count, 5}; + auto offsets_shape = Shape{rois_count, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size); + + Shape output_shape{rois_count, output_dim, group_size, group_size}; + + std::vector data_values(shape_size(data_shape)); + std::fill(data_values.begin(), data_values.end(), 0.1); + + std::vector rois_data{ + // input_batch_id, x1, y1, x2, y2 + 0, + 10, + 10, + 10, + 10, + }; + + + std::vector offsets_values(shape_size(offsets_shape)); + std::fill(offsets_values.begin(), offsets_values.end(), 0.1); + + std::vector expected_output_values{0.1, 0.1, + 0.1, 0.1, + + 0.1, 0.1, + 0.1, 0.1, + + 0.1, 0.1, + 0.1, 0.1, + + 0.1, 0.1, + 0.1, 0.1}; + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + test.add_input(data_values); + test.add_input(rois_data); + test.add_input(offsets_values); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} + +NGRAPH_TEST(${BACKEND_NAME}, deformable_psroi_pooling_single_value_big_shape) +{ +const int64_t output_dim = 112; + const float spatial_scale = 0.0625; + const int64_t group_size = 3; + + size_t rois_count = 2; + + auto data_shape = Shape{1, 1024, 63, 38}; + auto rois_shape = Shape{rois_count, 5}; + auto offsets_shape = Shape{rois_count, 2, group_size, group_size}; + + auto data_param = std::make_shared(element::f32, data_shape); + auto rois_param = std::make_shared(element::f32, rois_shape); + auto offsets_param = std::make_shared(element::f32, offsets_shape); + + auto def_psroi_pool = std::make_shared( + data_param, rois_param, offsets_param, output_dim, spatial_scale, group_size); + + Shape output_shape{rois_count, output_dim, group_size, group_size}; + + std::vector input_data(shape_size(data_shape)); + std::fill(input_data.begin(), input_data.end(), 0.1); + + std::vector input_rois{ + // input_batch_id, x1, y1, x2, y2 + 0, + 1, + 2, + 4, + 6, + + 0, + 0, + 3, + 10, + 4, + }; + + std::vector input_offsets(shape_size(offsets_shape)); + std::fill(input_offsets.begin(), input_offsets.end(), 0.0); + + std::vector expected_output_values(shape_size(output_shape)); + std::fill(expected_output_values.begin(), expected_output_values.end(), 0.1); + + auto f = + std::make_shared(def_psroi_pool, + ParameterVector{data_param, rois_param, offsets_param}); + + auto test = test::TestCase(f); + 
test.add_input(input_data); + test.add_input(input_rois); + test.add_input(input_offsets); + + test.add_expected_output(output_shape, expected_output_values); + test.run(); +} diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp index 1fc9a8d2f8f18e..67515e5cf1e1e8 100644 --- a/ngraph/test/runtime/interpreter/evaluates_map.cpp +++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -2259,6 +2260,56 @@ namespace return true; } + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { + using T = typename element_type_traits::value_type; + NGRAPH_CHECK(inputs.size() > 1 && inputs[1]->get_shape().size() == 2, + "2D tensor must be provided as second input. "); + outputs[0]->set_shape({inputs[1]->get_shape()[0], + static_cast(op->get_output_dim()), + static_cast(op->get_group_size()), + static_cast(op->get_group_size())}); + + const bool has_offset_intput = inputs.size() == 3; + if (has_offset_intput) + { + runtime::reference::deformable_psroi_pooling(inputs[0]->get_data_ptr(), + inputs[0]->get_shape(), + inputs[1]->get_data_ptr(), + inputs[1]->get_shape(), + inputs[2]->get_data_ptr(), + inputs[2]->get_shape(), + outputs[0]->get_data_ptr(), + outputs[0]->get_shape(), + op->get_mode(), + op->get_spatial_scale(), + op->get_spatial_bins_x(), + op->get_spatial_bins_y(), + op->get_trans_std(), + op->get_part_size()); + } + else + { + runtime::reference::deformable_psroi_pooling(inputs[0]->get_data_ptr(), + inputs[0]->get_shape(), + inputs[1]->get_data_ptr(), + inputs[1]->get_shape(), + nullptr, + ngraph::Shape(), + outputs[0]->get_data_ptr(), + outputs[0]->get_shape(), + op->get_mode(), + op->get_spatial_scale(), + op->get_spatial_bins_x(), + op->get_spatial_bins_y(), + op->get_trans_std(), + op->get_part_size()); + } + return true; + } template bool evaluate(const shared_ptr& op, diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp index abf494d55d328e..16907f55ff79dd 100644 --- a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp +++ b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp @@ -41,6 +41,7 @@ NGRAPH_OP(BinaryConvolution, ngraph::op::v1) NGRAPH_OP(ConvertLike, op::v1) NGRAPH_OP(Convolution, ngraph::op::v1) NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) +NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) NGRAPH_OP(GroupConvolution, ngraph::op::v1) NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) NGRAPH_OP(DeformableConvolution, ngraph::op::v1) diff --git a/ngraph/test/type_prop/deformable_psroi_pooling.cpp b/ngraph/test/type_prop/deformable_psroi_pooling.cpp index 61474d800a5e6a..d20552ca699cbd 100644 --- a/ngraph/test/type_prop/deformable_psroi_pooling.cpp +++ b/ngraph/test/type_prop/deformable_psroi_pooling.cpp @@ -131,6 +131,33 @@ TEST(type_prop, deformable_psroi_pooling_invalid_group_size) } } +TEST(type_prop, deformable_psroi_pooling_invalid_output_dim) +{ + const float spatial_scale = 0.0625; + const auto rois_dim = 300; + const int64_t group_size = 3; + + try + { + const int64_t output_dim = -882; + + auto input_data = make_shared(element::f32, PartialShape{2, 7938, 63, 38}); + auto input_coords = make_shared(element::f32, PartialShape{rois_dim, 5}); + auto def_psroi_pool = make_shared( + input_data, input_coords, output_dim, spatial_scale, group_size); + + FAIL() << "Invalid output_dim not detected"; + } 
+    catch (const NodeValidationFailure& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), std::string("Value of `output_dim` attribute has to be greater than 0"));
+    }
+    catch (...)
+    {
+        FAIL() << "Unknown exception was thrown";
+    }
+}
+
 TEST(type_prop, deformable_psroi_pooling_invalid_data_input_rank)
 {
     const float spatial_scale = 0.0625;
diff --git a/ngraph/test/visitors/op/deformable_psroi_pooling.cpp b/ngraph/test/visitors/op/deformable_psroi_pooling.cpp
new file mode 100644
index 00000000000000..9512e51dec11ab
--- /dev/null
+++ b/ngraph/test/visitors/op/deformable_psroi_pooling.cpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+
+#include "ngraph/ngraph.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/opsets/opset1.hpp"
+
+#include "util/visitor.hpp"
+
+using namespace std;
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
+TEST(attributes, deformable_psroi_pooling_op)
+{
+    NodeBuilder::get_ops().register_factory<opset1::DeformablePSROIPooling>();
+    auto input = make_shared<op::Parameter>(element::f32, Shape{2, 16, 67, 32});
+    auto coords = make_shared<op::Parameter>(element::f32, Shape{300, 5});
+
+    const int output_dim = 4;
+    const float spatial_scale = 0.0625;
+    const int group_size = 2;
+    string mode = "bilinear_deformable";
+    const int spatial_bins_x = 2;
+    const int spatial_bins_y = 3;
+    const float trans_std = 0.1;
+    const int part_size = 3;
+
+    auto op = make_shared<opset1::DeformablePSROIPooling>(
+        input, coords, output_dim, spatial_scale, group_size, mode, spatial_bins_x, spatial_bins_y, trans_std, part_size);
+    NodeBuilder builder(op);
+    auto g_op = as_type_ptr<opset1::DeformablePSROIPooling>(builder.create());
+
+    EXPECT_EQ(g_op->get_output_dim(), op->get_output_dim());
+    EXPECT_EQ(g_op->get_spatial_scale(), op->get_spatial_scale());
+    EXPECT_EQ(g_op->get_group_size(), op->get_group_size());
+    EXPECT_EQ(g_op->get_mode(), op->get_mode());
+    EXPECT_EQ(g_op->get_spatial_bins_x(), op->get_spatial_bins_x());
+    EXPECT_EQ(g_op->get_spatial_bins_y(), op->get_spatial_bins_y());
+    EXPECT_EQ(g_op->get_trans_std(), op->get_trans_std());
+    EXPECT_EQ(g_op->get_part_size(), op->get_part_size());
+}
From 39fde540d2e11e350ec9ca4d6e391ba19a65dd94 Mon Sep 17 00:00:00 2001
From: Bartosz Lesniewski
Date: Thu, 13 May 2021 06:45:29 +0200
Subject: [PATCH 21/27] Revise Unsqueeze op - op class (#5555)

* revise unsqueeze op class

* Added checks and tests for second input incorrect type and shape

* Remove axes type constraints to keep backward compatibility
---
 ngraph/core/src/op/unsqueeze.cpp    | 12 ++++---
 ngraph/test/type_prop/unsqueeze.cpp | 51 +++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+), 4 deletions(-)

diff --git a/ngraph/core/src/op/unsqueeze.cpp b/ngraph/core/src/op/unsqueeze.cpp
index dd593d0920e155..cc1cfa9da0a2a5 100644
--- a/ngraph/core/src/op/unsqueeze.cpp
+++ b/ngraph/core/src/op/unsqueeze.cpp
@@ -34,23 +34,27 @@ void op::v0::Unsqueeze::validate_and_infer_types()
     const auto data_rank = data_partial_shape.rank();

     const auto axes_constant = get_constant_from_source(input_value(1));
+    auto axes_pshape = get_input_partial_shape(1);
+
+    NODE_VALIDATION_CHECK(this,
+                          axes_pshape.rank().compatible(0) || axes_pshape.rank().compatible(1),
+                          "Second input (axes) should not be of rank higher than 1. Got: ",
+                          axes_pshape.rank().get_length());
+
     if (data_rank.is_dynamic() || !axes_constant)
     {
         set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
         return;
     }

-    uint64_t data_rank_value = data_partial_shape.rank().get_length();
-
-    // Get value of axes from Constant
     const auto axes_values = axes_constant->cast_vector<int64_t>();
+    uint64_t data_rank_value = data_partial_shape.rank().get_length();
     const int64_t expanded_rank = data_rank_value + axes_values.size();

     NODE_VALIDATION_CHECK(this, !axes_values.empty(), "'axes' input is mandatory");

     auto normalized_axes = normalize_axes(this->description(), axes_values, expanded_rank);
     set<int64_t> axes(begin(normalized_axes), end(normalized_axes));
-
     vector<Dimension> output_shape{data_partial_shape};
     for (auto axis : axes)
     {
diff --git a/ngraph/test/type_prop/unsqueeze.cpp b/ngraph/test/type_prop/unsqueeze.cpp
index 252e3139b73aa8..c38ed797437f59 100644
--- a/ngraph/test/type_prop/unsqueeze.cpp
+++ b/ngraph/test/type_prop/unsqueeze.cpp
@@ -37,3 +37,54 @@ TEST(type_prop, unsqueeze_dynamic)
                           Dimension::dynamic(),
                           Dimension::dynamic()}));
 }
+
+TEST(type_prop, unsqueeze_incorrect_axes_shape)
+{
+    auto param = make_shared<op::Parameter>(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node =
+        make_shared<op::Constant>(element::u64, Shape{1, 1, 1}, vector<int64_t>{1});
+
+    try
+    {
+        auto unsqueeze = make_shared<op::v0::Unsqueeze>(param, axes_node);
+        FAIL() << "Unsqueeze axes invalid rank not detected";
+    }
+    catch (const NodeValidationFailure& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(),
+                             "Second input (axes) should not be of rank higher than 1");
+    }
+    catch (...)
+    {
+        FAIL() << "Deduced type check failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, unsqueeze_empty_axes)
+{
+    auto param = make_shared<op::Parameter>(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node = make_shared<op::Constant>(element::u64, Shape{0}, vector<int64_t>{});
+    try
+    {
+        auto unsqueeze = make_shared<op::v0::Unsqueeze>(param, axes_node);
+        FAIL() << "Unsqueeze axes empty not detected";
+    }
+    catch (const NodeValidationFailure& error)
+    {
+        EXPECT_HAS_SUBSTRING(error.what(), "'axes' input is mandatory");
+    }
+    catch (...)
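+    // (Illustrative aside, not taken from the patch: for well-formed axes such
+    //  as Shape{1} with value {1}, unsqueezing a {4, 1, 4, 1, 8} input would
+    //  yield {4, 1, 1, 4, 1, 8}, one new dimension inserted per axis entry.)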
+    {
+        FAIL() << "Deduced type check failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, unsqueeze_dynamic_axes)
+{
+    auto param = make_shared<op::Parameter>(element::f32, Shape{4, 1, 4, 1, 8});
+    auto axes_node = make_shared<op::Parameter>(element::u64, PartialShape::dynamic());
+
+    auto unsqueeze = make_shared<op::v0::Unsqueeze>(param, axes_node);
+    ASSERT_EQ(unsqueeze->get_element_type(), element::f32);
+    ASSERT_EQ(unsqueeze->get_output_partial_shape(0), PartialShape::dynamic());
+}
\ No newline at end of file
From 07a49184c0001ab609169b9f0755ce564cd543cb Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Thu, 13 May 2021 07:52:55 +0300
Subject: [PATCH 22/27] Fixed typo in length (#5611)

---
 docs/ops/movement/VariadicSplit_1.md          |  2 +-
 .../legacy/ngraph_ops/lstm_sequence_ie.hpp    |  2 +-
 .../src/ngraph_ops/gru_sequence_ie.cpp        |  6 +++---
 .../src/ngraph_ops/lstm_sequence_ie.cpp       |  6 +++---
 .../src/ngraph_ops/rnn_sequence_ie.cpp        |  2 +-
 .../src/vpu/myriad_plugin/myriad_metrics.cpp  |  4 ++--
 .../onnx_reader/onnx_reader_external_data.cpp |  2 +-
 .../convert_sequences_to_sequences_ie_test.cpp |  4 ++--
 .../single_layer_tests/gru_sequence.cpp       |  2 +-
 .../single_layer_tests/loop.cpp               |  2 +-
 .../single_layer_tests/lstm_sequence.cpp      |  2 +-
 .../single_layer_tests/rnn_sequence.cpp       |  2 +-
 .../single_layer_tests/tensor_iterator.cpp    |  2 +-
 .../cpu/single_layer_tests/gru_sequence.cpp   | 10 +++++-----
 .../cpu/single_layer_tests/lstm_sequence.cpp  |  8 ++++----
 .../cpu/single_layer_tests/rnn_sequence.cpp   | 10 +++++-----
 .../single_layer_tests/gru_sequence.cpp       |  2 +-
 .../single_layer_tests/lstm_sequence.cpp      |  2 +-
 .../single_layer_tests/rnn_sequence.cpp       |  2 +-
 .../single_layer/ctc_loss.hpp                 |  2 +-
 .../src/single_layer/gru_sequence.cpp         | 14 +++++++-------
 .../src/single_layer/lstm_sequence.cpp        | 14 +++++++-------
 .../src/single_layer/rnn_sequence.cpp         | 14 +++++++-------
 .../src/single_layer/tensor_iterator.cpp      | 16 ++++++++--------
 .../graph_optimizer/prepare_buffer_fusing.cpp |  6 +++---
 inference-engine/thirdparty/mkl-dnn           |  2 +-
 .../movidius/mvnc/tests/mvnc_no_boot_tests.cpp |  8 ++++----
 .../movidius/mvnc/tests/mvnc_tests_common.cpp |  8 ++++----
 .../movidius/mvnc/tests/mvnc_tests_usb.cpp    | 12 ++++++------
 .../thirdparty/pugixml/src/pugixml.cpp        |  8 ++++----
 ngraph/core/include/ngraph/op/ctc_loss.hpp    |  2 +-
 ngraph/core/include/ngraph/op/lstm_sequence.hpp |  6 +++---
 ngraph/frontend/onnx_import/src/op/slice.cpp  |  2 +-
 .../src/utils/tensor_external_data.cpp        | 16 ++++++++--------
 .../src/utils/tensor_external_data.hpp        |  2 +-
 .../tests/test_ngraph/test_data_movement.py   |  6 +++---
 ngraph/test/backend/reverse_sequence.in.cpp   | 16 ++++++++--------
 .../test/onnx/onnx_import_external_data.in.cpp |  4 ++--
 ngraph/test/type_prop/reverse_sequence.cpp    | 14 +++++++-------
 39 files changed, 122 insertions(+), 122 deletions(-)

diff --git a/docs/ops/movement/VariadicSplit_1.md b/docs/ops/movement/VariadicSplit_1.md
index 50bbbd6e959311..87c9ac67b65f2e 100644
--- a/docs/ops/movement/VariadicSplit_1.md
+++ b/docs/ops/movement/VariadicSplit_1.md
@@ -26,7 +26,7 @@ Where D is the rank of input tensor `data`. The sum of elements in `split_length

 * **2**: `axis`. Axis along `data` to split. A scalar of type `T2` with value from range `-rank(data) .. rank(data)-1`. Negative values address dimensions from the end. **Required.**

-* **3**: `split_lengths`. A list containing the dimension values of each output tensor shape along the split `axis`. A 1D tensor of type `T2`. The number of elements in `split_lengths` determines the number of outputs.
The sum of elements in `split_lengths` must match `data.shape[axis]`. In addition `split_lenghts` can contain a single `-1` element, which means, all remaining items along specified `axis` that are not consumed by other parts. **Required.** +* **3**: `split_lengths`. A list containing the dimension values of each output tensor shape along the split `axis`. A 1D tensor of type `T2`. The number of elements in `split_lengths` determines the number of outputs. The sum of elements in `split_lengths` must match `data.shape[axis]`. In addition `split_lengths` can contain a single `-1` element, which means, all remaining items along specified `axis` that are not consumed by other parts. **Required.** **Outputs** diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lstm_sequence_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lstm_sequence_ie.hpp index 68f2688a57fa14..641f93bd0639b4 100644 --- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lstm_sequence_ie.hpp +++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/lstm_sequence_ie.hpp @@ -24,7 +24,7 @@ class INFERENCE_ENGINE_API_CLASS(LSTMSequenceIE) : public ngraph::op::util::RNNC LSTMSequenceIE(const Output &X, const Output &H_t, const Output &C_t, - const Output &seq_lenghts, + const Output &seq_lengths, const Output &WR, const Output &B, size_t hidden_size, diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/gru_sequence_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/gru_sequence_ie.cpp index 296e65a786d7af..1920221c47684f 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/gru_sequence_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/gru_sequence_ie.cpp @@ -16,7 +16,7 @@ NGRAPH_RTTI_DEFINITION(op::GRUSequenceIE, "GRUSequenceIE", 4); op::GRUSequenceIE::GRUSequenceIE(const Output& X, const Output& H_t, - const Output& seq_lenghts, + const Output& seq_lengths, const Output& WR, const Output& B, std::size_t hidden_size, @@ -27,7 +27,7 @@ op::GRUSequenceIE::GRUSequenceIE(const Output& X, float clip, bool linear_before_reset, int64_t seq_axis) - : RNNCellBase({X, H_t, seq_lenghts, WR, B}, hidden_size, clip, activations, activations_alpha, activations_beta), + : RNNCellBase({X, H_t, seq_lengths, WR, B}, hidden_size, clip, activations, activations_alpha, activations_beta), m_direction(direction), m_linear_before_reset(linear_before_reset), m_seq_axis(seq_axis) { @@ -50,7 +50,7 @@ void op::GRUSequenceIE::validate_and_infer_types() { auto b_pshape = get_input_partial_shape(4); std::vector pshapes = {x_pshape, h_state_pshape, seq_lengths_pshape, wr_pshape, b_pshape}; - std::vector in_names = {"X", "H", "seq_lenghts", "WR", "B"}; + std::vector in_names = {"X", "H", "seq_lengths", "WR", "B"}; // num_direction dimension should be squeezed, we don't support bidirectional case std::vector ranks = {3, 2, 1, 2, 1}; for (size_t i = 0; i < pshapes.size(); ++i) { diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/lstm_sequence_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/lstm_sequence_ie.cpp index e0aa3f3ef77064..b32ae8205efee9 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/lstm_sequence_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/lstm_sequence_ie.cpp @@ -17,7 +17,7 @@ NGRAPH_RTTI_DEFINITION(op::LSTMSequenceIE, "LSTMSequenceIE", 5); op::LSTMSequenceIE::LSTMSequenceIE(const Output &X, const Output &H_t, const Output &C_t, - const Output &seq_lenghts, + const Output &seq_lengths, const Output &WR, const Output &B, 
std::size_t hidden_size, @@ -27,7 +27,7 @@ op::LSTMSequenceIE::LSTMSequenceIE(const Output &X, const std::vector &activations_beta, float clip, int64_t seq_axis) - : RNNCellBase({X, H_t, C_t, seq_lenghts, WR, B}, hidden_size, clip, activations, activations_alpha, activations_beta), + : RNNCellBase({X, H_t, C_t, seq_lengths, WR, B}, hidden_size, clip, activations, activations_alpha, activations_beta), m_direction(direction), m_seq_axis(seq_axis) { constructor_validate_and_infer_types(); @@ -52,7 +52,7 @@ void op::LSTMSequenceIE::validate_and_infer_types() { std::vector pshapes = {x_pshape, h_state_pshape, c_state_pshape, seq_lengths_pshape, wr_pshape, b_pshape}; - std::vector in_names = {"X", "H", "C", "seq_lenghts", "WR", "B"}; + std::vector in_names = {"X", "H", "C", "seq_lengths", "WR", "B"}; // num_direction dimension should be squeezed, we don't support bidirectional case std::vector ranks = {3, 2, 2, 1, 2, 1}; for (size_t i = 0; i < pshapes.size(); ++i) { diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/rnn_sequence_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/rnn_sequence_ie.cpp index 6b0a1c97dd400d..168885de40642f 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/rnn_sequence_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/rnn_sequence_ie.cpp @@ -48,7 +48,7 @@ void op::RNNSequenceIE::validate_and_infer_types() { auto b_pshape = get_input_partial_shape(4); std::vector pshapes = {x_pshape, h_state_pshape, seq_lengths_pshape, wr_pshape, b_pshape}; - std::vector in_names = {"X", "H", "seq_lenghts", "WR", "B"}; + std::vector in_names = {"X", "H", "seq_lengths", "WR", "B"}; // num_direction dimension should be squeezed, we don't support bidirectional case std::vector ranks = {3, 2, 1, 2, 1}; for (size_t i = 0; i < pshapes.size(); ++i) { diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_metrics.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_metrics.cpp index 3d314cc873cf10..fc022ff4184eae 100644 --- a/inference-engine/src/vpu/myriad_plugin/myriad_metrics.cpp +++ b/inference-engine/src/vpu/myriad_plugin/myriad_metrics.cpp @@ -81,13 +81,13 @@ std::vector MyriadMetrics::AvailableDevicesNames( std::string MyriadMetrics::FullName(std::string deviceName) const { std::string nameDelimiter("-ma"); - unsigned int indexLenght = 4; + unsigned int indexLength = 4; unsigned int placeOfTypeId = 2; auto indexStr = deviceName; indexStr.erase(0, indexStr.find(nameDelimiter) + nameDelimiter.length()); - if (indexLenght != indexStr.length()) { + if (indexLength != indexStr.length()) { return deviceName; } else { auto myriadId = std::string(1, indexStr[placeOfTypeId]); diff --git a/inference-engine/tests/functional/inference_engine/onnx_reader/onnx_reader_external_data.cpp b/inference-engine/tests/functional/inference_engine/onnx_reader/onnx_reader_external_data.cpp index 3a72dbf6e80525..2ae931d76b8c1e 100644 --- a/inference-engine/tests/functional/inference_engine/onnx_reader/onnx_reader_external_data.cpp +++ b/inference-engine/tests/functional/inference_engine/onnx_reader/onnx_reader_external_data.cpp @@ -64,7 +64,7 @@ TEST(ONNX_Reader_Tests, ImportModelWithExternalDataFromStringException) { EXPECT_PRED_FORMAT2( testing::IsSubstring, - std::string("data/tensor.data, offset: 0, data_lenght: 0, sha1_digest: 0)"), + std::string("data/tensor.data, offset: 0, data_length: 0, sha1_digest: 0)"), e.what()); } catch(...) 
{ diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp index 30e69d7f7b72ca..76b4cce3f4cb16 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp @@ -229,7 +229,7 @@ TEST(TransformationTests, LSTMSequenceConversionTest) { ngraph::Shape{batch_size, num_directions, hidden_size}); const auto C_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, hidden_size}); - const auto seq_lenghts = std::make_shared(ngraph::element::f32, + const auto seq_lengths = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size}); const auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, @@ -245,7 +245,7 @@ TEST(TransformationTests, LSTMSequenceConversionTest) { auto sequence_ie = std::make_shared(X, in_1, in_2, - seq_lenghts, + seq_lengths, in_3, in_4, sequence->get_hidden_size(), diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp index 7e3dea8b04fc2e..5521023a192fd2 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -16,7 +16,7 @@ namespace { ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, ngraph::helpers::SequenceTestsMode::PURE_SEQ}; - // output values increase rapidly without clip, so use only seq_lenghts = 2 + // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{10}; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp index ad294a4ce1f6ff..1a14f9a97e4cbe 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/loop.cpp @@ -9,7 +9,7 @@ using namespace LayerTestsDefinitions; namespace { - // without clip values increase rapidly, so use only seq_lenghts = 2 + // without clip values increase rapidly, so use only seq_lengths = 2 std::vector execute_first_iteration{true}; std::vector is_body_condition_const{true/*, false*/}; std::vector body_condition{true/*, false*/}; // works only if is_body_condition_const == true diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp index 1fbe1190a76142..3ccbddc8f31c35 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp @@ -16,7 +16,7 @@ namespace { 
ngraph::helpers::SequenceTestsMode::PURE_SEQ, ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM}; - // output values increase rapidly without clip, so use only seq_lenghts = 2 + // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{10}; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp index 0805ff960d6124..3db31d471c532c 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp @@ -16,7 +16,7 @@ namespace { ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, ngraph::helpers::SequenceTestsMode::PURE_SEQ}; - // output values increase rapidly without clip, so use only seq_lenghts = 2 + // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{1, 10}; diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/tensor_iterator.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/tensor_iterator.cpp index 2531a6668c408f..2a551b695f9685 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/tensor_iterator.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/tensor_iterator.cpp @@ -11,7 +11,7 @@ using namespace LayerTestsDefinitions; namespace { std::vector should_decompose = {true, false}; - // output values increase rapidly without clip, so use only seq_lenghts = 2 + // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{1, 10}; diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/gru_sequence.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/gru_sequence.cpp index b3f7b213136f22..55ece3f0a663be 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/gru_sequence.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/gru_sequence.cpp @@ -46,7 +46,7 @@ class GRUSequenceCPUTest : public testing::WithParamInterface additionalConfig; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size = 10; @@ -60,11 +60,11 @@ class GRUSequenceCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - std::tie(m_mode, seq_lenghts, batch, hidden_size, activations, clip, linear_before_reset, direction, netPrecision, targetDevice) = basicParamsSet; + std::tie(m_mode, seq_lengths, batch, hidden_size, activations, clip, linear_before_reset, direction, netPrecision, targetDevice) = basicParamsSet; size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 
2 : 1; std::vector> inputShapes = { - {{batch, seq_lenghts, input_size}, + {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, 3 * hidden_size, input_size}, @@ -89,7 +89,7 @@ class GRUSequenceCPUTest : public testing::WithParamInterface mode{ngraph::helpers::SequenceTestsMode::PURE_SEQ}; -// output values increase rapidly without clip, so use only seq_lenghts = 2 +// output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector batch{10}; std::vector batch_size_one{1}; diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/lstm_sequence.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/lstm_sequence.cpp index 538ca675a7fa58..234a579085cd01 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/lstm_sequence.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/lstm_sequence.cpp @@ -47,7 +47,7 @@ class LSTMSequenceCPUTest : public testing::WithParamInterface additionalConfig; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size; @@ -60,12 +60,12 @@ class LSTMSequenceCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - std::tie(m_mode, seq_lenghts, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = basicParamsSet; + std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = basicParamsSet; size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; - m_max_seq_len = seq_lenghts; + m_max_seq_len = seq_lengths; std::vector> inputShapes = { - {{batch, seq_lenghts, input_size}, + {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch, num_directions, hidden_size}, {batch}, diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/rnn_sequence.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/rnn_sequence.cpp index 009dcdd01f2686..6c7f873cc95c9d 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/rnn_sequence.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/rnn_sequence.cpp @@ -46,7 +46,7 @@ class RNNSequenceCPUTest : public testing::WithParamInterface additionalConfig; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size; @@ -59,11 +59,11 @@ class RNNSequenceCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - std::tie(m_mode, seq_lenghts, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = basicParamsSet; + std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = basicParamsSet; size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 
2 : 1; std::vector> inputShapes = { - {{batch, seq_lenghts, input_size}, + {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, hidden_size, input_size}, @@ -82,7 +82,7 @@ class RNNSequenceCPUTest : public testing::WithParamInterface mode{ngraph::helpers::SequenceTestsMode::PURE_SEQ}; -// output values increase rapidly without clip, so use only seq_lenghts = 2 +// output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector batch{10}; std::vector batch_size_one{1}; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp index 9474335c2fe20c..d0033f4f4738c3 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -16,7 +16,7 @@ namespace { ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, ngraph::helpers::SequenceTestsMode::PURE_SEQ}; - // output values increase rapidly without clip, so use only seq_lenghts = 2 + // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{10}; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp index 01c284093019a6..d79cee69eea5d5 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lstm_sequence.cpp @@ -16,7 +16,7 @@ std::vector mode{ngraph::helpers::SequenceTe ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, ngraph::helpers::SequenceTestsMode::PURE_SEQ}; -// output values increase rapidly without clip, so use only seq_lenghts = 2 +// output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{10}; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp index 10f888cec1ea05..718c1b6e771540 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/rnn_sequence.cpp @@ -16,7 +16,7 @@ std::vector mode{ngraph::helpers::SequenceTe ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, ngraph::helpers::SequenceTestsMode::PURE_SEQ}; -// output values increase rapidly without clip, so use only seq_lenghts = 2 +// output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{1, 10}; diff --git 
a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_loss.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_loss.hpp index bcf89fef2b7994..7b8a67d95c4dbc 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_loss.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_loss.hpp @@ -14,7 +14,7 @@ namespace LayerTestsDefinitions { typedef std::tuple< std::vector, // Logits shapes - std::vector, // logits lenght + std::vector, // logits length std::vector>, // labels std::vector, // labels length int, // blank index diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp index c89a1bc06e29e1..f43cca3ca6d98b 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp @@ -10,7 +10,7 @@ namespace LayerTestsDefinitions { std::string GRUSequenceTest::getTestCaseName(const testing::TestParamInfo &obj) { ngraph::helpers::SequenceTestsMode mode; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size = 10; @@ -22,7 +22,7 @@ namespace LayerTestsDefinitions { ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::string targetDevice; - std::tie(mode, seq_lenghts, batch, hidden_size, activations, clip, linear_before_reset, direction, netPrecision, + std::tie(mode, seq_lengths, batch, hidden_size, activations, clip, linear_before_reset, direction, netPrecision, targetDevice) = obj.param; std::vector> inputShapes = { {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {3 * hidden_size, input_size}, @@ -30,7 +30,7 @@ namespace LayerTestsDefinitions { }; std::ostringstream result; result << "mode=" << mode << "_"; - result << "seq_lenghts=" << seq_lenghts << "_"; + result << "seq_lengths=" << seq_lengths << "_"; result << "batch=" << batch << "_"; result << "hidden_size=" << hidden_size << "_"; result << "input_size=" << input_size << "_"; @@ -45,7 +45,7 @@ namespace LayerTestsDefinitions { void GRUSequenceTest::SetUp() { using namespace ngraph::helpers; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size = 10; @@ -56,15 +56,15 @@ namespace LayerTestsDefinitions { bool linear_before_reset; ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; - std::tie(m_mode, seq_lenghts, batch, hidden_size, activations, clip, linear_before_reset, direction, netPrecision, + std::tie(m_mode, seq_lengths, batch, hidden_size, activations, clip, linear_before_reset, direction, netPrecision, targetDevice) = this->GetParam(); size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; std::vector> inputShapes = { - {{batch, seq_lenghts, input_size}, {batch, num_directions, hidden_size}, {batch}, + {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, 3 * hidden_size, input_size}, {num_directions, 3 * hidden_size, hidden_size}, {num_directions, (linear_before_reset ? 
4 : 3) * hidden_size}}, }; - m_max_seq_len = seq_lenghts; + m_max_seq_len = seq_lengths; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); auto params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1]}); if (m_mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM || diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp index f26475d84eb0d0..7bb4289ec735d0 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp @@ -11,7 +11,7 @@ namespace LayerTestsDefinitions { std::string LSTMSequenceTest::getTestCaseName(const testing::TestParamInfo &obj) { ngraph::helpers::SequenceTestsMode mode; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size; @@ -22,7 +22,7 @@ namespace LayerTestsDefinitions { ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::string targetDevice; - std::tie(mode, seq_lenghts, batch, hidden_size, input_size, activations, clip, direction, netPrecision, + std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = obj.param; std::vector> inputShapes = { {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, @@ -30,7 +30,7 @@ namespace LayerTestsDefinitions { }; std::ostringstream result; result << "mode=" << mode << "_"; - result << "seq_lenghts=" << seq_lenghts << "_"; + result << "seq_lengths=" << seq_lengths << "_"; result << "batch=" << batch << "_"; result << "hidden_size=" << hidden_size << "_"; result << "input_size=" << input_size << "_"; @@ -46,7 +46,7 @@ namespace LayerTestsDefinitions { void LSTMSequenceTest::SetUp() { using namespace ngraph::helpers; using namespace ngraph::builder; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; @@ -57,12 +57,12 @@ namespace LayerTestsDefinitions { float clip; ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; - std::tie(m_mode, seq_lenghts, batch, hidden_size, input_size, activations, clip, direction, netPrecision, + std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = this->GetParam(); size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 
2 : 1; - m_max_seq_len = seq_lenghts; + m_max_seq_len = seq_lengths; std::vector> inputShapes = { - {{batch, seq_lenghts, input_size}, {batch, num_directions, hidden_size}, {batch, num_directions, hidden_size}, + {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, 4 * hidden_size, input_size}, {num_directions, 4 * hidden_size, hidden_size}, {num_directions, 4 * hidden_size}}, }; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp index c8ffc3111966fe..a87e79ed9dd3a1 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp @@ -10,7 +10,7 @@ namespace LayerTestsDefinitions { std::string RNNSequenceTest::getTestCaseName(const testing::TestParamInfo &obj) { ngraph::helpers::SequenceTestsMode mode; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size; @@ -21,7 +21,7 @@ namespace LayerTestsDefinitions { ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::string targetDevice; - std::tie(mode, seq_lenghts, batch, hidden_size, input_size, activations, clip, direction, netPrecision, + std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = obj.param; std::vector> inputShapes = { {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {hidden_size, input_size}, @@ -29,7 +29,7 @@ namespace LayerTestsDefinitions { }; std::ostringstream result; result << "mode=" << mode << "_"; - result << "seq_lenghts=" << seq_lenghts << "_"; + result << "seq_lengths=" << seq_lengths << "_"; result << "batch=" << batch << "_"; result << "hidden_size=" << hidden_size << "_"; result << "input_size=" << input_size << "_"; @@ -44,7 +44,7 @@ namespace LayerTestsDefinitions { void RNNSequenceTest::SetUp() { using namespace ngraph::helpers; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size; @@ -54,15 +54,15 @@ namespace LayerTestsDefinitions { float clip; ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; - std::tie(m_mode, seq_lenghts, batch, hidden_size, input_size, activations, clip, direction, netPrecision, + std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, netPrecision, targetDevice) = this->GetParam(); size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 
2 : 1; std::vector> inputShapes = { - {{batch, seq_lenghts, input_size}, {batch, num_directions, hidden_size}, {batch}, + {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, hidden_size, input_size}, {num_directions, hidden_size, hidden_size}, {num_directions, hidden_size}}, }; - m_max_seq_len = seq_lenghts; + m_max_seq_len = seq_lengths; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); auto params = ngraph::builder::makeParams(ngPrc, {inputShapes[0], inputShapes[1]}); if (m_mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM || diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp index 6686c917e9dac8..ebbcd2753a8b34 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp @@ -9,7 +9,7 @@ namespace LayerTestsDefinitions { std::string TensorIteratorTest::getTestCaseName(const testing::TestParamInfo &obj) { bool should_decompose; - size_t seq_lenghts; + size_t seq_lengths; size_t batch; size_t hidden_size; size_t input_size = 10; @@ -19,7 +19,7 @@ namespace LayerTestsDefinitions { ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::string targetDevice; - std::tie(should_decompose, seq_lenghts, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, + std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, targetDevice) = obj.param; std::vector> inputShapes = {}; @@ -44,7 +44,7 @@ namespace LayerTestsDefinitions { std::ostringstream result; result << "unrolling=" << should_decompose << "_"; - result << "seq_len=" << seq_lenghts << "_"; + result << "seq_len=" << seq_lengths << "_"; result << "seq_len_axis=" << sequence_axis << "_"; result << "batch=" << batch << "_"; result << "hidden_size=" << hidden_size << "_"; @@ -59,7 +59,7 @@ namespace LayerTestsDefinitions { } void TensorIteratorTest::SetUp() { - size_t seq_lenghts; + size_t seq_lengths; bool should_decompose; size_t batch; size_t hidden_size; @@ -69,7 +69,7 @@ namespace LayerTestsDefinitions { float clip; ngraph::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; - std::tie(should_decompose, seq_lenghts, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, + std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, targetDevice) = this->GetParam(); std::vector> inputShapes; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); @@ -84,7 +84,7 @@ namespace LayerTestsDefinitions { switch (ti_body) { case ngraph::helpers::TensorIteratorBody::LSTM: { inputShapes = { - {{batch, seq_lenghts, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, + {{batch, seq_lengths, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, {4 * hidden_size, hidden_size}, {4 * hidden_size}}, }; if (sequence_axis == 0) { @@ -130,7 +130,7 @@ namespace LayerTestsDefinitions { } case ngraph::helpers::TensorIteratorBody::GRU: { inputShapes = { - {{batch, seq_lenghts, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size}, + {{batch, seq_lengths, input_size}, {batch, 
hidden_size}, {3 * hidden_size, input_size}, {3 * hidden_size, hidden_size}, {3 * hidden_size}}, }; if (sequence_axis == 0) { @@ -172,7 +172,7 @@ namespace LayerTestsDefinitions { break; } case ngraph::helpers::TensorIteratorBody::RNN: { - inputShapes = {{batch, seq_lenghts, input_size}, + inputShapes = {{batch, seq_lengths, input_size}, {batch, hidden_size}, {hidden_size, input_size}, {hidden_size, hidden_size}, diff --git a/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_buffer_fusing.cpp b/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_buffer_fusing.cpp index d3618d1034bda2..befc677b6f712d 100644 --- a/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_buffer_fusing.cpp +++ b/inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_buffer_fusing.cpp @@ -209,7 +209,7 @@ void concat_in_place_optimization::optimize_cascade(concatenation_node& node, st // apply concatenation in place optimization for (auto input : node.get_dependencies()) { - auto input_lenght = input->get_output_layout().size.raw[concat_axis]; + auto input_length = input->get_output_layout().size.raw[concat_axis]; if (input->is_type() && input->can_be_optimized()) need_reoptimization.push_back(&input->as()); @@ -218,7 +218,7 @@ void concat_in_place_optimization::optimize_cascade(concatenation_node& node, st // // |--- lower padd ---| |---------- upper padd -----------| // |-- output padd ---| ----- input1 ------|----- input2 -----|-- out padd --| - upper_padd.raw[concat_axis] -= input_lenght; + upper_padd.raw[concat_axis] -= input_length; // set new padding for input input->set_output_padding(padding(lower_padd.sizes(), upper_padd.sizes())); @@ -227,7 +227,7 @@ void concat_in_place_optimization::optimize_cascade(concatenation_node& node, st // // |-------------- lower padd -------------|---------- upper padd -----------| // |-- output padd ---| ----- input1 ------|----- input2 -----|-- out padd --| - lower_padd.raw[concat_axis] += input_lenght; + lower_padd.raw[concat_axis] += input_length; } node.can_be_optimized(true); diff --git a/inference-engine/thirdparty/mkl-dnn b/inference-engine/thirdparty/mkl-dnn index d8514fcf88e977..a5fffb52b012b3 160000 --- a/inference-engine/thirdparty/mkl-dnn +++ b/inference-engine/thirdparty/mkl-dnn @@ -1 +1 @@ -Subproject commit d8514fcf88e9770f671074ffe0ea853d734ebbcd +Subproject commit a5fffb52b012b31c65ace894638ecfb8948de9ec diff --git a/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_no_boot_tests.cpp b/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_no_boot_tests.cpp index 4293da658a8daf..fe8c522ac3c311 100644 --- a/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_no_boot_tests.cpp +++ b/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_no_boot_tests.cpp @@ -31,20 +31,20 @@ TEST_F(MvncNoBootOpenDevice, OpenTwiceSameHandler) { deviceDesc.platform = NC_ANY_PLATFORM; char dev_addr_first_open[MAX_DEV_NAME]; - unsigned int data_lenght_first = MAX_DEV_NAME; + unsigned int data_length_first = MAX_DEV_NAME; char dev_addr_second_open[MAX_DEV_NAME]; - unsigned int data_lenght_second = MAX_DEV_NAME; + unsigned int data_length_second = MAX_DEV_NAME; // First open, get device name ASSERT_NO_ERROR(ncDeviceOpen(&deviceHandle, deviceDesc, watchdogInterval, firmwarePath)); ASSERT_NO_ERROR(ncDeviceGetOption(deviceHandle, NC_RO_DEVICE_NAME, - dev_addr_first_open, &data_lenght_first)); + dev_addr_first_open, &data_length_first)); // Second open, get device name ASSERT_NO_ERROR(ncDeviceOpen(&deviceHandle, deviceDesc, watchdogInterval, 
firmwarePath)); ASSERT_NO_ERROR(ncDeviceGetOption(deviceHandle, NC_RO_DEVICE_NAME, - dev_addr_second_open, &data_lenght_second)); + dev_addr_second_open, &data_length_second)); ASSERT_NO_ERROR(ncDeviceClose(&deviceHandle)); // Should be the same device diff --git a/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_common.cpp b/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_common.cpp index 02eda4c3dcaed7..0f7b69d1b4935a 100644 --- a/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_common.cpp +++ b/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_common.cpp @@ -220,20 +220,20 @@ TEST_P(MvncOpenDevice, OpenTwiceSameHandler) { deviceDesc.platform = NC_ANY_PLATFORM; char dev_addr_first_open[MAX_DEV_NAME]; - unsigned int data_lenght_first = MAX_DEV_NAME; + unsigned int data_length_first = MAX_DEV_NAME; char dev_addr_second_open[MAX_DEV_NAME]; - unsigned int data_lenght_second = MAX_DEV_NAME; + unsigned int data_length_second = MAX_DEV_NAME; // First open, get device name ASSERT_NO_ERROR(ncDeviceOpen(&deviceHandle, deviceDesc, m_ncDeviceOpenParams)); ASSERT_NO_ERROR(ncDeviceGetOption(deviceHandle, NC_RO_DEVICE_NAME, - dev_addr_first_open, &data_lenght_first)); + dev_addr_first_open, &data_length_first)); // Second open, get device name ASSERT_NO_ERROR(ncDeviceOpen(&deviceHandle, deviceDesc, m_ncDeviceOpenParams)); ASSERT_NO_ERROR(ncDeviceGetOption(deviceHandle, NC_RO_DEVICE_NAME, - dev_addr_second_open, &data_lenght_second)); + dev_addr_second_open, &data_length_second)); ASSERT_NO_ERROR(ncDeviceClose(&deviceHandle, m_watchdogHndl)); // Should be the same device diff --git a/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_usb.cpp b/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_usb.cpp index 65a2a09fb233bc..87544a6e0c6e15 100644 --- a/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_usb.cpp +++ b/inference-engine/thirdparty/movidius/mvnc/tests/mvnc_tests_usb.cpp @@ -113,7 +113,7 @@ TEST_F(MvncOpenUSBDevice, OpenAvailableDeviceByName) { GTEST_SKIP(); char dev_addr_open[NC_MAX_NAME_SIZE]; - unsigned int data_lenght = NC_MAX_NAME_SIZE; + unsigned int data_length = NC_MAX_NAME_SIZE; auto availableDevices = getDevicesList(); @@ -122,7 +122,7 @@ TEST_F(MvncOpenUSBDevice, OpenAvailableDeviceByName) { ASSERT_NO_ERROR(ncDeviceOpen(&deviceHandle_, deviceDesc_, m_ncDeviceOpenParams)); ASSERT_NO_ERROR(ncDeviceGetOption(deviceHandle_, NC_RO_DEVICE_NAME, - dev_addr_open, &data_lenght)); + dev_addr_open, &data_length)); ASSERT_TRUE(strncmp(dev_addr_open, deviceDesc_.name, NC_MAX_NAME_SIZE) == 0); ASSERT_NO_ERROR(ncDeviceClose(&deviceHandle_, m_watchdogHndl)); @@ -147,10 +147,10 @@ TEST_F(MvncOpenUSBDevice, OpenTwiceSameHandlerByName) { GTEST_SKIP(); char dev_addr_first_open[MAX_DEV_NAME]; - unsigned int data_lenght_first = MAX_DEV_NAME; + unsigned int data_length_first = MAX_DEV_NAME; char dev_addr_second_open[MAX_DEV_NAME]; - unsigned int data_lenght_second = MAX_DEV_NAME; + unsigned int data_length_second = MAX_DEV_NAME; auto availableDevices = getDevicesList(); @@ -159,12 +159,12 @@ TEST_F(MvncOpenUSBDevice, OpenTwiceSameHandlerByName) { ASSERT_NO_ERROR(ncDeviceOpen(&deviceHandle_, deviceDesc_, m_ncDeviceOpenParams)); ASSERT_NO_ERROR(ncDeviceGetOption(deviceHandle_, NC_RO_DEVICE_NAME, - dev_addr_first_open, &data_lenght_first)); + dev_addr_first_open, &data_length_first)); // Second open, get device name ASSERT_NO_ERROR(ncDeviceOpen(&deviceHandle_, deviceDesc_, m_ncDeviceOpenParams)); ASSERT_NO_ERROR(ncDeviceGetOption(deviceHandle_, 
NC_RO_DEVICE_NAME,
-                                      dev_addr_second_open, &data_lenght_second));
+                                      dev_addr_second_open, &data_length_second));

     ASSERT_NO_ERROR(ncDeviceClose(&deviceHandle_, m_watchdogHndl));

     // Should be the same device
diff --git a/inference-engine/thirdparty/pugixml/src/pugixml.cpp b/inference-engine/thirdparty/pugixml/src/pugixml.cpp
index 69eba5e127731e..e6c409bc0f5741 100644
--- a/inference-engine/thirdparty/pugixml/src/pugixml.cpp
+++ b/inference-engine/thirdparty/pugixml/src/pugixml.cpp
@@ -3953,9 +3953,9 @@ PUGI__NS_BEGIN
 			// skip ]] if we stopped at ]]>, > will go to the next CDATA section
 			if (*s) s += 2;

-			size_t bufLenght = static_cast<size_t>(s - prev);
+			size_t bufLength = static_cast<size_t>(s - prev);

-			writer.write_buffer(prev, bufLenght);
+			writer.write_buffer(prev, bufLength);
 			writer.write(']', ']', '>');
 		}

@@ -4013,9 +4013,9 @@ PUGI__NS_BEGIN
 		// look for -\0 or -- sequence - we can't output it since -- is illegal in comment body
 		while (*s && !(s[0] == '-' && (s[1] == '-' || s[1] == 0))) ++s;

-		size_t bufLenght = static_cast<size_t>(s - prev);
+		size_t bufLength = static_cast<size_t>(s - prev);

-		writer.write_buffer(prev, bufLenght);
+		writer.write_buffer(prev, bufLength);

 		if (*s)
 		{
diff --git a/ngraph/core/include/ngraph/op/ctc_loss.hpp b/ngraph/core/include/ngraph/op/ctc_loss.hpp
index 23f8e4ff570122..b5e364877af600 100644
--- a/ngraph/core/include/ngraph/op/ctc_loss.hpp
+++ b/ngraph/core/include/ngraph/op/ctc_loss.hpp
@@ -21,7 +21,7 @@ namespace ngraph
             /// \brief Constructs a CTCLoss operation
             ///
             /// \param logits 3-D tensor of logits
-            /// \param logit_length 1-D tensor of lenght for each object from
+            /// \param logit_length 1-D tensor of length for each object from
             ///                     a batch
             /// \param labels 2-D tensor of labels for which likelyhood
             ///               is estimated using logist
diff --git a/ngraph/core/include/ngraph/op/lstm_sequence.hpp b/ngraph/core/include/ngraph/op/lstm_sequence.hpp
index b5c0ec5dfefca6..eb0692efcd080e 100644
--- a/ngraph/core/include/ngraph/op/lstm_sequence.hpp
+++ b/ngraph/core/include/ngraph/op/lstm_sequence.hpp
@@ -98,13 +98,13 @@ namespace ngraph
         private:
             ///
-            /// \brief Gets the masked value according to sequence lenght in a batch.
+            /// \brief Gets the masked value according to sequence length in a batch.
             ///
             /// \note Zeros out values or sets them to default value for inputs with
-            ///       sequence lenght shorter than currently procssed time step.
+            ///       sequence length shorter than the currently processed time step.
             ///
             /// \param[in] data The input value.
-            /// \param[in] time_step The current time step denoting sequence lenght.
+            /// \param[in] time_step The current time step denoting sequence length.
             /// \param[in] batch_axis The batch axis index of data tensor.
             /// \param[in] default_value The default value for masked elements.
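             /// \note  (Rough sketch of the intended masking, an illustration
             ///        rather than the exact implementation: for batch entry `b`
             ///        at time step `t`, the result is approximately
             ///        `t < seq_lengths[b] ? data[b] : default_value[b]`.)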
/// diff --git a/ngraph/frontend/onnx_import/src/op/slice.cpp b/ngraph/frontend/onnx_import/src/op/slice.cpp index 2b9f5f0b52088c..26010a5bda0293 100644 --- a/ngraph/frontend/onnx_import/src/op/slice.cpp +++ b/ngraph/frontend/onnx_import/src/op/slice.cpp @@ -54,7 +54,7 @@ namespace ngraph /// value) or ignored (1 value) /// /// \param[in] axes Axes input of ONNX Slice operator - /// \param[in] slice_indices_length Lenght of Slice indices + /// \param[in] slice_indices_length Length of Slice indices /// (starts, ends, steps) /// /// \return Mask attribute in format required by StridedSlice:v1 diff --git a/ngraph/frontend/onnx_import/src/utils/tensor_external_data.cpp b/ngraph/frontend/onnx_import/src/utils/tensor_external_data.cpp index 77b6db14bebe8c..fe593b47619ece 100644 --- a/ngraph/frontend/onnx_import/src/utils/tensor_external_data.cpp +++ b/ngraph/frontend/onnx_import/src/utils/tensor_external_data.cpp @@ -25,7 +25,7 @@ namespace ngraph if (entry.key() == "offset") m_offset = std::stoi(entry.value()); if (entry.key() == "length") - m_data_lenght = std::stoi(entry.value()); + m_data_length = std::stoi(entry.value()); if (entry.key() == "checksum") m_sha1_digest = std::stoi(entry.value()); } @@ -43,11 +43,11 @@ namespace ngraph if (external_data_stream.fail()) throw error::invalid_external_data{*this}; - std::streamsize read_data_lenght; - if (m_data_lenght == 0) // read entire file - read_data_lenght = external_data_stream.tellg(); + std::streamsize read_data_length; + if (m_data_length == 0) // read entire file + read_data_length = external_data_stream.tellg(); else - read_data_lenght = m_data_lenght; + read_data_length = m_data_length; const auto page_size = 4096; if (m_offset != 0 && m_offset % page_size != 0) @@ -65,8 +65,8 @@ namespace ngraph } std::string read_data; - read_data.resize(read_data_lenght); - external_data_stream.read(&read_data[0], read_data_lenght); + read_data.resize(read_data_length); + external_data_stream.read(&read_data[0], read_data_length); external_data_stream.close(); return read_data; @@ -78,7 +78,7 @@ namespace ngraph s << "ExternalDataInfo("; s << "data_full_path: " << m_data_location; s << ", offset: " << m_offset; - s << ", data_lenght: " << m_data_lenght; + s << ", data_length: " << m_data_length; s << ", sha1_digest: " << m_sha1_digest << ")"; return s.str(); } diff --git a/ngraph/frontend/onnx_import/src/utils/tensor_external_data.hpp b/ngraph/frontend/onnx_import/src/utils/tensor_external_data.hpp index 6db383428ea02b..8af53e407c100f 100644 --- a/ngraph/frontend/onnx_import/src/utils/tensor_external_data.hpp +++ b/ngraph/frontend/onnx_import/src/utils/tensor_external_data.hpp @@ -35,7 +35,7 @@ namespace ngraph private: std::string m_data_location{}; int m_offset = 0; - int m_data_lenght = 0; + int m_data_length = 0; int m_sha1_digest = 0; }; } // namespace detail diff --git a/ngraph/python/tests/test_ngraph/test_data_movement.py b/ngraph/python/tests/test_ngraph/test_data_movement.py index aa1c41dbfd909c..f9693147e979a2 100644 --- a/ngraph/python/tests/test_ngraph/test_data_movement.py +++ b/ngraph/python/tests/test_ngraph/test_data_movement.py @@ -63,17 +63,17 @@ def test_reverse_sequence(): ], dtype=np.int32, ).reshape([2, 3, 4, 2]) - seq_lenghts = np.array([1, 2, 1, 2], dtype=np.int32) + seq_lengths = np.array([1, 2, 1, 2], dtype=np.int32) batch_axis = 2 sequence_axis = 1 input_param = ng.parameter(input_data.shape, name="input", dtype=np.int32) - seq_lengths_param = ng.parameter(seq_lenghts.shape, name="sequence lengths", dtype=np.int32) + 
seq_lengths_param = ng.parameter(seq_lengths.shape, name="sequence lengths", dtype=np.int32) model = ng.reverse_sequence(input_param, seq_lengths_param, batch_axis, sequence_axis) runtime = get_runtime() computation = runtime.computation(model, input_param, seq_lengths_param) - result = computation(input_data, seq_lenghts) + result = computation(input_data, seq_lengths) expected = np.array( [ diff --git a/ngraph/test/backend/reverse_sequence.in.cpp b/ngraph/test/backend/reverse_sequence.in.cpp index 4dcc954e4c00f9..bab37dfa5a6f41 100644 --- a/ngraph/test/backend/reverse_sequence.in.cpp +++ b/ngraph/test/backend/reverse_sequence.in.cpp @@ -44,8 +44,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n2c3h4w2) 12, 0, 15, 0, 18, 0, 21, 0, 13, 0, 16, 0, 19, 0, 22, 0, 14, 0, 17, 0, 20, 0, 23, 0, }; - std::vector seq_lenghts{1, 2, 1, 2}; - copy_data(b, seq_lenghts); + std::vector seq_lengths{1, 2, 1, 2}; + copy_data(b, seq_lengths); std::vector expected{ 0, 0, 4, 0, 6, 0, 10, 0, 1, 0, 3, 0, 7, 0, 9, 0, 2, 0, 5, 0, 8, 0, 11, 0, @@ -81,8 +81,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4c3h2w2) shared_ptr result = backend->create_tensor(element::i32, shape); - std::vector seq_lenghts{1, 2, 3, 3}; - copy_data(b, seq_lenghts); + std::vector seq_lengths{1, 2, 3, 3}; + copy_data(b, seq_lengths); std::vector input{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -137,8 +137,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4d2c3h2w2) copy_data(a, input); - std::vector seq_lenghts{1, 2, 1, 2}; - copy_data(b, seq_lenghts); + std::vector seq_lengths{1, 2, 1, 2}; + copy_data(b, seq_lengths); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -171,8 +171,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_negative_axes) 12, 0, 15, 0, 18, 0, 21, 0, 13, 0, 16, 0, 19, 0, 22, 0, 14, 0, 17, 0, 20, 0, 23, 0, }; - std::vector seq_lenghts{1, 2, 1, 2}; - copy_data(b, seq_lenghts); + std::vector seq_lengths{1, 2, 1, 2}; + copy_data(b, seq_lengths); std::vector expected{ 0, 0, 4, 0, 6, 0, 10, 0, 1, 0, 3, 0, 7, 0, 9, 0, 2, 0, 5, 0, 8, 0, 11, 0, diff --git a/ngraph/test/onnx/onnx_import_external_data.in.cpp b/ngraph/test/onnx/onnx_import_external_data.in.cpp index 6d318ca0436f7d..10638eaa2008da 100644 --- a/ngraph/test/onnx/onnx_import_external_data.in.cpp +++ b/ngraph/test/onnx/onnx_import_external_data.in.cpp @@ -101,7 +101,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_external_invalid_external_data_exception) { EXPECT_PRED_FORMAT2( testing::IsSubstring, - std::string("not_existed_file.data, offset: 4096, data_lenght: 16, sha1_digest: 0)"), + std::string("not_existed_file.data, offset: 4096, data_length: 16, sha1_digest: 0)"), error.what()); } catch (...) @@ -123,7 +123,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_external_invalid_up_dir_path) { EXPECT_PRED_FORMAT2(testing::IsSubstring, std::string("tensor.data, offset: 4096, " - "data_lenght: 16, sha1_digest: 0)"), + "data_length: 16, sha1_digest: 0)"), error.what()); } catch (...) 
diff --git a/ngraph/test/type_prop/reverse_sequence.cpp b/ngraph/test/type_prop/reverse_sequence.cpp
index 38be9f2aec41cf..fa1b57bea45637 100644
--- a/ngraph/test/type_prop/reverse_sequence.cpp
+++ b/ngraph/test/type_prop/reverse_sequence.cpp
@@ -12,13 +12,13 @@ using namespace ngraph;
 TEST(type_prop, reverse_sequence_1_dim)
 {
     auto data = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2});
-    auto seq_lenghts = make_shared<op::Parameter>(element::f32, Shape{4, 4});
+    auto seq_lengths = make_shared<op::Parameter>(element::f32, Shape{4, 4});
     try
     {
         size_t batch_axis = 0;
         size_t seq_axis = 1;
-        auto bc = make_shared<op::ReverseSequence>(data, seq_lenghts, batch_axis, seq_axis);
-        FAIL() << "ReverseSequence c-tor should throw for seq_lenghts whose rank isn't equal to 1";
+        auto bc = make_shared<op::ReverseSequence>(data, seq_lengths, batch_axis, seq_axis);
+        FAIL() << "ReverseSequence c-tor should throw for seq_lengths whose rank isn't equal to 1";
     }
     catch (const NodeValidationFailure& error)
     {
@@ -34,12 +34,12 @@ TEST(type_prop, reverse_sequence_1_dim)
 TEST(type_prop, reverse_sequence_batch_index_oob)
 {
     auto data = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2});
-    auto seq_lenghts = make_shared<op::Parameter>(element::f32, Shape{3});
+    auto seq_lengths = make_shared<op::Parameter>(element::f32, Shape{3});
     try
     {
         size_t batch_axis = 3;
         size_t seq_axis = 1;
-        auto bc = make_shared<op::ReverseSequence>(data, seq_lenghts, batch_axis, seq_axis);
+        auto bc = make_shared<op::ReverseSequence>(data, seq_lengths, batch_axis, seq_axis);
         FAIL() << "ReverseSequence c-tor should throw for out-of-bounds batch axis index";
     }
     catch (const ngraph_error& error)
@@ -76,12 +76,12 @@ TEST(type_prop, reverse_sequence_sequence_index_oob)
 TEST(type_prop, reverse_sequence_seq_len_size_equal_to_batch_dim)
 {
     auto data = make_shared<op::Parameter>(element::f32, Shape{4, 3, 2});
-    auto seq_lenghts = make_shared<op::Parameter>(element::f32, Shape{3});
+    auto seq_lengths = make_shared<op::Parameter>(element::f32, Shape{3});
     try
     {
         size_t batch_axis = 0;
         size_t seq_axis = 1;
-        auto bc = make_shared<op::ReverseSequence>(data, seq_lenghts, batch_axis, seq_axis);
+        auto bc = make_shared<op::ReverseSequence>(data, seq_lengths, batch_axis, seq_axis);
         FAIL() << "ReverseSequence c-tor should throw when sequence length size isn't equal to "
                   "batch dimension";
     }

From c1b1e2e7afc698ac82b32bb1f502ad2e90cd1419 Mon Sep 17 00:00:00 2001
From: Vladimir Paramuzov
Date: Thu, 13 May 2021 09:17:32 +0300
Subject: [PATCH 23/27] [IE CLDNN] Fixed FC weights check (#5568)

---
 inference-engine/src/cldnn_engine/ops/matmul.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/inference-engine/src/cldnn_engine/ops/matmul.cpp b/inference-engine/src/cldnn_engine/ops/matmul.cpp
index 289a5cb5d7df64..b18e199581cd15 100644
--- a/inference-engine/src/cldnn_engine/ops/matmul.cpp
+++ b/inference-engine/src/cldnn_engine/ops/matmul.cpp
@@ -60,8 +60,7 @@ void CreateMatMulOp(Program& p, const std::shared_ptr<ngraph::op::v0::MatMul>& o
     auto shape_a = op->get_input_shape(0);
     auto shape_b = op->get_input_shape(1);
 
-    bool is_fc = ngraph::is_type<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)) ||
-                 ngraph::is_type<ngraph::op::v0::FakeQuantize>(op->get_input_node_shared_ptr(1));
+    bool is_fc = IsNodeOnConstPath(op->get_input_node_shared_ptr(1));
     is_fc &= std::count_if(shape_b.begin(), shape_b.end(), [](size_t x) { return x != 1; }) <= 2;
 
     if (is_fc) {

From 5c4061324724005163e6ad67abf7343a0637e09a Mon Sep 17 00:00:00 2001
From: Tatiana Troilova
Date: Thu, 13 May 2021 13:30:20 +0300
Subject: [PATCH 24/27] updated third-party-programs.txt (#5607)

---
 licensing/third-party-programs.txt | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/licensing/third-party-programs.txt b/licensing/third-party-programs.txt
index d59f1aaa5d9b7d..03ddc3e168b14e
100644 --- a/licensing/third-party-programs.txt +++ b/licensing/third-party-programs.txt @@ -543,6 +543,30 @@ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABI ------------------------------------------------------------- +14. Deformable Convolutional Networks + +Copyright (c) 2017 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------------------------------------------------- + The following third party programs have their own third party program files. These additional third party program files are as follows: oneAPI Deep Neural Network Library (oneDNN) Third Party Programs File is available here https://github.com/openvinotoolkit/openvino/blob/master/licensing/onednn_third-party-programs.txt From 516479ae201cb95b1ab73373e03d887c5e13a635 Mon Sep 17 00:00:00 2001 From: Chenhu Wang Date: Thu, 13 May 2021 18:43:42 +0800 Subject: [PATCH 25/27] [CPU]shuffle_channel optimization (#4978) --- .../src/mkldnn_plugin/mkldnn_node.cpp | 4 +- .../src/mkldnn_plugin/mkldnn_node.h | 3 + .../src/mkldnn_plugin/mkldnn_plugin.cpp | 2 + .../nodes/common/permute_kernel.cpp | 14 +- .../nodes/common/permute_kernel.h | 1 + .../src/mkldnn_plugin/nodes/list_tbl.hpp | 1 - .../nodes/mkldnn_shuffle_channels_node.cpp | 247 ++++++++++++++++++ .../nodes/mkldnn_shuffle_channels_node.h | 40 +++ .../mkldnn_plugin/nodes/shuffle_channels.cpp | 201 -------------- .../single_layer_tests/shuffle_channels.cpp | 28 +- .../single_layer_tests/shuffle_channels.cpp | 222 ++++++++++++++++ 11 files changed, 534 insertions(+), 229 deletions(-) create mode 100644 inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp create mode 100644 inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h delete mode 100644 inference-engine/src/mkldnn_plugin/nodes/shuffle_channels.cpp create mode 100644 inference-engine/tests/functional/plugin/cpu/single_layer_tests/shuffle_channels.cpp diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp index ca4db7fd47c808..1692c411dfefc4 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -174,7 +175,8 @@ static const InferenceEngine::details::caseless_unordered_map { "GatherND", GatherND}, { "OneHot", OneHot}, { "RegionYolo", RegionYolo}, - { "Select", Select} + { "Select", Select}, + { "ShuffleChannels", ShuffleChannels}, }; Type 
TypeFromName(const std::string type) { diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.h b/inference-engine/src/mkldnn_plugin/mkldnn_node.h index 2a0102cccde213..a592f785541f36 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.h @@ -91,6 +91,7 @@ enum Type { Select, Roll, Reference, + ShuffleChannels, }; Type TypeFromName(const std::string type); @@ -203,6 +204,8 @@ static std::string NameFromType(Type type) { return "Select"; case Roll: return "Roll"; + case ShuffleChannels: + return "ShuffleChannels"; default: return "Unknown"; } diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp index 6b1ccc162825cd..a597a1233e4c95 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -276,6 +277,7 @@ static void Transformation(CNNNetwork& clonedNetwork, const Config& conf) { pass_config->disable(); pass_config->disable(); pass_config->disable(); + pass_config->disable(); pass_config->disable(); pass_config->disable(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.cpp b/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.cpp index 50e21be668fd18..4babccb518681d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.cpp @@ -134,8 +134,8 @@ struct jit_uni_permute_kernel_f32 : public jit_uni_permute_kernel, public jit_ge Xbyak::Reg64 reg_params = abi_param1; - Vmm vmm = Vmm(0); - Xbyak::Xmm xmm = Xbyak::Xmm(0); + Vmm vmm = Vmm(1); + Xbyak::Xmm xmm = Xbyak::Xmm(1); }; PermuteKernel::PermuteKernel(const PermuteParams& params) : params(params) { @@ -275,6 +275,16 @@ void PermuteKernel::execute(const uint8_t* src_data, uint8_t* dst_data, const in referenceExecute(src_data, dst_data, mb); } +void PermuteKernel::execute(const uint8_t* src_data, uint8_t* dst_data) { + SizeVector dst_dims = jcp.dst_block_dims; + if (permute_kernel) { + optimizedExecute(src_data, dst_data, dst_dims[0]); + return; + } + + referenceExecute(src_data, dst_data, dst_dims[0]); +} + void PermuteKernel::optimizedExecute(const uint8_t* src_data, uint8_t* dst_data, const int mb) { SizeVector dst_dims = jcp.dst_block_dims; const SizeVector dst_strides = jcp.dst_strides; diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.h b/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.h index be6427c7007f02..63497f3fa91401 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.h +++ b/inference-engine/src/mkldnn_plugin/nodes/common/permute_kernel.h @@ -55,6 +55,7 @@ class PermuteKernel { public: PermuteKernel(const PermuteParams& params); + void execute(const uint8_t* src_data, uint8_t* dst_data); void execute(const uint8_t* src_data, uint8_t* dst_data, const int mb); private: diff --git a/inference-engine/src/mkldnn_plugin/nodes/list_tbl.hpp b/inference-engine/src/mkldnn_plugin/nodes/list_tbl.hpp index de8fd66b708a7b..669aeffa8022a4 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/list_tbl.hpp +++ b/inference-engine/src/mkldnn_plugin/nodes/list_tbl.hpp @@ -41,7 +41,6 @@ MKLDNN_EXTENSION_NODE(ExperimentalDetectronROIFeatureExtractorImpl, Experimental MKLDNN_EXTENSION_NODE(ExperimentalDetectronGenerateProposalsSingleImageImpl, 
ExperimentalDetectronGenerateProposalsSingleImage); MKLDNN_EXTENSION_NODE(NonMaxSuppressionImpl, NonMaxSuppressionIEInternal); MKLDNN_EXTENSION_NODE(TopKImpl, TopK); -MKLDNN_EXTENSION_NODE(ShuffleChannelsImpl, ShuffleChannels); MKLDNN_EXTENSION_NODE(ExperimentalDetectronPriorGridGeneratorImpl, ExperimentalDetectronPriorGridGenerator); MKLDNN_EXTENSION_NODE(GRNImpl, GRN); MKLDNN_EXTENSION_NODE(BucketizeImpl, Bucketize); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp new file mode 100644 index 00000000000000..10d59bf09776b9 --- /dev/null +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.cpp @@ -0,0 +1,247 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "mkldnn_shuffle_channels_node.h" + +#include +#include +#include +#include "common/tensor_desc_creator.h" + +#include "common/cpu_memcpy.h" +#include "utils/general_utils.h" + +#include +#include + +#define THROW_SHCH_ERROR IE_THROW() << "ShuffleChannels layer with name '" << getName() << "' " + +using namespace mkldnn; +using namespace MKLDNNPlugin; +using namespace InferenceEngine; +using namespace mkldnn::impl; +using namespace mkldnn::impl::cpu::x64; + +bool MKLDNNShuffleChannelsNode::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { + try { + const auto shuffleChannels = std::dynamic_pointer_cast(op); + if (!shuffleChannels) { + errorMessage = "Only opset1 ShuffleChannels operation is supported"; + return false; + } + auto shapeSC = shuffleChannels->get_input_shape(0); + auto rankSC = shapeSC.size(); + auto axisSC = shuffleChannels->get_axis(); + auto groupSC = shuffleChannels->get_group(); + if (axisSC < 0) + axisSC += rankSC; + + if (axisSC < 0 || axisSC >= rankSC) { + errorMessage = "gets incorrect axis number, which should be in range of [-inputRank, inputRank)."; + return false; + } + + if (groupSC == 0 || shapeSC[axisSC] % groupSC) { + errorMessage = "gets incorrect group parameter('group' must evenly divide the channel dimension)."; + return false; + } + } catch (...) 
{ + return false; + } + return true; +} + +MKLDNNShuffleChannelsNode::MKLDNNShuffleChannelsNode(const std::shared_ptr& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) + : MKLDNNNode(op, eng, cache), permuteKernel_(nullptr), supportDynamicBatch_(false) { + std::string errorMessage; + if (!isSupportedOperation(op, errorMessage)) { + IE_THROW(NotImplemented) << errorMessage; + } + + const auto shuffleChannels = std::dynamic_pointer_cast(op); + inShape_ = shuffleChannels->get_input_shape(0); + dataRank_ = inShape_.size(); + axis_ = shuffleChannels->get_axis(); + if (axis_ < 0) + axis_ += dataRank_; + group_ = shuffleChannels->get_group(); + groupSize_ = inShape_[axis_] / group_; + + supportDynamicBatch_ = (axis_ != 0); +} + +void MKLDNNShuffleChannelsNode::getSupportedDescriptors() { +} + +void MKLDNNShuffleChannelsNode::initSupportedPrimitiveDescriptors() { + if (!supportedPrimitiveDescriptors.empty()) + return; + + InferenceEngine::Precision precision = getOriginalInputPrecisionAtPort(0); + const std::set supported_precision_sizes = {1, 2, 4, 8, 16}; + if (supported_precision_sizes.find(precision.size()) == supported_precision_sizes.end()) + THROW_SHCH_ERROR << "has unsupported precision: " << precision.name(); + + impl_desc_type impl_type; + if (mayiuse(cpu::x64::avx512_common)) { + impl_type = impl_desc_type::jit_avx512; + } else if (mayiuse(cpu::x64::avx2)) { + impl_type = impl_desc_type::jit_avx2; + } else if (mayiuse(cpu::x64::sse41)) { + impl_type = impl_desc_type::jit_sse42; + } else { + impl_type = impl_desc_type::ref; + } + + addSupportedPrimDesc({{TensorDescCreatorTypes::nspc, precision}}, + {{TensorDescCreatorTypes::nspc, precision}}, + impl_type, supportDynamicBatch_); + addSupportedPrimDesc({{TensorDescCreatorTypes::ncsp, precision}}, + {{TensorDescCreatorTypes::ncsp, precision}}, + impl_type, supportDynamicBatch_); + // canUseBlocked + if (axis_ != 1) { + addSupportedPrimDesc({{TensorDescCreatorTypes::nCsp8c, precision}}, + {{TensorDescCreatorTypes::nCsp8c, precision}}, + impl_type, supportDynamicBatch_); + addSupportedPrimDesc({{TensorDescCreatorTypes::nCsp16c, precision}}, + {{TensorDescCreatorTypes::nCsp16c, precision}}, + impl_type, supportDynamicBatch_); + } +} + +void MKLDNNShuffleChannelsNode::createPrimitive() { + if (prim) + return; + auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr(); + auto &srcMemPtr = getParentEdgeAt(0)->getMemoryPtr(); + if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr()) + THROW_SHCH_ERROR << "has not allocated destination memory"; + if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr()) + THROW_SHCH_ERROR << "has not allocated input memory"; + if (getSelectedPrimitiveDescriptor() == nullptr) + THROW_SHCH_ERROR << "has unidentified preferable primitive descriptor"; + + const bool isBlocked = getParentEdgeAt(0)->getMemory().GetDesc().isBlockedCFormat(); + + int batchRank = axis_; + int spatialRank = dataRank_ - axis_ - 1; + + // 2 for decomposed axis dim, 1 for composed spatial dim + int reshapedRank = batchRank + 2 + static_cast(spatialRank != 0) + static_cast(isBlocked && (spatialRank == 0)); + PermuteParams params; + params.data_size = getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc.getPrecision().size(); + params.order.resize(reshapedRank, 0); + params.src_block_order.resize(reshapedRank); + params.dst_block_order.resize(reshapedRank); + params.dst_block_dims.resize(reshapedRank); + params.src_block_dims.resize(reshapedRank); + + size_t spatialShapeSize = 1; + if (spatialRank != 0) { + for (int i = batchRank + 1; i < 
dataRank_; i++) { + spatialShapeSize *= inShape_[i]; + } + } + + auto decomposeAndTranpose = [&](int axis) { + params.src_block_dims[axis] = group_; + params.src_block_dims[axis + 1] = groupSize_; + params.order[axis] = axis + 1; + params.order[axis + 1] = axis; + }; + + const int channelDim = 1; + if (isBlocked) { + size_t blkSize = getParentEdgeAt(0)->getDesc().getBlockingDesc().getBlockDims().back(); + size_t CB = div_up(inShape_[1], blkSize); + SizeVector srcBlockedDims = getParentEdgeAt(0)->getDesc().getBlockingDesc().getBlockDims(); + if (axis_ > channelDim) { // axis on spatial + for (int i = 0; i < batchRank; i++) { + params.order[i] = i; + params.src_block_dims[i] = srcBlockedDims[i]; + } + decomposeAndTranpose(batchRank); + + params.order[batchRank + 2] = batchRank + 2; + params.src_block_dims[batchRank + 2] = spatialShapeSize * blkSize; + } else { // axis on batch + decomposeAndTranpose(0); + size_t spatialShapeSize = CB * blkSize; + for (int i = 2; i < dataRank_; i++) { + spatialShapeSize *= inShape_[i]; + } + params.order[2] = 2; + params.src_block_dims[2] = spatialShapeSize; + } + } else if (getParentEdgeAt(0)->getMemory().GetDesc().isTailCFormat()) { + if (axis_ == channelDim) { // axis on channel + params.order[0] = 0; + params.src_block_dims[0] = inShape_[0]; + params.order[1] = 1; + params.src_block_dims[1] = spatialShapeSize; + decomposeAndTranpose(2); + } else if (axis_ > channelDim) { // axis on spatial + for (int i = 0; i < batchRank; i++) { + if (i == 0) { + params.order[i] = i; + params.src_block_dims[i] = inShape_[i]; + } else if (i == 1) { + params.order[reshapedRank - 1] = reshapedRank - 1; + params.src_block_dims[params.order[reshapedRank - 1]] = inShape_[i]; + } else if (i > 1) { + params.order[i - 1] = i - 1; + params.src_block_dims[i - 1] = inShape_[i]; + } + } + decomposeAndTranpose(batchRank - 1); + + if (spatialRank != 0) { + params.order[batchRank + 1] = batchRank + 1; + params.src_block_dims[batchRank + 1] = spatialShapeSize; + } + } else { // axis on batch + decomposeAndTranpose(0); + params.order[2] = 2; + params.src_block_dims[2] = spatialShapeSize; + } + } else { + for (int i = 0; i < batchRank; i++) { + params.src_block_dims[i] = inShape_[i]; + params.order[i] = i; + } + + decomposeAndTranpose(batchRank); + if (spatialRank != 0) { + params.order[batchRank + 2] = batchRank + 2; + params.src_block_dims[batchRank + 2] = spatialShapeSize; + } + } + + std::iota(params.src_block_order.begin(), params.src_block_order.end(), 0); + std::iota(params.dst_block_order.begin(), params.dst_block_order.end(), 0); + for (size_t i = 0; i < reshapedRank; i++) + params.dst_block_dims[i] = params.src_block_dims[params.order[i]]; + + permuteKernel_ = std::unique_ptr(new PermuteKernel(params)); +} + +void MKLDNNShuffleChannelsNode::execute(mkldnn::stream strm) { + auto srcData = reinterpret_cast(this->getParentEdgeAt(0)->getMemoryPtr()->GetPtr()); + auto dstData = reinterpret_cast(this->getChildEdgeAt(0)->getMemoryPtr()->GetPtr()); + if (permuteKernel_) { + if (supportDynamicBatch_) + permuteKernel_->execute(srcData, dstData, batchToProcess()); + else + permuteKernel_->execute(srcData, dstData); + } else { + THROW_SHCH_ERROR << "does not initialize permute kernel to execute."; + } +} + +bool MKLDNNShuffleChannelsNode::created() const { + return getType() == ShuffleChannels; +} + +REG_MKLDNN_PRIM_FOR(MKLDNNShuffleChannelsNode, ShuffleChannels); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h 
b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h new file mode 100644 index 00000000000000..7206b1ae3637bc --- /dev/null +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_shuffle_channels_node.h @@ -0,0 +1,40 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include "common/permute_kernel.h" + +namespace MKLDNNPlugin { + +class MKLDNNShuffleChannelsNode : public MKLDNNNode { +public: + MKLDNNShuffleChannelsNode(const std::shared_ptr& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache); + ~MKLDNNShuffleChannelsNode() override = default; + + void getSupportedDescriptors() override; + void initSupportedPrimitiveDescriptors() override; + void createPrimitive() override; + void execute(mkldnn::stream strm) override; + bool created() const override; + + static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; + +private: + ngraph::Shape inShape_; + int dataRank_; + int axis_; + size_t group_; + size_t groupSize_; + + std::unique_ptr permuteKernel_; + bool supportDynamicBatch_; +}; + +} // namespace MKLDNNPlugin diff --git a/inference-engine/src/mkldnn_plugin/nodes/shuffle_channels.cpp b/inference-engine/src/mkldnn_plugin/nodes/shuffle_channels.cpp deleted file mode 100644 index bec57b38a39779..00000000000000 --- a/inference-engine/src/mkldnn_plugin/nodes/shuffle_channels.cpp +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "base.hpp" - -#include -#include -#include -#include -#include -#include "ie_parallel.hpp" -#include "common/cpu_memcpy.h" -#include -#include "common/tensor_desc_creator.h" - -namespace InferenceEngine { -namespace Extensions { -namespace Cpu { - -using MKLDNNPlugin::TensorDescCreatorTypes; - -class ShuffleChannelsImpl: public ExtLayerBase { -#define CNTR_SIZE 3 - -__inline size_t initter(size_t start, size_t size, size_t* counters, size_t* own_dims, size_t* ownStrides) { - size_t i = start; - size_t idx = 0; - for (int j = size - 1; j >= 0; j--) { - counters[j] = i % own_dims[j]; - idx += counters[j] * ownStrides[j]; - i /= own_dims[j]; - } - return idx; -} - -__inline size_t updater(size_t idx, size_t size, size_t* counters, size_t* own_dims, size_t* ownStrides) { - size_t i = 1; - for (int j = size - 1; j >= 0; j--) { - counters[j]++; - if (counters[j] < own_dims[j]) { - idx += ownStrides[j]; - break; - } else { - counters[j] = 0; - i = 0; - } - } - if (!i) { - for (idx = 0; i < CNTR_SIZE; ++i) - idx += counters[i] * ownStrides[i]; - } - return idx; -} - -public: - bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { - try { - auto scOp = ngraph::as_type_ptr(op); - if (!scOp) { - errorMessage = "Node is not an instance of the TopK from the operations set v1."; - return false; - } - - if (_supported_precisions_sizes.find(op->get_input_element_type(0).size()) == _supported_precisions_sizes.end()) { - errorMessage = "Unsupported precision: " + op->get_input_element_type(0).get_type_name(); - return false; - } - } catch (...) 
{ - return false; - } - return true; - } - - explicit ShuffleChannelsImpl(const std::shared_ptr& op) { - try { - std::string errorMessage; - if (!isSupportedOperation(op, errorMessage)) { - IE_THROW(NotImplemented) << errorMessage; - } - auto scOp = ngraph::as_type_ptr(op); - auto& dstDims = op->get_output_shape(0); - - int64_t axis = scOp->get_axis(); - if (axis < 0) - axis += dstDims.size(); - - if (axis < 0 || axis >= static_cast(dstDims.size())) - IE_THROW() << op->get_friendly_name() << " Incorrect input parameters dimensions and axis number!"; - - size_t group = scOp->get_group(); - if (group == 0 || dstDims[axis] % group) - IE_THROW() << op->get_friendly_name() << " Group parameter must evenly divide the channel dimension!"; - - // Find number of dictionaries, index range and data length - own_dims[0] = 1; - for (int i = 0; i < axis; i++) - own_dims[0] *= dstDims[i]; - - for (size_t i = axis + 1; i < dstDims.size(); i++) - dataLength *= dstDims[i]; - - if (dataLength == 0) - IE_THROW() << op->get_friendly_name() << " Incorrect input parameters dimension!"; - - own_dims[1] = dstDims[axis] / group; - own_dims[2] = group; - ownStrides[0] = dstDims[axis]; - ownStrides[1] = 1; - ownStrides[2] = own_dims[1]; - work_amount_dst = ownStrides[0] * own_dims[0]; - - addConfig(op, {{TensorDescCreatorTypes::ncsp, details::convertPrecision(op->get_input_element_type(0))}}, - {{TensorDescCreatorTypes::ncsp, details::convertPrecision(op->get_input_element_type(0))}}); - } catch (InferenceEngine::Exception &ex) { - errorMsg = ex.what(); - throw; - } - } - - StatusCode execute(std::vector& inputs, std::vector& outputs, ResponseDesc *resp) noexcept override { - switch (inputs[0]->getTensorDesc().getPrecision().size()) { - case 1: { - process_data::value_type>(inputs, outputs); - break; - } - case 2: { - process_data::value_type>(inputs, outputs); - break; - } - case 4: { - process_data::value_type>(inputs, outputs); - break; - } - case 8: { - process_data::value_type>(inputs, outputs); - break; - } - default: { - if (resp) { - std::string errorMsg = "ShuffleChannels layer does not support precision '" - + std::string(inputs[0]->getTensorDesc().getPrecision().name()) + "'"; - errorMsg.copy(resp->msg, sizeof(resp->msg) - 1); - } - return GENERAL_ERROR; - } - } - - return OK; - } - - template - void process_data(std::vector& inputs, std::vector& outputs) noexcept { - const T* src_data = inputs[0]->cbuffer().as() + - inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - T* dst_data = outputs[0]->cbuffer().as() + - outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); - - if (dataLength > 1) { - // Vectorized & Parallel - parallel_nt(0, [&](const int ithr, const int nthr) { - size_t start = 0, end = 0, src_idx = 0; - size_t counters[CNTR_SIZE] = { 0 }; - splitter(work_amount_dst, nthr, ithr, start, end); - src_idx = initter(start, CNTR_SIZE, counters, own_dims, ownStrides); - for (size_t iwork = start, dst_idx = start * dataLength; iwork < end; ++iwork, dst_idx += dataLength) { - cpu_memcpy(&dst_data[dst_idx], &src_data[dataLength * src_idx], sizeof(T) * dataLength); - src_idx = updater(src_idx, CNTR_SIZE, counters, own_dims, ownStrides); - } - }); - } else { - // Parallel - parallel_nt(0, [&](const int ithr, const int nthr) { - size_t start = 0, end = 0, src_idx = 0; - size_t counters[CNTR_SIZE] = { 0 }; - splitter(work_amount_dst, nthr, ithr, start, end); - src_idx = initter(start, CNTR_SIZE, counters, own_dims, ownStrides); - for (size_t iwork = start; iwork < end; ++iwork) { - 
dst_data[iwork] = src_data[src_idx]; - src_idx = updater(src_idx, CNTR_SIZE, counters, own_dims, ownStrides); - } - }); - } - } - -private: - size_t dataLength = 1; - size_t work_amount_dst; - size_t own_dims[CNTR_SIZE]; - size_t ownStrides[CNTR_SIZE]; - - static const std::set _supported_precisions_sizes; -}; - -const std::set ShuffleChannelsImpl::_supported_precisions_sizes = {1, 2, 4, 8}; - -REG_FACTORY_FOR(ShuffleChannelsImpl, ShuffleChannels); - -} // namespace Cpu -} // namespace Extensions -} // namespace InferenceEngine diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/shuffle_channels.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/shuffle_channels.cpp index 86bcb113414cb2..b36a9e0713b0b0 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/shuffle_channels.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/shuffle_channels.cpp @@ -11,27 +11,19 @@ using namespace LayerTestsDefinitions; namespace { const std::vector netPrecisions = { - InferenceEngine::Precision::I8, InferenceEngine::Precision::U8, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::I32, + InferenceEngine::Precision::U16, InferenceEngine::Precision::FP32 }; -const std::vector axes = {0, 1, 2, 3}; -const std::vector negativeAxes = {-4, -3, -2, -1}; -const std::vector groups = {1, 2, 3}; +const std::vector axes = {-4, -3, -2, -1, 0, 1, 2, 3}; +const std::vector groups = {1, 2, 3, 6}; const auto shuffleChannelsParams4D = ::testing::Combine( ::testing::ValuesIn(axes), ::testing::ValuesIn(groups) ); -const auto shuffleChannelsParamsNegativeAxis4D = ::testing::Combine( - ::testing::ValuesIn(negativeAxes), - ::testing::ValuesIn(groups) -); - INSTANTIATE_TEST_CASE_P(smoke_ShuffleChannels4D, ShuffleChannelsLayerTest, ::testing::Combine( shuffleChannelsParams4D, @@ -40,19 +32,7 @@ INSTANTIATE_TEST_CASE_P(smoke_ShuffleChannels4D, ShuffleChannelsLayerTest, ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), ::testing::Values(InferenceEngine::Layout::ANY), ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector({6, 6, 6, 6})), - ::testing::Values(CommonTestUtils::DEVICE_CPU)), - ShuffleChannelsLayerTest::getTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_ShuffleChannelsNegativeAxis4D, ShuffleChannelsLayerTest, - ::testing::Combine( - shuffleChannelsParamsNegativeAxis4D, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector({6, 6, 6, 6})), + ::testing::Values(std::vector({12, 18, 30, 36})), ::testing::Values(CommonTestUtils::DEVICE_CPU)), ShuffleChannelsLayerTest::getTestCaseName); diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/shuffle_channels.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/shuffle_channels.cpp new file mode 100644 index 00000000000000..0548aad8c5e7be --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/shuffle_channels.cpp @@ -0,0 +1,222 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "test_utils/cpu_test_utils.hpp" +#include "ngraph_functions/builders.hpp" +#include 
"ngraph_functions/utils/ngraph_helpers.hpp" + +using namespace InferenceEngine; +using namespace CPUTestUtils; + +namespace CPULayerTestsDefinitions { + +typedef std::tuple< + LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet, + CPUSpecificParams> ShuffleChannelsLayerCPUTestParamsSet; + +class ShuffleChannelsLayerCPUTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = obj.param; + + std::ostringstream result; + result << LayerTestsDefinitions::ShuffleChannelsLayerTest::getTestCaseName( + testing::TestParamInfo(basicParamsSet, 0)); + + result << CPUTestsBase::getTestCaseName(cpuParams); + + return result.str(); + } + +protected: + void SetUp() { + LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet basicParamsSet; + CPUSpecificParams cpuParams; + std::tie(basicParamsSet, cpuParams) = this->GetParam(); + + std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; + + LayerTestsDefinitions::shuffleChannelsSpecificParams shuffleChannelsParams; + std::vector inputShape; + Precision netPrecision; + std::tie(shuffleChannelsParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = basicParamsSet; + + int axis, group; + std::tie(axis, group) = shuffleChannelsParams; + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); + auto paramOuts = ngraph::helpers::convert2OutputVector( + ngraph::helpers::castOps2Nodes(params)); + auto shuffleChannels = std::dynamic_pointer_cast( + ngraph::builder::makeShuffleChannels(paramOuts[0], axis, group)); + shuffleChannels->get_rt_info() = getCPUInfo(); + ngraph::ResultVector results{std::make_shared(shuffleChannels)}; + function = std::make_shared(results, params, "shuffleChannels"); + + if (selectedType.empty()) { + selectedType = getPrimitiveType(); + } + selectedType.push_back('_'); + selectedType += netPrecision.name(); + } +}; + +TEST_P(ShuffleChannelsLayerCPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + Run(); + CheckPluginRelatedResults(executableNetwork, "ShuffleChannels"); +} + +namespace { + +/* CPU PARAMS */ +std::vector filterCPUInfoForDevice4D() { + std::vector resCPUParams; + if (with_cpu_x86_avx512f()) { + resCPUParams.push_back(CPUSpecificParams{{nchw}, {nchw}, {"jit_avx512"}, "jit_avx512"}); + resCPUParams.push_back(CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"}); + } else if (with_cpu_x86_avx2()) { + resCPUParams.push_back(CPUSpecificParams{{nchw}, {nchw}, {"jit_avx2"}, "jit_avx2"}); + resCPUParams.push_back(CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"}); + } else if (with_cpu_x86_sse42()) { + resCPUParams.push_back(CPUSpecificParams{{nchw}, {nchw}, {"jit_sse42"}, "jit_sse42"}); + resCPUParams.push_back(CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}); + } else { + resCPUParams.push_back(CPUSpecificParams{{nchw}, {nchw}, {"ref"}, "ref"}); + } + return resCPUParams; +} + +std::vector filterCPUInfoForDevice5D() { + std::vector resCPUParams; + if (with_cpu_x86_avx512f()) { + resCPUParams.push_back(CPUSpecificParams{{ncdhw}, {ncdhw}, {"jit_avx512"}, "jit_avx512"}); + resCPUParams.push_back(CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx512"}, "jit_avx512"}); + } else if 
(with_cpu_x86_avx2()) { + resCPUParams.push_back(CPUSpecificParams{{ncdhw}, {ncdhw}, {"jit_avx2"}, "jit_avx2"}); + resCPUParams.push_back(CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"}); + } else if (with_cpu_x86_sse42()) { + resCPUParams.push_back(CPUSpecificParams{{ncdhw}, {ncdhw}, {"jit_sse42"}, "jit_sse42"}); + resCPUParams.push_back(CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}); + } else { + resCPUParams.push_back(CPUSpecificParams{{ncdhw}, {ncdhw}, {"ref"}, "ref"}); + } + return resCPUParams; +} + +std::vector filterCPUInfoForDevice4DBlock() { + std::vector resCPUParams; + if (with_cpu_x86_avx512f()) { + resCPUParams.push_back(CPUSpecificParams{{nChw16c}, {nChw16c}, {"jit_avx512"}, "jit_avx512"}); + } else if (with_cpu_x86_avx2()) { + resCPUParams.push_back(CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2"}, "jit_avx2"}); + } else if (with_cpu_x86_sse42()) { + resCPUParams.push_back(CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42"}, "jit_sse42"}); + } + return resCPUParams; +} + +std::vector filterCPUInfoForDevice5DBlock() { + std::vector resCPUParams; + if (with_cpu_x86_avx512f()) { + resCPUParams.push_back(CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {"jit_avx512"}, "jit_avx512"}); + } else if (with_cpu_x86_avx2()) { + resCPUParams.push_back(CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_avx2"}, "jit_avx2"}); + } else if (with_cpu_x86_sse42()) { + resCPUParams.push_back(CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"}); + } + return resCPUParams; +} +/* ========== */ + +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::BF16, + InferenceEngine::Precision::I8 +}; + +const auto shuffleChannelsParams4D = ::testing::Combine( + ::testing::ValuesIn(std::vector{-4, -2, 0, 1, 2, 3}), + ::testing::ValuesIn(std::vector{1, 2, 4, 8}) +); + +const auto shuffleChannelsParams5D = ::testing::Combine( + ::testing::ValuesIn(std::vector{-5, -1, 0, 1, 2, 3, 4}), + ::testing::ValuesIn(std::vector{1, 2, 3, 6}) +); + +INSTANTIATE_TEST_CASE_P(smoke_ShuffleChannels4D, ShuffleChannelsLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + shuffleChannelsParams4D, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(std::vector({16, 24, 32, 40})), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice4D())), + ShuffleChannelsLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(smoke_ShuffleChannels5D, ShuffleChannelsLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + shuffleChannelsParams5D, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(InferenceEngine::Layout::ANY), + ::testing::Values(std::vector({12, 18, 12, 18, 24})), + ::testing::Values(CommonTestUtils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice5D())), + ShuffleChannelsLayerCPUTest::getTestCaseName); + +const auto shuffleChannelsParams4DBlock = ::testing::Combine( + ::testing::ValuesIn(std::vector{-4, -2, -1, 0, 2, 3}), + ::testing::ValuesIn(std::vector{1, 2, 4, 8}) +); + +const auto shuffleChannelsParams5DBlock = ::testing::Combine( + 
::testing::ValuesIn(std::vector<int>{-5, -2, -1, 0, 2, 3, 4}),
+        ::testing::ValuesIn(std::vector<int>{1, 2, 3, 6})
+);
+
+INSTANTIATE_TEST_CASE_P(smoke_ShuffleChannels4DBlock, ShuffleChannelsLayerCPUTest,
+        ::testing::Combine(
+                ::testing::Combine(
+                        shuffleChannelsParams4DBlock,
+                        ::testing::ValuesIn(netPrecisions),
+                        ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                        ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                        ::testing::Values(InferenceEngine::Layout::ANY),
+                        ::testing::Values(InferenceEngine::Layout::ANY),
+                        ::testing::Values(std::vector<size_t>({40, 32, 24, 16})),
+                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                ::testing::ValuesIn(filterCPUInfoForDevice4DBlock())),
+        ShuffleChannelsLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_ShuffleChannels5DBlock, ShuffleChannelsLayerCPUTest,
+        ::testing::Combine(
+                ::testing::Combine(
+                        shuffleChannelsParams5DBlock,
+                        ::testing::ValuesIn(netPrecisions),
+                        ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                        ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                        ::testing::Values(InferenceEngine::Layout::ANY),
+                        ::testing::Values(InferenceEngine::Layout::ANY),
+                        ::testing::Values(std::vector<size_t>({18, 12, 18, 12, 30})),
+                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                ::testing::ValuesIn(filterCPUInfoForDevice5DBlock())),
+        ShuffleChannelsLayerCPUTest::getTestCaseName);
+
+} // namespace
+
+} // namespace CPULayerTestsDefinitions

From 3d03f02101692df4827238ac974a4290b5c3ae35 Mon Sep 17 00:00:00 2001
From: Michael Nosov
Date: Thu, 13 May 2021 17:57:20 +0300
Subject: [PATCH 26/27] Update according to 'main' function change

---
 model-optimizer/mo/main.py       | 2 +-
 model-optimizer/mo/main_caffe.py | 2 +-
 model-optimizer/mo/main_kaldi.py | 2 +-
 model-optimizer/mo/main_mxnet.py | 2 +-
 model-optimizer/mo/main_onnx.py  | 2 +-
 model-optimizer/mo/main_tf.py    | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/model-optimizer/mo/main.py b/model-optimizer/mo/main.py
index f55280124b4b8e..e0d26315739d97 100644
--- a/model-optimizer/mo/main.py
+++ b/model-optimizer/mo/main.py
@@ -451,4 +451,4 @@ def main(cli_parser: argparse.ArgumentParser, fem, framework: str):
 
 if __name__ == "__main__":
     from mo.utils.cli_parser import get_all_cli_parser
-    sys.exit(main(get_all_cli_parser(), None))
+    sys.exit(main(*get_all_cli_parser(), None))
diff --git a/model-optimizer/mo/main_caffe.py b/model-optimizer/mo/main_caffe.py
index bcba5c8d611735..bd2f4b62bf4d98 100644
--- a/model-optimizer/mo/main_caffe.py
+++ b/model-optimizer/mo/main_caffe.py
@@ -7,4 +7,4 @@
 
 if __name__ == "__main__":
     from mo.main import main
-    sys.exit(main(get_caffe_cli_parser(), 'caffe'))
+    sys.exit(main(get_caffe_cli_parser(), None, 'caffe'))
diff --git a/model-optimizer/mo/main_kaldi.py b/model-optimizer/mo/main_kaldi.py
index 15233333203adb..e2105e32e5dec6 100644
--- a/model-optimizer/mo/main_kaldi.py
+++ b/model-optimizer/mo/main_kaldi.py
@@ -7,4 +7,4 @@
 
 if __name__ == "__main__":
     from mo.main import main
-    sys.exit(main(get_kaldi_cli_parser(), 'kaldi'))
+    sys.exit(main(get_kaldi_cli_parser(), None, 'kaldi'))
diff --git a/model-optimizer/mo/main_mxnet.py b/model-optimizer/mo/main_mxnet.py
index 91cb19531592e5..b22a277231b0f4 100644
--- a/model-optimizer/mo/main_mxnet.py
+++ b/model-optimizer/mo/main_mxnet.py
@@ -7,4 +7,4 @@
 
 if __name__ == "__main__":
     from mo.main import main
-    sys.exit(main(get_mxnet_cli_parser(), 'mxnet'))
+    sys.exit(main(get_mxnet_cli_parser(), None, 'mxnet'))
diff --git a/model-optimizer/mo/main_onnx.py
b/model-optimizer/mo/main_onnx.py index 3bf882d65e9ed0..e0569f4c1694a0 100644 --- a/model-optimizer/mo/main_onnx.py +++ b/model-optimizer/mo/main_onnx.py @@ -7,4 +7,4 @@ if __name__ == "__main__": from mo.main import main - sys.exit(main(get_onnx_cli_parser(), 'onnx')) + sys.exit(main(get_onnx_cli_parser(), None, 'onnx')) diff --git a/model-optimizer/mo/main_tf.py b/model-optimizer/mo/main_tf.py index 3c55e4ac0e2d05..5464b114082a6d 100644 --- a/model-optimizer/mo/main_tf.py +++ b/model-optimizer/mo/main_tf.py @@ -7,4 +7,4 @@ if __name__ == "__main__": from mo.main import main - sys.exit(main(get_tf_cli_parser(), 'tf')) + sys.exit(main(get_tf_cli_parser(), None, 'tf')) From 7c132813cfeac5eba07e9798b5836bb3732e6444 Mon Sep 17 00:00:00 2001 From: Michael Nosov Date: Thu, 13 May 2021 18:10:02 +0300 Subject: [PATCH 27/27] update BOM file --- model-optimizer/automation/package_BOM.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index 865e499c410ec2..3b053c978b6f5f 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -948,6 +948,7 @@ mo/main_caffe.py mo/main_kaldi.py mo/main_mxnet.py mo/main_onnx.py +mo/main_pdpd.py mo/main_tf.py mo/middle/__init__.py mo/middle/passes/__init__.py
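The BOM entry above registers mo/main_pdpd.py alongside the other per-framework launchers, though the file itself is not included in this series. Given the three-argument main() wired up in PATCH 26, it presumably mirrors main_onnx.py and the other entry points; a hypothetical sketch only (the parser helper and its return shape are assumptions, not taken from this series):

    # mo/main_pdpd.py -- hypothetical sketch, not part of this patch series
    import sys

    if __name__ == "__main__":
        from mo.main import main
        from mo.utils.cli_parser import get_all_cli_parser  # assumed to return (cli_parser, fem), as unpacked in mo/main.py
        sys.exit(main(*get_all_cli_parser(), 'pdpd'))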